diff --git a/sahara/api/base.py b/sahara/api/base.py
index ec046f04..70161b9b 100644
--- a/sahara/api/base.py
+++ b/sahara/api/base.py
@@ -13,9 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from sahara.i18n import _
 import sahara.utils.api as u
 
 
 def not_implemented():
     return u.internal_error(
-        501, NotImplementedError("This API operation isn't implemented"))
+        501, NotImplementedError(_("This API operation isn't implemented")))
diff --git a/sahara/conductor/__init__.py b/sahara/conductor/__init__.py
index c8ef2c90..67e0eaf4 100644
--- a/sahara/conductor/__init__.py
+++ b/sahara/conductor/__init__.py
@@ -16,6 +16,7 @@
 from oslo.config import cfg
 
 from sahara.conductor import api as conductor_api
+from sahara.i18n import _
 
 
 def Api(use_local=True, **kwargs):
@@ -27,7 +28,8 @@ def Api(use_local=True, **kwargs):
     if cfg.CONF.conductor.use_local or use_local:
         api = conductor_api.LocalApi
     else:
-        raise NotImplementedError("Remote conductor isn't implemented yet.")
+        raise NotImplementedError(
+            _("Remote conductor isn't implemented yet."))
         # api = conductor.RemoteApi
 
     return api(**kwargs)
diff --git a/sahara/conductor/resource.py b/sahara/conductor/resource.py
index db3d22d2..f9109a2f 100644
--- a/sahara/conductor/resource.py
+++ b/sahara/conductor/resource.py
@@ -28,6 +28,7 @@ import datetime
 import six
 
 from sahara.conductor import objects
+from sahara.i18n import _
 from sahara.swift import swift_helper
 from sahara.utils import types
 
@@ -114,7 +115,7 @@ class Resource(types.FrozenDict):
         elif self._is_passthrough_type(entity):
             return entity
         else:
-            raise TypeError("Unsupported type: %s" % type(entity).__name__)
+            raise TypeError(_("Unsupported type: %s") % type(entity).__name__)
 
     def _wrap_list(self, refname, lst):
         newlst = [self._wrap_entity(refname, entity) for entity in lst]
diff --git a/sahara/db/migration/cli.py b/sahara/db/migration/cli.py
index 52dce2a4..4f5e2ada 100644
--- a/sahara/db/migration/cli.py
+++ b/sahara/db/migration/cli.py
@@ -20,6 +20,7 @@ from alembic import config as alembic_cfg
 from alembic import util as alembic_u
 from oslo.config import cfg
 
+from sahara.i18n import _
 
 CONF = cfg.CONF
 
@@ -37,7 +38,7 @@ def do_check_migration(config, _cmd):
 
 
 def do_upgrade_downgrade(config, cmd):
     if not CONF.command.revision and not CONF.command.delta:
-        raise SystemExit('You must provide a revision or relative delta')
+        raise SystemExit(_('You must provide a revision or relative delta'))
 
     revision = CONF.command.revision
diff --git a/sahara/middleware/auth_valid.py b/sahara/middleware/auth_valid.py
index 4bfbd45e..3420b939 100644
--- a/sahara/middleware/auth_valid.py
+++ b/sahara/middleware/auth_valid.py
@@ -15,6 +15,9 @@
 
 import webob.exc as ex
 
+from sahara.i18n import _
+from sahara.i18n import _LI
+from sahara.i18n import _LW
 from sahara.openstack.common import log as logging
 import sahara.openstack.commons as commons
 
@@ -40,7 +43,7 @@ class AuthValidator:
         """
         token_tenant = env['HTTP_X_TENANT_ID']
         if not token_tenant:
-            LOG.warn("Can't get tenant_id from env")
+            LOG.warn(_LW("Can't get tenant_id from env"))
             resp = ex.HTTPServiceUnavailable()
             return resp(env, start_response)
 
@@ -48,13 +51,14 @@ class AuthValidator:
         if path != '/':
             version, url_tenant, rest = commons.split_path(path, 3, 3, True)
             if not version or not url_tenant or not rest:
-                LOG.info("Incorrect path: %s", path)
-                resp = ex.HTTPNotFound("Incorrect path")
+                LOG.info(_LI("Incorrect path: %s"), path)
+                resp = ex.HTTPNotFound(_("Incorrect path"))
                 return resp(env, start_response)
 
             if token_tenant != url_tenant:
                 LOG.debug("Unauthorized: token tenant != requested tenant")
-                resp = ex.HTTPUnauthorized('Token tenant != requested tenant')
+                resp = ex.HTTPUnauthorized(
+                    _('Token tenant != requested tenant'))
                 return resp(env, start_response)
 
         return self.app(env, start_response)
diff --git a/sahara/service/edp/oozie/workflow_creator/base_workflow.py b/sahara/service/edp/oozie/workflow_creator/base_workflow.py
index fa56b6a5..fa5e11e3 100644
--- a/sahara/service/edp/oozie/workflow_creator/base_workflow.py
+++ b/sahara/service/edp/oozie/workflow_creator/base_workflow.py
@@ -17,6 +17,7 @@
 import xml.dom.minidom as xml
 
 import sahara.exceptions as ex
+from sahara.i18n import _
 from sahara.utils import xmlutils as x
 
 
@@ -45,8 +46,8 @@ class OozieWorkflowCreator(object):
     def _add_to_prepare_element(self, element, paths):
         if element not in ['delete', 'mkdir']:
             raise ex.NotFoundException(element,
-                                       message='"%s" child cannot be '
-                                               'added to prepare element')
+                                       message=_('"%s" child cannot be '
+                                                 'added to prepare element'))
         prop = x.get_and_create_if_not_exist(self.doc, self.tag_name,
                                              'prepare')
         for path in paths:
@@ -56,8 +57,8 @@ class OozieWorkflowCreator(object):
     def _add_to_streaming_element(self, element, path):
         if element not in ['mapper', 'reducer']:
             raise ex.NotFoundException(element,
-                                       message='"%s" child cannot be added '
-                                               'to streaming element')
+                                       message=_('"%s" child cannot be added '
+                                                 'to streaming element'))
 
         x.get_and_create_if_not_exist(self.doc, self.tag_name,
                                       'streaming')
diff --git a/sahara/service/ops.py b/sahara/service/ops.py
index 1b9a062f..d3c65480 100644
--- a/sahara/service/ops.py
+++ b/sahara/service/ops.py
@@ -20,6 +20,7 @@ from oslo import messaging
 
 from sahara import conductor as c
 from sahara import context
+from sahara.i18n import _LE
 from sahara.openstack.common import log as logging
 from sahara.plugins import base as plugin_base
 from sahara.service.edp import job_manager
@@ -139,8 +140,9 @@ def _provision_cluster(cluster_id):
         if not g.check_cluster_exists(cluster):
             LOG.info(g.format_cluster_deleted_message(cluster))
             return
-        LOG.exception("Can't configure cluster '%s' (reason: %s)",
-                      cluster.name, ex)
+        LOG.exception(
+            _LE("Can't configure cluster '%(name)s' (reason: %(reason)s)"),
+            {'name': cluster.name, 'reason': ex})
         g.change_cluster_status(cluster, "Error")
         return
 
@@ -156,8 +158,9 @@
         if not g.check_cluster_exists(cluster):
             LOG.info(g.format_cluster_deleted_message(cluster))
             return
-        LOG.exception("Can't start services for cluster '%s' (reason: %s)",
-                      cluster.name, ex)
+        LOG.exception(
+            _LE("Can't start services for cluster '%(name)s' (reason: "
+                "%(reason)s)"), {'name': cluster.name, 'reason': ex})
         g.change_cluster_status(cluster, "Error")
         return
 
@@ -206,8 +209,10 @@
         if not g.check_cluster_exists(cluster):
             LOG.info(g.format_cluster_deleted_message(cluster))
             return
-        LOG.exception("Can't scale cluster '%s' (reason: %s)",
-                      cluster.name, ex)
+        LOG.exception(
+            _LE("Can't scale cluster '%(name)s' (reason: %(reason)s)"),
+            {'name': cluster.name, 'reason': ex})
+
         g.change_cluster_status(cluster, "Error")
         return
diff --git a/sahara/service/periodic.py b/sahara/service/periodic.py
index 2bade15e..e3f3ecec 100644
--- a/sahara/service/periodic.py
+++ b/sahara/service/periodic.py
@@ -20,6 +20,7 @@ import six
 
 from sahara import conductor as c
 from sahara import context
+from sahara.i18n import _LI
 from sahara.openstack.common import log
 from sahara.openstack.common import periodic_task
 from sahara.openstack.common import threadgroup
@@ -92,15 +93,18 @@ class SaharaPeriodicTasks(periodic_task.PeriodicTasks):
             if CONF.use_identity_api_v3:
                 trusts.use_os_admin_auth_token(cluster)
 
-                LOG.info('Terminating transient cluster %s with id %s' %
-                         (cluster.name, cluster.id))
+                LOG.info(_LI('Terminating transient cluster %(cluster)s '
+                             'with id %(id)s'),
+                         {'cluster': cluster.name, 'id': cluster.id})
 
                 try:
                     api.terminate_cluster(cluster.id)
                 except Exception as e:
-                    LOG.info('Failed to terminate transient cluster '
-                             '%s with id %s: %s.' %
-                             (cluster.name, cluster.id, six.text_type(e)))
+                    LOG.info(_LI('Failed to terminate transient cluster '
+                                 '%(cluster)s with id %(id)s: %(error)s.'),
+                             {'cluster': cluster.name,
+                              'id': cluster.id,
+                              'error': six.text_type(e)})
 
             else:
                 if cluster.status != 'AwaitingTermination':
diff --git a/sahara/service/validation.py b/sahara/service/validation.py
index 20f785a9..d9da8235 100644
--- a/sahara/service/validation.py
+++ b/sahara/service/validation.py
@@ -18,6 +18,7 @@ import functools
 import jsonschema
 
 from sahara import exceptions as ex
+from sahara.i18n import _
 import sahara.openstack.common.exception as os_ex
 from sahara.utils import api as u
 from sahara.utils import api_validator
@@ -73,7 +74,7 @@ def check_exists(get_func, *id_prop, **get_args):
                 raise e
             if obj is None:
                 e = ex.NotFoundException(get_kwargs,
-                                         'Object with %s not found')
+                                         _('Object with %s not found'))
                 return u.not_found(e)
 
             return func(*args, **kwargs)
diff --git a/sahara/service/validations/base.py b/sahara/service/validations/base.py
index 61dd5388..7b6a32b7 100644
--- a/sahara/service/validations/base.py
+++ b/sahara/service/validations/base.py
@@ -19,6 +19,7 @@ from oslo.config import cfg
 from sahara import conductor as cond
 from sahara import context
 import sahara.exceptions as ex
+from sahara.i18n import _
 import sahara.plugins.base as plugin_base
 import sahara.service.api as api
 from sahara.utils import general as g
@@ -48,20 +49,21 @@ def _get_plugin_configs(plugin_name, hadoop_version, scope=None):
 
 def check_plugin_name_exists(name):
     if name not in [p.name for p in api.get_plugins()]:
-        raise ex.InvalidException("Sahara doesn't contain plugin with name "
-                                  "'%s'" % name)
+        raise ex.InvalidException(
+            _("Sahara doesn't contain plugin with name '%s'") % name)
 
 
 def check_plugin_supports_version(p_name, version):
     if version not in plugin_base.PLUGINS.get_plugin(p_name).get_versions():
-        raise ex.InvalidException("Requested plugin '%s' doesn't support"
-                                  " version '%s'" % (p_name, version))
+        raise ex.InvalidException(
+            _("Requested plugin '%(name)s' doesn't support version "
+              "'%(version)s'") % {'name': p_name, 'version': version})
 
 
 def check_image_registered(image_id):
     if image_id not in [i.id for i in nova.client().images.list_registered()]:
-        raise ex.InvalidException("Requested image '%s' is not registered"
-                                  % image_id)
+        raise ex.InvalidException(
+            _("Requested image '%s' is not registered") % image_id)
 
 
 def check_node_group_configs(plugin_name, hadoop_version, ng_configs,
@@ -71,13 +73,15 @@
                                    hadoop_version)
     for app_target, configs in ng_configs.items():
         if app_target not in pl_confs:
-            raise ex.InvalidException("Plugin doesn't contain applicable "
-                                      "target '%s'" % app_target)
+            raise ex.InvalidException(
+                _("Plugin doesn't contain applicable target '%s'")
+                % app_target)
         for name, values in configs.items():
            if name not in pl_confs[app_target]:
raise ex.InvalidException("Plugin's applicable target '%s' " - "doesn't contain config with name " - "'%s'" % (app_target, name)) + raise ex.InvalidException( + _("Plugin's applicable target '%(target)s' doesn't " + "contain config with name '%(name)s'") % + {'target': app_target, 'name': name}) def check_all_configurations(data): @@ -131,8 +135,8 @@ def check_node_group_basic_fields(plugin_name, hadoop_version, ng, def check_flavor_exists(flavor_id): flavor_list = nova.client().flavors.list() if flavor_id not in [flavor.id for flavor in flavor_list]: - raise ex.InvalidException("Requested flavor '%s' not found" - % flavor_id) + raise ex.InvalidException( + _("Requested flavor '%s' not found") % flavor_id) def check_floatingip_pool_exists(ng_name, pool_id): @@ -146,37 +150,39 @@ def check_floatingip_pool_exists(ng_name, pool_id): break if not network: - raise ex.InvalidException("Floating IP pool %s for node group " - "'%s' not found" % (pool_id, ng_name)) + raise ex.InvalidException( + _("Floating IP pool %(pool)s for node group '%(group)s' " + "not found") % {'pool': pool_id, 'group': ng_name}) def check_node_processes(plugin_name, version, node_processes): if len(set(node_processes)) != len(node_processes): - raise ex.InvalidException("Duplicates in node processes " - "have been detected") - plugin_procesess = [] + raise ex.InvalidException( + _("Duplicates in node processes have been detected")) + plugin_processes = [] for process in plugin_base.PLUGINS.get_plugin( plugin_name).get_node_processes(version).values(): - plugin_procesess += process + plugin_processes += process - if not set(node_processes).issubset(set(plugin_procesess)): - raise ex.InvalidException("Plugin supports the following " - "node procesess: %s" % plugin_procesess) + if not set(node_processes).issubset(set(plugin_processes)): + raise ex.InvalidException( + _("Plugin supports the following node procesess: %s") + % plugin_processes) def check_duplicates_node_groups_names(node_groups): ng_names = [ng['name'] for ng in node_groups] if len(set(ng_names)) < len(node_groups): - raise ex.InvalidException("Duplicates in node group names " - "are detected") + raise ex.InvalidException( + _("Duplicates in node group names are detected")) # Cluster creation related checks def check_cluster_unique_name(name): if name in [cluster.name for cluster in api.get_clusters()]: - raise ex.NameAlreadyExistsException("Cluster with name '%s' already" - " exists" % name) + raise ex.NameAlreadyExistsException( + _("Cluster with name '%s' already exists") % name) check_heat_stack_name(name) @@ -185,14 +191,14 @@ def check_heat_stack_name(cluster_name): for stack in heat.client().stacks.list(): if stack.stack_name == cluster_name: raise ex.NameAlreadyExistsException( - "Cluster name '%s' is already used as Heat stack name" + _("Cluster name '%s' is already used as Heat stack name") % cluster_name) def check_cluster_exists(id): if not api.get_cluster(id): - raise ex.InvalidException("Cluster with id '%s'" - " doesn't exist" % id) + raise ex.InvalidException( + _("Cluster with id '%s' doesn't exist") % id) def check_cluster_hostnames_lengths(cluster_name, node_groups): @@ -203,35 +209,38 @@ def check_cluster_hostnames_lengths(cluster_name, node_groups): longest_hostname += CONF.node_domain if len(longest_hostname) > MAX_HOSTNAME_LENGTH: raise ex.InvalidException( - "Composite hostname %s in provisioned cluster exceeds " - "maximum limit %s characters" % (longest_hostname, - MAX_HOSTNAME_LENGTH)) + _("Composite hostname %(host)s in provisioned 
cluster exceeds" + " maximum limit %(limit)s characters") % + {'host': longest_hostname, + 'limit': MAX_HOSTNAME_LENGTH}) def check_keypair_exists(keypair): try: nova.client().keypairs.get(keypair) except nova_ex.NotFound: - raise ex.InvalidException("Requested keypair '%s' not found" % keypair) + raise ex.InvalidException( + _("Requested keypair '%s' not found") % keypair) def check_network_exists(net_id): if not nova.get_network(id=net_id): - raise ex.InvalidException("Network %s not found" % net_id) + raise ex.InvalidException(_("Network %s not found") % net_id) # Cluster templates related checks def check_cluster_template_unique_name(name): if name in [t.name for t in api.get_cluster_templates()]: - raise ex.NameAlreadyExistsException("Cluster template with name '%s'" - " already exists" % name) + raise ex.NameAlreadyExistsException( + _("Cluster template with name '%s' already exists") % name) def check_cluster_template_exists(cluster_template_id): if not api.get_cluster_template(id=cluster_template_id): - raise ex.InvalidException("Cluster template with id '%s'" - " doesn't exist" % cluster_template_id) + raise ex.InvalidException( + _("Cluster template with id '%s' doesn't exist") + % cluster_template_id) def check_node_groups_in_cluster_templates(cluster_name, plugin_name, @@ -249,14 +258,14 @@ def check_node_groups_in_cluster_templates(cluster_name, plugin_name, def check_node_group_template_unique_name(name): if name in [t.name for t in api.get_node_group_templates()]: - raise ex.NameAlreadyExistsException("NodeGroup template with name '%s'" - " already exists" % name) + raise ex.NameAlreadyExistsException( + _("NodeGroup template with name '%s' already exists") % name) def check_node_group_template_exists(ng_tmpl_id): if not api.get_node_group_template(id=ng_tmpl_id): - raise ex.InvalidException("NodeGroup template with id '%s'" - " doesn't exist" % ng_tmpl_id) + raise ex.InvalidException( + _("NodeGroup template with id '%s' doesn't exist") % ng_tmpl_id) def check_network_config(node_groups): @@ -290,8 +299,9 @@ def check_resize(cluster, r_node_groups): for ng in r_node_groups: if ng['name'] not in cluster_ng_names: - raise ex.InvalidException("Cluster doesn't contain node group " - "with name '%s'" % ng['name']) + raise ex.InvalidException( + _("Cluster doesn't contain node group with name '%s'") + % ng['name']) def check_add_node_groups(cluster, add_node_groups): @@ -303,9 +313,9 @@ def check_add_node_groups(cluster, add_node_groups): for ng in add_node_groups: if ng['name'] in cluster_ng_names: - raise ex.InvalidException("Can't add new nodegroup. Cluster " - "already has nodegroup with name '%s'" - % ng['name']) + raise ex.InvalidException( + _("Can't add new nodegroup. 
Cluster already has nodegroup with" + " name '%s'") % ng['name']) check_node_group_basic_fields(cluster.plugin_name, cluster.hadoop_version, ng, pl_confs) @@ -317,7 +327,7 @@ def check_cinder_exists(): services = [service.name for service in keystone.client().services.list()] if 'cinder' not in services: - raise ex.InvalidException("Cinder is not supported") + raise ex.InvalidException(_("Cinder is not supported")) # Tags @@ -328,12 +338,11 @@ def check_required_image_tags(plugin_name, hadoop_version, image_id): plugin = plugin_base.PLUGINS.get_plugin(plugin_name) req_tags = set(plugin.get_required_image_tags(hadoop_version)) if not req_tags.issubset(set(image.tags)): - raise ex.InvalidException("Tags of requested image '%s' don't " - "contain required tags " - "['%s', '%s']" % - (image_id, - plugin_name, - hadoop_version)) + raise ex.InvalidException( + _("Tags of requested image '%(image)s' don't contain required" + " tags ['%(name)s', '%(version)s']") + % {'image': image_id, 'name': plugin_name, + 'version': hadoop_version}) # EDP diff --git a/sahara/service/validations/cluster_templates.py b/sahara/service/validations/cluster_templates.py index 06e08e4f..f38b648b 100644 --- a/sahara/service/validations/cluster_templates.py +++ b/sahara/service/validations/cluster_templates.py @@ -16,6 +16,7 @@ import copy from sahara import exceptions as ex +from sahara.i18n import _ from sahara.service import api import sahara.service.validations.base as b import sahara.service.validations.node_group_templates as ng_tml @@ -133,5 +134,6 @@ def check_cluster_template_usage(cluster_template_id, **kwargs): if users: raise ex.InvalidException( - "Cluster template %s in use by %s" % - (cluster_template_id, ', '.join(users))) + _("Cluster template %(id)s in use by %(clusters)s") % + {'id': cluster_template_id, + 'clusters': ', '.join(users)}) diff --git a/sahara/service/validations/clusters.py b/sahara/service/validations/clusters.py index efe3d31d..f48f3fbf 100644 --- a/sahara/service/validations/clusters.py +++ b/sahara/service/validations/clusters.py @@ -18,6 +18,7 @@ import copy from oslo.config import cfg import sahara.exceptions as ex +from sahara.i18n import _ import sahara.service.api as api import sahara.service.validations.base as b import sahara.service.validations.cluster_templates as cl_tmpl @@ -71,7 +72,7 @@ def check_cluster_create(data, **kwargs): default_image_id) else: raise ex.NotFoundException('default_image_id', - "'%s' field is not found") + _("'%s' field is not found")) b.check_all_configurations(data) @@ -86,13 +87,14 @@ def check_cluster_create(data, **kwargs): neutron_net_id = _get_cluster_field(data, 'neutron_management_network') if neutron_net_id: if not CONF.use_neutron: - raise ex.InvalidException("'neutron_management_network' field " - "can't be used with 'use_neutron=False'") + raise ex.InvalidException( + _("'neutron_management_network' field can't be used " + "with 'use_neutron=False'")) b.check_network_exists(neutron_net_id) else: if CONF.use_neutron: raise ex.NotFoundException('neutron_management_network', - message="'%s' field is not found") + message=_("'%s' field is not found")) def _get_cluster_field(cluster, field): diff --git a/sahara/service/validations/clusters_scaling.py b/sahara/service/validations/clusters_scaling.py index 3d0002a9..9773bab7 100644 --- a/sahara/service/validations/clusters_scaling.py +++ b/sahara/service/validations/clusters_scaling.py @@ -16,6 +16,7 @@ import copy import sahara.exceptions as ex +from sahara.i18n import _ import 
sahara.plugins.base as plugin_base import sahara.service.api as api import sahara.service.validations.base as b @@ -73,12 +74,13 @@ def check_cluster_scaling(data, cluster_id, **kwargs): plugin_base.PLUGINS.is_plugin_implements(cluster.plugin_name, 'decommission_nodes'))): raise ex.InvalidException( - "Requested plugin '%s' doesn't support cluster scaling feature" + _("Requested plugin '%s' doesn't support cluster scaling feature") % cluster.plugin_name) if cluster.status != 'Active': - raise ex.InvalidException("Cluster cannot be scaled not in 'Active' " - "status. Cluster status: " + cluster.status) + raise ex.InvalidException( + _("Cluster cannot be scaled not in 'Active' status. " + "Cluster status: %s") % cluster.status) if data.get("resize_node_groups"): b.check_resize(cluster, data['resize_node_groups']) diff --git a/sahara/service/validations/node_group_templates.py b/sahara/service/validations/node_group_templates.py index 9c49a408..49388faa 100644 --- a/sahara/service/validations/node_group_templates.py +++ b/sahara/service/validations/node_group_templates.py @@ -14,6 +14,7 @@ # limitations under the License. from sahara import exceptions as ex +from sahara.i18n import _ from sahara.service import api import sahara.service.validations.base as b @@ -107,8 +108,8 @@ def check_node_group_template_usage(node_group_template_id, **kwargs): if cluster_users or template_users: raise ex.InvalidException( - "Node group template %s is in use by " - "cluster templates: %s; and clusters: %s" % - (node_group_template_id, - template_users and ', '.join(template_users) or 'N/A', - cluster_users and ', '.join(cluster_users) or 'N/A')) + _("Node group template %(template)s is in use by " + "cluster templates: %(users)s; and clusters: %(clusters)s") % + {'template': node_group_template_id, + 'users': template_users and ', '.join(template_users) or 'N/A', + 'clusters': cluster_users and ', '.join(cluster_users) or 'N/A'}) diff --git a/sahara/service/validations/plugins.py b/sahara/service/validations/plugins.py index af477d25..efff5347 100644 --- a/sahara/service/validations/plugins.py +++ b/sahara/service/validations/plugins.py @@ -14,6 +14,7 @@ # limitations under the License. 
import sahara.exceptions as ex +from sahara.i18n import _ from sahara.plugins import base as plugin_base @@ -23,5 +24,5 @@ CONVERT_TO_TEMPLATE_SCHEMA = None def check_convert_to_template(plugin_name, version, **kwargs): if not plugin_base.PLUGINS.is_plugin_implements(plugin_name, 'convert'): raise ex.InvalidException( - "Requested plugin '%s' doesn't support converting config files " - "to cluster templates" % plugin_name) + _("Requested plugin '%s' doesn't support converting config files " + "to cluster templates") % plugin_name) diff --git a/sahara/swift/swift_helper.py b/sahara/swift/swift_helper.py index cad95816..92f74787 100644 --- a/sahara/swift/swift_helper.py +++ b/sahara/swift/swift_helper.py @@ -18,6 +18,7 @@ import logging from oslo.config import cfg from sahara import context +from sahara.i18n import _LI from sahara.swift import utils as su from sahara.utils import xmlutils as x @@ -46,6 +47,6 @@ def get_swift_configs(): conf['value'] = CONF.os_region_name result = [cfg for cfg in configs if cfg['value']] - LOG.info("Swift would be integrated with the following " - "params: %s", result) + LOG.info(_LI("Swift would be integrated with the following " + "params: %s"), result) return result diff --git a/sahara/topology/topology_helper.py b/sahara/topology/topology_helper.py index 48aef8a5..be187b91 100644 --- a/sahara/topology/topology_helper.py +++ b/sahara/topology/topology_helper.py @@ -19,6 +19,8 @@ from oslo.config import cfg from sahara import context from sahara import exceptions as ex +from sahara.i18n import _ +from sahara.i18n import _LI from sahara.openstack.common import log from sahara.utils.openstack import nova from sahara.utils import xmlutils as x @@ -108,7 +110,7 @@ def _read_compute_topology(): except IOError: raise ex.NotFoundException( CONF.compute_topology_file, - "Unable to find file %s with compute topology") + _("Unable to find file %s with compute topology")) return topology @@ -123,8 +125,8 @@ def generate_topology_map(cluster, is_node_awareness): hostId = ni.hostId if hostId not in mapping: raise ex.NotFoundException( - i.instance_id, "Was not able to find compute node " - "topology for VM %s") + i.instance_id, + _("Was not able to find compute node topology for VM %s")) rack = mapping[hostId] if is_node_awareness: rack += "/" + hostId @@ -149,16 +151,16 @@ def vm_awareness_core_config(): if param: param['value'] = 'org.apache.hadoop.net.NetworkTopology' - LOG.info("Vm awareness will add following configs in core-site " - "params: %s", result) + LOG.info(_LI("Vm awareness will add following configs in core-site " + "params: %s"), result) return result def vm_awareness_mapred_config(): c = x.load_hadoop_xml_defaults('topology/resources/mapred-template.xml') result = [cfg for cfg in c if cfg['value']] - LOG.info("Vm awareness will add following configs in map-red " - "params: %s", result) + LOG.info(_LI("Vm awareness will add following configs in map-red " + "params: %s"), result) return result diff --git a/sahara/utils/api.py b/sahara/utils/api.py index df6921bd..d0e66b9c 100644 --- a/sahara/utils/api.py +++ b/sahara/utils/api.py @@ -20,6 +20,8 @@ from werkzeug import datastructures from sahara import context from sahara import exceptions as ex +from sahara.i18n import _ +from sahara.i18n import _LE from sahara.openstack.common import log as logging from sahara.openstack.common import wsgi @@ -131,7 +133,8 @@ def render(res=None, resp_type=None, status=None, **kwargs): res.update(kwargs) elif kwargs: # can't merge kwargs into the non-dict res - 
abort_and_log(500, "Non-dict and non-empty kwargs passed to render") + abort_and_log(500, + _("Non-dict and non-empty kwargs passed to render")) status_code = getattr(flask.request, 'status_code', None) if status: @@ -153,7 +156,7 @@ def render(res=None, resp_type=None, status=None, **kwargs): resp_type = RT_XML serializer = wsgi.XMLDictSerializer() else: - abort_and_log(400, "Content type '%s' isn't supported" % resp_type) + abort_and_log(400, _("Content type '%s' isn't supported") % resp_type) body = serializer.serialize(res) resp_type = str(resp_type) @@ -178,10 +181,11 @@ def request_data(): if not content_type or content_type in RT_JSON: deserializer = wsgi.JSONDeserializer() elif content_type in RT_XML: - abort_and_log(400, "XML requests are not supported yet") + abort_and_log(400, _("XML requests are not supported yet")) # deserializer = XMLDeserializer() else: - abort_and_log(400, "Content type '%s' isn't supported" % content_type) + abort_and_log(400, + _("Content type '%s' isn't supported") % content_type) # parsed request data to avoid unwanted re-parsings parsed_data = deserializer.deserialize(flask.request.data)['body'] @@ -195,8 +199,9 @@ def get_request_args(): def abort_and_log(status_code, descr, exc=None): - LOG.error("Request aborted with status code %s and message '%s'", - status_code, descr) + LOG.error(_LE("Request aborted with status code %(code)s and " + "message '%(message)s'"), + {'code': status_code, 'message': descr}) if exc is not None: LOG.error(traceback.format_exc()) @@ -218,8 +223,9 @@ def render_error_message(error_code, error_message, error_name): def internal_error(status_code, descr, exc=None): - LOG.error("Request aborted with status code %s and message '%s'", - status_code, descr) + LOG.error(_LE("Request aborted with status code %(code)s and " + "message '%(message)s'"), + {'code': status_code, 'message': descr}) if exc is not None: LOG.error(traceback.format_exc()) diff --git a/sahara/utils/openstack/heat.py b/sahara/utils/openstack/heat.py index f6d4e6b7..22413ac2 100644 --- a/sahara/utils/openstack/heat.py +++ b/sahara/utils/openstack/heat.py @@ -20,6 +20,7 @@ from oslo.config import cfg from sahara import context from sahara import exceptions as ex +from sahara.i18n import _ from sahara.openstack.common import log as logging from sahara.utils import files as f from sahara.utils import general as g @@ -41,7 +42,8 @@ def get_stack(stack_name): for stack in heat.stacks.list(filters={'stack_name': stack_name}): return stack - raise ex.NotFoundException('Failed to find stack %s' % stack_name) + raise ex.NotFoundException(_('Failed to find stack %(stack)s') + % {'stack': stack_name}) def wait_stack_completion(stack): diff --git a/sahara/utils/rpc.py b/sahara/utils/rpc.py index 3d56e0ce..eebd56e2 100644 --- a/sahara/utils/rpc.py +++ b/sahara/utils/rpc.py @@ -18,6 +18,7 @@ from oslo.config import cfg from oslo import messaging from sahara import context +from sahara.i18n import _LE from sahara.openstack.common import log as logging @@ -72,6 +73,6 @@ class ContextEndpointHandler(object): return run_method except AttributeError: - LOG.error("No %(method)s method found implemented in " - "%(class)s class", + LOG.error(_LE("No %(method)s method found implemented in " + "%(class)s class"), {'method': name, 'class': self.__endpoint}) diff --git a/sahara/utils/ssh_remote.py b/sahara/utils/ssh_remote.py index 99125827..79906d53 100644 --- a/sahara/utils/ssh_remote.py +++ b/sahara/utils/ssh_remote.py @@ -44,6 +44,7 @@ import six from sahara import context from 
sahara import exceptions as ex +from sahara.i18n import _LE from sahara.openstack.common import excutils from sahara.utils import crypto from sahara.utils import hashabledict as h @@ -260,7 +261,7 @@ def _read_file_from(remote_file, run_as_root=False): try: return _read_file(_ssh.open_sftp(), fl) except IOError: - LOG.error('Can\'t read file "%s"' % remote_file) + LOG.error(_LE('Can\'t read file "%s"') % remote_file) raise finally: if run_as_root: diff --git a/sahara/utils/timing.py b/sahara/utils/timing.py index 0f029c05..80661c02 100644 --- a/sahara/utils/timing.py +++ b/sahara/utils/timing.py @@ -23,6 +23,7 @@ import time from oslo.config import cfg +from sahara.i18n import _LI from sahara.openstack.common import log @@ -51,8 +52,9 @@ def timed(f): try: result = f(*args, **kwds) except Exception: - LOG.info('Exception raised by invocation of {0}: {1}' - .format(f.__name__, sys.exc_info()[0])) + LOG.info( + _LI('Exception raised by invocation of %(name)s: %(info)s'), + {'name': f.__name__, 'info': sys.exc_info()[0]}) raise finally: elapsed = time.time() - start diff --git a/sahara/utils/types.py b/sahara/utils/types.py index 182eb37a..623aa680 100644 --- a/sahara/utils/types.py +++ b/sahara/utils/types.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from sahara.i18n import _ + class FrozenList(list): def append(self, p_object): @@ -83,7 +85,7 @@ class FrozenDict(dict): class FrozenClassError(Exception): def __init__(self, instance): - self.message = "Class %s is immutable!" % type(instance).__name__ + self.message = _("Class %s is immutable!") % type(instance).__name__ def is_int(s):
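
Note on the imports above: every hunk pulls _, _LI, _LW, or _LE from sahara.i18n, a module that is not itself part of this patch. The sketch below is illustrative only, assuming the oslo TranslatorFactory pattern that OpenStack projects of this era followed; the code base this patch targets more likely wired the module up through the oslo-incubator copy (sahara.openstack.common.gettextutils), and later oslo.i18n releases deprecated the log-level marker attributes.

    # sahara/i18n.py - illustrative sketch, not taken from the patch above.
    # Assumes the standalone oslo.i18n library is available.
    import oslo_i18n

    _translators = oslo_i18n.TranslatorFactory(domain='sahara')

    # Primary translation marker, used for exception and API messages.
    _ = _translators.primary

    # Log-level markers; they route log-message translations to separate
    # per-level catalogs (domains).
    _LI = _translators.log_info
    _LW = _translators.log_warning
    _LE = _translators.log_error
    _LC = _translators.log_critical

A related convention applied throughout the patch: log calls such as LOG.info(_LI('Terminating transient cluster %(cluster)s'), {'cluster': cluster.name}) pass the substitution mapping as a separate argument rather than %-formatting the message in place, so interpolation only happens when the record is actually emitted and the translation machinery sees the raw format string.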