Add translation support to service and missed modules

Added translations to files:
 * sahara/service/ops.py
 * sahara/service/periodic.py
 * sahara/service/validation.py
 * sahara/service/validations/base.py
 * sahara/service/validations/cluster_templates.py
 * sahara/service/validations/clusters.py
 * sahara/service/validations/clusters_scaling.py
 * sahara/service/validations/node_group_templates.py
 * sahara/service/validations/plugins.py
 * sahara/swift/swift_helper.py
 * sahara/tests/unit/service/test_instances.py
 * sahara/topology/topology_helper.py
 * sahara/utils/api.py
 * sahara/utils/openstack/heat.py
 * sahara/utils/rpc.py
 * sahara/utils/ssh_remote.py
 * sahara/utils/timing.py
 * sahara/utils/types.py
 * tox.ini

Change-Id: Ic389ec768e7f70ea2c2ffbf77d46bbebdf94348a
This commit is contained in:
Vitaly Gridnev 2014-07-17 13:21:02 +04:00
parent e23efe5471
commit 8640637fcb
23 changed files with 172 additions and 118 deletions

View File

@@ -13,9 +13,10 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from sahara.i18n import _
import sahara.utils.api as u import sahara.utils.api as u
def not_implemented(): def not_implemented():
return u.internal_error( return u.internal_error(
501, NotImplementedError("This API operation isn't implemented")) 501, NotImplementedError(_("This API operation isn't implemented")))

View File

@@ -16,6 +16,7 @@
from oslo.config import cfg from oslo.config import cfg
from sahara.conductor import api as conductor_api from sahara.conductor import api as conductor_api
from sahara.i18n import _
def Api(use_local=True, **kwargs): def Api(use_local=True, **kwargs):
@@ -27,7 +28,8 @@ def Api(use_local=True, **kwargs):
if cfg.CONF.conductor.use_local or use_local: if cfg.CONF.conductor.use_local or use_local:
api = conductor_api.LocalApi api = conductor_api.LocalApi
else: else:
raise NotImplementedError("Remote conductor isn't implemented yet.") raise NotImplementedError(
_("Remote conductor isn't implemented yet."))
# api = conductor.RemoteApi # api = conductor.RemoteApi
return api(**kwargs) return api(**kwargs)

View File

@@ -28,6 +28,7 @@ import datetime
import six import six
from sahara.conductor import objects from sahara.conductor import objects
from sahara.i18n import _
from sahara.swift import swift_helper from sahara.swift import swift_helper
from sahara.utils import types from sahara.utils import types
@@ -114,7 +115,7 @@ class Resource(types.FrozenDict):
elif self._is_passthrough_type(entity): elif self._is_passthrough_type(entity):
return entity return entity
else: else:
raise TypeError("Unsupported type: %s" % type(entity).__name__) raise TypeError(_("Unsupported type: %s") % type(entity).__name__)
def _wrap_list(self, refname, lst): def _wrap_list(self, refname, lst):
newlst = [self._wrap_entity(refname, entity) for entity in lst] newlst = [self._wrap_entity(refname, entity) for entity in lst]

View File

@@ -20,6 +20,7 @@ from alembic import config as alembic_cfg
from alembic import util as alembic_u from alembic import util as alembic_u
from oslo.config import cfg from oslo.config import cfg
from sahara.i18n import _
CONF = cfg.CONF CONF = cfg.CONF
@@ -37,7 +38,7 @@ def do_check_migration(config, _cmd):
def do_upgrade_downgrade(config, cmd): def do_upgrade_downgrade(config, cmd):
if not CONF.command.revision and not CONF.command.delta: if not CONF.command.revision and not CONF.command.delta:
raise SystemExit('You must provide a revision or relative delta') raise SystemExit(_('You must provide a revision or relative delta'))
revision = CONF.command.revision revision = CONF.command.revision

View File

@@ -15,6 +15,9 @@
import webob.exc as ex import webob.exc as ex
from sahara.i18n import _
from sahara.i18n import _LI
from sahara.i18n import _LW
from sahara.openstack.common import log as logging from sahara.openstack.common import log as logging
import sahara.openstack.commons as commons import sahara.openstack.commons as commons
@@ -40,7 +43,7 @@ class AuthValidator:
""" """
token_tenant = env['HTTP_X_TENANT_ID'] token_tenant = env['HTTP_X_TENANT_ID']
if not token_tenant: if not token_tenant:
LOG.warn("Can't get tenant_id from env") LOG.warn(_LW("Can't get tenant_id from env"))
resp = ex.HTTPServiceUnavailable() resp = ex.HTTPServiceUnavailable()
return resp(env, start_response) return resp(env, start_response)
@@ -48,13 +51,14 @@ class AuthValidator:
if path != '/': if path != '/':
version, url_tenant, rest = commons.split_path(path, 3, 3, True) version, url_tenant, rest = commons.split_path(path, 3, 3, True)
if not version or not url_tenant or not rest: if not version or not url_tenant or not rest:
LOG.info("Incorrect path: %s", path) LOG.info(_LI("Incorrect path: %s"), path)
resp = ex.HTTPNotFound("Incorrect path") resp = ex.HTTPNotFound(_("Incorrect path"))
return resp(env, start_response) return resp(env, start_response)
if token_tenant != url_tenant: if token_tenant != url_tenant:
LOG.debug("Unauthorized: token tenant != requested tenant") LOG.debug("Unauthorized: token tenant != requested tenant")
resp = ex.HTTPUnauthorized('Token tenant != requested tenant') resp = ex.HTTPUnauthorized(
_('Token tenant != requested tenant'))
return resp(env, start_response) return resp(env, start_response)
return self.app(env, start_response) return self.app(env, start_response)

View File

@@ -17,6 +17,7 @@
import xml.dom.minidom as xml import xml.dom.minidom as xml
import sahara.exceptions as ex import sahara.exceptions as ex
from sahara.i18n import _
from sahara.utils import xmlutils as x from sahara.utils import xmlutils as x
@@ -45,8 +46,8 @@ class OozieWorkflowCreator(object):
def _add_to_prepare_element(self, element, paths): def _add_to_prepare_element(self, element, paths):
if element not in ['delete', 'mkdir']: if element not in ['delete', 'mkdir']:
raise ex.NotFoundException(element, raise ex.NotFoundException(element,
message='"%s" child cannot be ' message=_('"%s" child cannot be '
'added to prepare element') 'added to prepare element'))
prop = x.get_and_create_if_not_exist(self.doc, self.tag_name, prop = x.get_and_create_if_not_exist(self.doc, self.tag_name,
'prepare') 'prepare')
for path in paths: for path in paths:
@@ -56,8 +57,8 @@ class OozieWorkflowCreator(object):
def _add_to_streaming_element(self, element, path): def _add_to_streaming_element(self, element, path):
if element not in ['mapper', 'reducer']: if element not in ['mapper', 'reducer']:
raise ex.NotFoundException(element, raise ex.NotFoundException(element,
message='"%s" child cannot be added ' message=_('"%s" child cannot be added '
'to streaming element') 'to streaming element'))
x.get_and_create_if_not_exist(self.doc, self.tag_name, x.get_and_create_if_not_exist(self.doc, self.tag_name,
'streaming') 'streaming')

View File

@@ -20,6 +20,7 @@ from oslo import messaging
from sahara import conductor as c from sahara import conductor as c
from sahara import context from sahara import context
from sahara.i18n import _LE
from sahara.openstack.common import log as logging from sahara.openstack.common import log as logging
from sahara.plugins import base as plugin_base from sahara.plugins import base as plugin_base
from sahara.service.edp import job_manager from sahara.service.edp import job_manager
@@ -139,8 +140,9 @@ def _provision_cluster(cluster_id):
if not g.check_cluster_exists(cluster): if not g.check_cluster_exists(cluster):
LOG.info(g.format_cluster_deleted_message(cluster)) LOG.info(g.format_cluster_deleted_message(cluster))
return return
LOG.exception("Can't configure cluster '%s' (reason: %s)", LOG.exception(
cluster.name, ex) _LE("Can't configure cluster '%(name)s' (reason: %(reason)s)"),
{'name': cluster.name, 'reason': ex})
g.change_cluster_status(cluster, "Error") g.change_cluster_status(cluster, "Error")
return return
@@ -156,8 +158,9 @@ def _provision_cluster(cluster_id):
if not g.check_cluster_exists(cluster): if not g.check_cluster_exists(cluster):
LOG.info(g.format_cluster_deleted_message(cluster)) LOG.info(g.format_cluster_deleted_message(cluster))
return return
LOG.exception("Can't start services for cluster '%s' (reason: %s)", LOG.exception(
cluster.name, ex) _LE("Can't start services for cluster '%(name)s' (reason: "
"%(reason)s)"), {'name': cluster.name, 'reason': ex})
g.change_cluster_status(cluster, "Error") g.change_cluster_status(cluster, "Error")
return return
@@ -206,8 +209,10 @@ def _provision_scaled_cluster(cluster_id, node_group_id_map):
if not g.check_cluster_exists(cluster): if not g.check_cluster_exists(cluster):
LOG.info(g.format_cluster_deleted_message(cluster)) LOG.info(g.format_cluster_deleted_message(cluster))
return return
LOG.exception("Can't scale cluster '%s' (reason: %s)", LOG.exception(
cluster.name, ex) _LE("Can't scale cluster '%(name)s' (reason: %(reason)s)"),
{'name': cluster.name, 'reason': ex})
g.change_cluster_status(cluster, "Error") g.change_cluster_status(cluster, "Error")
return return

View File

@@ -20,6 +20,7 @@ import six
from sahara import conductor as c from sahara import conductor as c
from sahara import context from sahara import context
from sahara.i18n import _LI
from sahara.openstack.common import log from sahara.openstack.common import log
from sahara.openstack.common import periodic_task from sahara.openstack.common import periodic_task
from sahara.openstack.common import threadgroup from sahara.openstack.common import threadgroup
@@ -92,15 +93,18 @@ class SaharaPeriodicTasks(periodic_task.PeriodicTasks):
if CONF.use_identity_api_v3: if CONF.use_identity_api_v3:
trusts.use_os_admin_auth_token(cluster) trusts.use_os_admin_auth_token(cluster)
LOG.info('Terminating transient cluster %s with id %s' % LOG.info(_LI('Terminating transient cluster %(cluster)s '
(cluster.name, cluster.id)) 'with id %(id)s'),
{'cluster': cluster.name, 'id': cluster.id})
try: try:
api.terminate_cluster(cluster.id) api.terminate_cluster(cluster.id)
except Exception as e: except Exception as e:
LOG.info('Failed to terminate transient cluster ' LOG.info(_LI('Failed to terminate transient cluster '
'%s with id %s: %s.' % '%(cluster)s with id %(id)s: %(error)s.'),
(cluster.name, cluster.id, six.text_type(e))) {'cluster': cluster.name,
'id': cluster.id,
'error': six.text_type(e)})
else: else:
if cluster.status != 'AwaitingTermination': if cluster.status != 'AwaitingTermination':

View File

@@ -18,6 +18,7 @@ import functools
import jsonschema import jsonschema
from sahara import exceptions as ex from sahara import exceptions as ex
from sahara.i18n import _
import sahara.openstack.common.exception as os_ex import sahara.openstack.common.exception as os_ex
from sahara.utils import api as u from sahara.utils import api as u
from sahara.utils import api_validator from sahara.utils import api_validator
@@ -73,7 +74,7 @@ def check_exists(get_func, *id_prop, **get_args):
raise e raise e
if obj is None: if obj is None:
e = ex.NotFoundException(get_kwargs, e = ex.NotFoundException(get_kwargs,
'Object with %s not found') _('Object with %s not found'))
return u.not_found(e) return u.not_found(e)
return func(*args, **kwargs) return func(*args, **kwargs)

View File

@@ -19,6 +19,7 @@ from oslo.config import cfg
from sahara import conductor as cond from sahara import conductor as cond
from sahara import context from sahara import context
import sahara.exceptions as ex import sahara.exceptions as ex
from sahara.i18n import _
import sahara.plugins.base as plugin_base import sahara.plugins.base as plugin_base
import sahara.service.api as api import sahara.service.api as api
from sahara.utils import general as g from sahara.utils import general as g
@@ -48,20 +49,21 @@ def _get_plugin_configs(plugin_name, hadoop_version, scope=None):
def check_plugin_name_exists(name): def check_plugin_name_exists(name):
if name not in [p.name for p in api.get_plugins()]: if name not in [p.name for p in api.get_plugins()]:
raise ex.InvalidException("Sahara doesn't contain plugin with name " raise ex.InvalidException(
"'%s'" % name) _("Sahara doesn't contain plugin with name '%s'") % name)
def check_plugin_supports_version(p_name, version): def check_plugin_supports_version(p_name, version):
if version not in plugin_base.PLUGINS.get_plugin(p_name).get_versions(): if version not in plugin_base.PLUGINS.get_plugin(p_name).get_versions():
raise ex.InvalidException("Requested plugin '%s' doesn't support" raise ex.InvalidException(
" version '%s'" % (p_name, version)) _("Requested plugin '%(name)s' doesn't support version "
"'%(version)s'") % {'name': p_name, 'version': version})
def check_image_registered(image_id): def check_image_registered(image_id):
if image_id not in [i.id for i in nova.client().images.list_registered()]: if image_id not in [i.id for i in nova.client().images.list_registered()]:
raise ex.InvalidException("Requested image '%s' is not registered" raise ex.InvalidException(
% image_id) _("Requested image '%s' is not registered") % image_id)
def check_node_group_configs(plugin_name, hadoop_version, ng_configs, def check_node_group_configs(plugin_name, hadoop_version, ng_configs,
@@ -71,13 +73,15 @@ def check_node_group_configs(plugin_name, hadoop_version, ng_configs,
hadoop_version) hadoop_version)
for app_target, configs in ng_configs.items(): for app_target, configs in ng_configs.items():
if app_target not in pl_confs: if app_target not in pl_confs:
raise ex.InvalidException("Plugin doesn't contain applicable " raise ex.InvalidException(
"target '%s'" % app_target) _("Plugin doesn't contain applicable target '%s'")
% app_target)
for name, values in configs.items(): for name, values in configs.items():
if name not in pl_confs[app_target]: if name not in pl_confs[app_target]:
raise ex.InvalidException("Plugin's applicable target '%s' " raise ex.InvalidException(
"doesn't contain config with name " _("Plugin's applicable target '%(target)s' doesn't "
"'%s'" % (app_target, name)) "contain config with name '%(name)s'") %
{'target': app_target, 'name': name})
def check_all_configurations(data): def check_all_configurations(data):
@@ -131,8 +135,8 @@ def check_node_group_basic_fields(plugin_name, hadoop_version, ng,
def check_flavor_exists(flavor_id): def check_flavor_exists(flavor_id):
flavor_list = nova.client().flavors.list() flavor_list = nova.client().flavors.list()
if flavor_id not in [flavor.id for flavor in flavor_list]: if flavor_id not in [flavor.id for flavor in flavor_list]:
raise ex.InvalidException("Requested flavor '%s' not found" raise ex.InvalidException(
% flavor_id) _("Requested flavor '%s' not found") % flavor_id)
def check_floatingip_pool_exists(ng_name, pool_id): def check_floatingip_pool_exists(ng_name, pool_id):
@@ -146,37 +150,39 @@ def check_floatingip_pool_exists(ng_name, pool_id):
break break
if not network: if not network:
raise ex.InvalidException("Floating IP pool %s for node group " raise ex.InvalidException(
"'%s' not found" % (pool_id, ng_name)) _("Floating IP pool %(pool)s for node group '%(group)s' "
"not found") % {'pool': pool_id, 'group': ng_name})
def check_node_processes(plugin_name, version, node_processes): def check_node_processes(plugin_name, version, node_processes):
if len(set(node_processes)) != len(node_processes): if len(set(node_processes)) != len(node_processes):
raise ex.InvalidException("Duplicates in node processes " raise ex.InvalidException(
"have been detected") _("Duplicates in node processes have been detected"))
plugin_procesess = [] plugin_processes = []
for process in plugin_base.PLUGINS.get_plugin( for process in plugin_base.PLUGINS.get_plugin(
plugin_name).get_node_processes(version).values(): plugin_name).get_node_processes(version).values():
plugin_procesess += process plugin_processes += process
if not set(node_processes).issubset(set(plugin_procesess)): if not set(node_processes).issubset(set(plugin_processes)):
raise ex.InvalidException("Plugin supports the following " raise ex.InvalidException(
"node procesess: %s" % plugin_procesess) _("Plugin supports the following node procesess: %s")
% plugin_processes)
def check_duplicates_node_groups_names(node_groups): def check_duplicates_node_groups_names(node_groups):
ng_names = [ng['name'] for ng in node_groups] ng_names = [ng['name'] for ng in node_groups]
if len(set(ng_names)) < len(node_groups): if len(set(ng_names)) < len(node_groups):
raise ex.InvalidException("Duplicates in node group names " raise ex.InvalidException(
"are detected") _("Duplicates in node group names are detected"))
# Cluster creation related checks # Cluster creation related checks
def check_cluster_unique_name(name): def check_cluster_unique_name(name):
if name in [cluster.name for cluster in api.get_clusters()]: if name in [cluster.name for cluster in api.get_clusters()]:
raise ex.NameAlreadyExistsException("Cluster with name '%s' already" raise ex.NameAlreadyExistsException(
" exists" % name) _("Cluster with name '%s' already exists") % name)
check_heat_stack_name(name) check_heat_stack_name(name)
@@ -185,14 +191,14 @@ def check_heat_stack_name(cluster_name):
for stack in heat.client().stacks.list(): for stack in heat.client().stacks.list():
if stack.stack_name == cluster_name: if stack.stack_name == cluster_name:
raise ex.NameAlreadyExistsException( raise ex.NameAlreadyExistsException(
"Cluster name '%s' is already used as Heat stack name" _("Cluster name '%s' is already used as Heat stack name")
% cluster_name) % cluster_name)
def check_cluster_exists(id): def check_cluster_exists(id):
if not api.get_cluster(id): if not api.get_cluster(id):
raise ex.InvalidException("Cluster with id '%s'" raise ex.InvalidException(
" doesn't exist" % id) _("Cluster with id '%s' doesn't exist") % id)
def check_cluster_hostnames_lengths(cluster_name, node_groups): def check_cluster_hostnames_lengths(cluster_name, node_groups):
@@ -203,35 +209,38 @@ def check_cluster_hostnames_lengths(cluster_name, node_groups):
longest_hostname += CONF.node_domain longest_hostname += CONF.node_domain
if len(longest_hostname) > MAX_HOSTNAME_LENGTH: if len(longest_hostname) > MAX_HOSTNAME_LENGTH:
raise ex.InvalidException( raise ex.InvalidException(
"Composite hostname %s in provisioned cluster exceeds " _("Composite hostname %(host)s in provisioned cluster exceeds"
"maximum limit %s characters" % (longest_hostname, " maximum limit %(limit)s characters") %
MAX_HOSTNAME_LENGTH)) {'host': longest_hostname,
'limit': MAX_HOSTNAME_LENGTH})
def check_keypair_exists(keypair): def check_keypair_exists(keypair):
try: try:
nova.client().keypairs.get(keypair) nova.client().keypairs.get(keypair)
except nova_ex.NotFound: except nova_ex.NotFound:
raise ex.InvalidException("Requested keypair '%s' not found" % keypair) raise ex.InvalidException(
_("Requested keypair '%s' not found") % keypair)
def check_network_exists(net_id): def check_network_exists(net_id):
if not nova.get_network(id=net_id): if not nova.get_network(id=net_id):
raise ex.InvalidException("Network %s not found" % net_id) raise ex.InvalidException(_("Network %s not found") % net_id)
# Cluster templates related checks # Cluster templates related checks
def check_cluster_template_unique_name(name): def check_cluster_template_unique_name(name):
if name in [t.name for t in api.get_cluster_templates()]: if name in [t.name for t in api.get_cluster_templates()]:
raise ex.NameAlreadyExistsException("Cluster template with name '%s'" raise ex.NameAlreadyExistsException(
" already exists" % name) _("Cluster template with name '%s' already exists") % name)
def check_cluster_template_exists(cluster_template_id): def check_cluster_template_exists(cluster_template_id):
if not api.get_cluster_template(id=cluster_template_id): if not api.get_cluster_template(id=cluster_template_id):
raise ex.InvalidException("Cluster template with id '%s'" raise ex.InvalidException(
" doesn't exist" % cluster_template_id) _("Cluster template with id '%s' doesn't exist")
% cluster_template_id)
def check_node_groups_in_cluster_templates(cluster_name, plugin_name, def check_node_groups_in_cluster_templates(cluster_name, plugin_name,
@@ -249,14 +258,14 @@ def check_node_groups_in_cluster_templates(cluster_name, plugin_name,
def check_node_group_template_unique_name(name): def check_node_group_template_unique_name(name):
if name in [t.name for t in api.get_node_group_templates()]: if name in [t.name for t in api.get_node_group_templates()]:
raise ex.NameAlreadyExistsException("NodeGroup template with name '%s'" raise ex.NameAlreadyExistsException(
" already exists" % name) _("NodeGroup template with name '%s' already exists") % name)
def check_node_group_template_exists(ng_tmpl_id): def check_node_group_template_exists(ng_tmpl_id):
if not api.get_node_group_template(id=ng_tmpl_id): if not api.get_node_group_template(id=ng_tmpl_id):
raise ex.InvalidException("NodeGroup template with id '%s'" raise ex.InvalidException(
" doesn't exist" % ng_tmpl_id) _("NodeGroup template with id '%s' doesn't exist") % ng_tmpl_id)
def check_network_config(node_groups): def check_network_config(node_groups):
@@ -290,8 +299,9 @@ def check_resize(cluster, r_node_groups):
for ng in r_node_groups: for ng in r_node_groups:
if ng['name'] not in cluster_ng_names: if ng['name'] not in cluster_ng_names:
raise ex.InvalidException("Cluster doesn't contain node group " raise ex.InvalidException(
"with name '%s'" % ng['name']) _("Cluster doesn't contain node group with name '%s'")
% ng['name'])
def check_add_node_groups(cluster, add_node_groups): def check_add_node_groups(cluster, add_node_groups):
@@ -303,9 +313,9 @@ def check_add_node_groups(cluster, add_node_groups):
for ng in add_node_groups: for ng in add_node_groups:
if ng['name'] in cluster_ng_names: if ng['name'] in cluster_ng_names:
raise ex.InvalidException("Can't add new nodegroup. Cluster " raise ex.InvalidException(
"already has nodegroup with name '%s'" _("Can't add new nodegroup. Cluster already has nodegroup with"
% ng['name']) " name '%s'") % ng['name'])
check_node_group_basic_fields(cluster.plugin_name, check_node_group_basic_fields(cluster.plugin_name,
cluster.hadoop_version, ng, pl_confs) cluster.hadoop_version, ng, pl_confs)
@@ -317,7 +327,7 @@ def check_cinder_exists():
services = [service.name for service in services = [service.name for service in
keystone.client().services.list()] keystone.client().services.list()]
if 'cinder' not in services: if 'cinder' not in services:
raise ex.InvalidException("Cinder is not supported") raise ex.InvalidException(_("Cinder is not supported"))
# Tags # Tags
@@ -328,12 +338,11 @@ def check_required_image_tags(plugin_name, hadoop_version, image_id):
plugin = plugin_base.PLUGINS.get_plugin(plugin_name) plugin = plugin_base.PLUGINS.get_plugin(plugin_name)
req_tags = set(plugin.get_required_image_tags(hadoop_version)) req_tags = set(plugin.get_required_image_tags(hadoop_version))
if not req_tags.issubset(set(image.tags)): if not req_tags.issubset(set(image.tags)):
raise ex.InvalidException("Tags of requested image '%s' don't " raise ex.InvalidException(
"contain required tags " _("Tags of requested image '%(image)s' don't contain required"
"['%s', '%s']" % " tags ['%(name)s', '%(version)s']")
(image_id, % {'image': image_id, 'name': plugin_name,
plugin_name, 'version': hadoop_version})
hadoop_version))
# EDP # EDP

View File

@@ -16,6 +16,7 @@
import copy import copy
from sahara import exceptions as ex from sahara import exceptions as ex
from sahara.i18n import _
from sahara.service import api from sahara.service import api
import sahara.service.validations.base as b import sahara.service.validations.base as b
import sahara.service.validations.node_group_templates as ng_tml import sahara.service.validations.node_group_templates as ng_tml
@@ -133,5 +134,6 @@ def check_cluster_template_usage(cluster_template_id, **kwargs):
if users: if users:
raise ex.InvalidException( raise ex.InvalidException(
"Cluster template %s in use by %s" % _("Cluster template %(id)s in use by %(clusters)s") %
(cluster_template_id, ', '.join(users))) {'id': cluster_template_id,
'clusters': ', '.join(users)})

View File

@@ -18,6 +18,7 @@ import copy
from oslo.config import cfg from oslo.config import cfg
import sahara.exceptions as ex import sahara.exceptions as ex
from sahara.i18n import _
import sahara.service.api as api import sahara.service.api as api
import sahara.service.validations.base as b import sahara.service.validations.base as b
import sahara.service.validations.cluster_templates as cl_tmpl import sahara.service.validations.cluster_templates as cl_tmpl
@@ -71,7 +72,7 @@ def check_cluster_create(data, **kwargs):
default_image_id) default_image_id)
else: else:
raise ex.NotFoundException('default_image_id', raise ex.NotFoundException('default_image_id',
"'%s' field is not found") _("'%s' field is not found"))
b.check_all_configurations(data) b.check_all_configurations(data)
@@ -86,13 +87,14 @@ def check_cluster_create(data, **kwargs):
neutron_net_id = _get_cluster_field(data, 'neutron_management_network') neutron_net_id = _get_cluster_field(data, 'neutron_management_network')
if neutron_net_id: if neutron_net_id:
if not CONF.use_neutron: if not CONF.use_neutron:
raise ex.InvalidException("'neutron_management_network' field " raise ex.InvalidException(
"can't be used with 'use_neutron=False'") _("'neutron_management_network' field can't be used "
"with 'use_neutron=False'"))
b.check_network_exists(neutron_net_id) b.check_network_exists(neutron_net_id)
else: else:
if CONF.use_neutron: if CONF.use_neutron:
raise ex.NotFoundException('neutron_management_network', raise ex.NotFoundException('neutron_management_network',
message="'%s' field is not found") message=_("'%s' field is not found"))
def _get_cluster_field(cluster, field): def _get_cluster_field(cluster, field):

View File

@@ -16,6 +16,7 @@
import copy import copy
import sahara.exceptions as ex import sahara.exceptions as ex
from sahara.i18n import _
import sahara.plugins.base as plugin_base import sahara.plugins.base as plugin_base
import sahara.service.api as api import sahara.service.api as api
import sahara.service.validations.base as b import sahara.service.validations.base as b
@@ -73,12 +74,13 @@ def check_cluster_scaling(data, cluster_id, **kwargs):
plugin_base.PLUGINS.is_plugin_implements(cluster.plugin_name, plugin_base.PLUGINS.is_plugin_implements(cluster.plugin_name,
'decommission_nodes'))): 'decommission_nodes'))):
raise ex.InvalidException( raise ex.InvalidException(
"Requested plugin '%s' doesn't support cluster scaling feature" _("Requested plugin '%s' doesn't support cluster scaling feature")
% cluster.plugin_name) % cluster.plugin_name)
if cluster.status != 'Active': if cluster.status != 'Active':
raise ex.InvalidException("Cluster cannot be scaled not in 'Active' " raise ex.InvalidException(
"status. Cluster status: " + cluster.status) _("Cluster cannot be scaled not in 'Active' status. "
"Cluster status: %s") % cluster.status)
if data.get("resize_node_groups"): if data.get("resize_node_groups"):
b.check_resize(cluster, data['resize_node_groups']) b.check_resize(cluster, data['resize_node_groups'])

View File

@@ -14,6 +14,7 @@
# limitations under the License. # limitations under the License.
from sahara import exceptions as ex from sahara import exceptions as ex
from sahara.i18n import _
from sahara.service import api from sahara.service import api
import sahara.service.validations.base as b import sahara.service.validations.base as b
@@ -107,8 +108,8 @@ def check_node_group_template_usage(node_group_template_id, **kwargs):
if cluster_users or template_users: if cluster_users or template_users:
raise ex.InvalidException( raise ex.InvalidException(
"Node group template %s is in use by " _("Node group template %(template)s is in use by "
"cluster templates: %s; and clusters: %s" % "cluster templates: %(users)s; and clusters: %(clusters)s") %
(node_group_template_id, {'template': node_group_template_id,
template_users and ', '.join(template_users) or 'N/A', 'users': template_users and ', '.join(template_users) or 'N/A',
cluster_users and ', '.join(cluster_users) or 'N/A')) 'clusters': cluster_users and ', '.join(cluster_users) or 'N/A'})

View File

@@ -14,6 +14,7 @@
# limitations under the License. # limitations under the License.
import sahara.exceptions as ex import sahara.exceptions as ex
from sahara.i18n import _
from sahara.plugins import base as plugin_base from sahara.plugins import base as plugin_base
@@ -23,5 +24,5 @@ CONVERT_TO_TEMPLATE_SCHEMA = None
def check_convert_to_template(plugin_name, version, **kwargs): def check_convert_to_template(plugin_name, version, **kwargs):
if not plugin_base.PLUGINS.is_plugin_implements(plugin_name, 'convert'): if not plugin_base.PLUGINS.is_plugin_implements(plugin_name, 'convert'):
raise ex.InvalidException( raise ex.InvalidException(
"Requested plugin '%s' doesn't support converting config files " _("Requested plugin '%s' doesn't support converting config files "
"to cluster templates" % plugin_name) "to cluster templates") % plugin_name)

View File

@@ -18,6 +18,7 @@ import logging
from oslo.config import cfg from oslo.config import cfg
from sahara import context from sahara import context
from sahara.i18n import _LI
from sahara.swift import utils as su from sahara.swift import utils as su
from sahara.utils import xmlutils as x from sahara.utils import xmlutils as x
@@ -46,6 +47,6 @@ def get_swift_configs():
conf['value'] = CONF.os_region_name conf['value'] = CONF.os_region_name
result = [cfg for cfg in configs if cfg['value']] result = [cfg for cfg in configs if cfg['value']]
LOG.info("Swift would be integrated with the following " LOG.info(_LI("Swift would be integrated with the following "
"params: %s", result) "params: %s"), result)
return result return result

View File

@@ -19,6 +19,8 @@ from oslo.config import cfg
from sahara import context from sahara import context
from sahara import exceptions as ex from sahara import exceptions as ex
from sahara.i18n import _
from sahara.i18n import _LI
from sahara.openstack.common import log from sahara.openstack.common import log
from sahara.utils.openstack import nova from sahara.utils.openstack import nova
from sahara.utils import xmlutils as x from sahara.utils import xmlutils as x
@@ -108,7 +110,7 @@ def _read_compute_topology():
except IOError: except IOError:
raise ex.NotFoundException( raise ex.NotFoundException(
CONF.compute_topology_file, CONF.compute_topology_file,
"Unable to find file %s with compute topology") _("Unable to find file %s with compute topology"))
return topology return topology
@@ -123,8 +125,8 @@ def generate_topology_map(cluster, is_node_awareness):
hostId = ni.hostId hostId = ni.hostId
if hostId not in mapping: if hostId not in mapping:
raise ex.NotFoundException( raise ex.NotFoundException(
i.instance_id, "Was not able to find compute node " i.instance_id,
"topology for VM %s") _("Was not able to find compute node topology for VM %s"))
rack = mapping[hostId] rack = mapping[hostId]
if is_node_awareness: if is_node_awareness:
rack += "/" + hostId rack += "/" + hostId
@@ -149,16 +151,16 @@ def vm_awareness_core_config():
if param: if param:
param['value'] = 'org.apache.hadoop.net.NetworkTopology' param['value'] = 'org.apache.hadoop.net.NetworkTopology'
LOG.info("Vm awareness will add following configs in core-site " LOG.info(_LI("Vm awareness will add following configs in core-site "
"params: %s", result) "params: %s"), result)
return result return result
def vm_awareness_mapred_config(): def vm_awareness_mapred_config():
c = x.load_hadoop_xml_defaults('topology/resources/mapred-template.xml') c = x.load_hadoop_xml_defaults('topology/resources/mapred-template.xml')
result = [cfg for cfg in c if cfg['value']] result = [cfg for cfg in c if cfg['value']]
LOG.info("Vm awareness will add following configs in map-red " LOG.info(_LI("Vm awareness will add following configs in map-red "
"params: %s", result) "params: %s"), result)
return result return result

View File

@ -20,6 +20,8 @@ from werkzeug import datastructures
from sahara import context from sahara import context
from sahara import exceptions as ex from sahara import exceptions as ex
from sahara.i18n import _
from sahara.i18n import _LE
from sahara.openstack.common import log as logging from sahara.openstack.common import log as logging
from sahara.openstack.common import wsgi from sahara.openstack.common import wsgi
@ -131,7 +133,8 @@ def render(res=None, resp_type=None, status=None, **kwargs):
res.update(kwargs) res.update(kwargs)
elif kwargs: elif kwargs:
# can't merge kwargs into the non-dict res # can't merge kwargs into the non-dict res
abort_and_log(500, "Non-dict and non-empty kwargs passed to render") abort_and_log(500,
_("Non-dict and non-empty kwargs passed to render"))
status_code = getattr(flask.request, 'status_code', None) status_code = getattr(flask.request, 'status_code', None)
if status: if status:
@ -153,7 +156,7 @@ def render(res=None, resp_type=None, status=None, **kwargs):
resp_type = RT_XML resp_type = RT_XML
serializer = wsgi.XMLDictSerializer() serializer = wsgi.XMLDictSerializer()
else: else:
abort_and_log(400, "Content type '%s' isn't supported" % resp_type) abort_and_log(400, _("Content type '%s' isn't supported") % resp_type)
body = serializer.serialize(res) body = serializer.serialize(res)
resp_type = str(resp_type) resp_type = str(resp_type)
@ -178,10 +181,11 @@ def request_data():
if not content_type or content_type in RT_JSON: if not content_type or content_type in RT_JSON:
deserializer = wsgi.JSONDeserializer() deserializer = wsgi.JSONDeserializer()
elif content_type in RT_XML: elif content_type in RT_XML:
abort_and_log(400, "XML requests are not supported yet") abort_and_log(400, _("XML requests are not supported yet"))
# deserializer = XMLDeserializer() # deserializer = XMLDeserializer()
else: else:
abort_and_log(400, "Content type '%s' isn't supported" % content_type) abort_and_log(400,
_("Content type '%s' isn't supported") % content_type)
# parsed request data to avoid unwanted re-parsings # parsed request data to avoid unwanted re-parsings
parsed_data = deserializer.deserialize(flask.request.data)['body'] parsed_data = deserializer.deserialize(flask.request.data)['body']
@ -195,8 +199,9 @@ def get_request_args():
def abort_and_log(status_code, descr, exc=None): def abort_and_log(status_code, descr, exc=None):
LOG.error("Request aborted with status code %s and message '%s'", LOG.error(_LE("Request aborted with status code %(code)s and "
status_code, descr) "message '%(message)s'"),
{'code': status_code, 'message': descr})
if exc is not None: if exc is not None:
LOG.error(traceback.format_exc()) LOG.error(traceback.format_exc())
@ -218,8 +223,9 @@ def render_error_message(error_code, error_message, error_name):
def internal_error(status_code, descr, exc=None): def internal_error(status_code, descr, exc=None):
LOG.error("Request aborted with status code %s and message '%s'", LOG.error(_LE("Request aborted with status code %(code)s and "
status_code, descr) "message '%(message)s'"),
{'code': status_code, 'message': descr})
if exc is not None: if exc is not None:
LOG.error(traceback.format_exc()) LOG.error(traceback.format_exc())

View File

@ -20,6 +20,7 @@ from oslo.config import cfg
from sahara import context from sahara import context
from sahara import exceptions as ex from sahara import exceptions as ex
from sahara.i18n import _
from sahara.openstack.common import log as logging from sahara.openstack.common import log as logging
from sahara.utils import files as f from sahara.utils import files as f
from sahara.utils import general as g from sahara.utils import general as g
@ -41,7 +42,8 @@ def get_stack(stack_name):
for stack in heat.stacks.list(filters={'stack_name': stack_name}): for stack in heat.stacks.list(filters={'stack_name': stack_name}):
return stack return stack
raise ex.NotFoundException('Failed to find stack %s' % stack_name) raise ex.NotFoundException(_('Failed to find stack %(stack)s')
% {'stack': stack_name})
def wait_stack_completion(stack): def wait_stack_completion(stack):

View File

@ -18,6 +18,7 @@ from oslo.config import cfg
from oslo import messaging from oslo import messaging
from sahara import context from sahara import context
from sahara.i18n import _LE
from sahara.openstack.common import log as logging from sahara.openstack.common import log as logging
@ -72,6 +73,6 @@ class ContextEndpointHandler(object):
return run_method return run_method
except AttributeError: except AttributeError:
LOG.error("No %(method)s method found implemented in " LOG.error(_LE("No %(method)s method found implemented in "
"%(class)s class", "%(class)s class"),
{'method': name, 'class': self.__endpoint}) {'method': name, 'class': self.__endpoint})

View File

@ -44,6 +44,7 @@ import six
from sahara import context from sahara import context
from sahara import exceptions as ex from sahara import exceptions as ex
from sahara.i18n import _LE
from sahara.openstack.common import excutils from sahara.openstack.common import excutils
from sahara.utils import crypto from sahara.utils import crypto
from sahara.utils import hashabledict as h from sahara.utils import hashabledict as h
@ -260,7 +261,7 @@ def _read_file_from(remote_file, run_as_root=False):
try: try:
return _read_file(_ssh.open_sftp(), fl) return _read_file(_ssh.open_sftp(), fl)
except IOError: except IOError:
LOG.error('Can\'t read file "%s"' % remote_file) LOG.error(_LE('Can\'t read file "%s"') % remote_file)
raise raise
finally: finally:
if run_as_root: if run_as_root:

View File

@ -23,6 +23,7 @@ import time
from oslo.config import cfg from oslo.config import cfg
from sahara.i18n import _LI
from sahara.openstack.common import log from sahara.openstack.common import log
@ -51,8 +52,9 @@ def timed(f):
try: try:
result = f(*args, **kwds) result = f(*args, **kwds)
except Exception: except Exception:
LOG.info('Exception raised by invocation of {0}: {1}' LOG.info(
.format(f.__name__, sys.exc_info()[0])) _LI('Exception raised by invocation of %(name)s: %(info)s'),
{'name': f.__name__, 'info': sys.exc_info()[0]})
raise raise
finally: finally:
elapsed = time.time() - start elapsed = time.time() - start

View File

@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from sahara.i18n import _
class FrozenList(list): class FrozenList(list):
def append(self, p_object): def append(self, p_object):
@ -83,7 +85,7 @@ class FrozenDict(dict):
class FrozenClassError(Exception): class FrozenClassError(Exception):
def __init__(self, instance): def __init__(self, instance):
self.message = "Class %s is immutable!" % type(instance).__name__ self.message = _("Class %s is immutable!") % type(instance).__name__
def is_int(s): def is_int(s):