Merge "Add translation support to service and utils modules"

commit e23efe5471
Jenkins 2014-07-30 14:05:10 +00:00, committed by Gerrit Code Review
11 changed files with 99 additions and 62 deletions
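
Every file below gains imports from sahara.i18n. That module is not part of this
diff; as a rough sketch, at the time it would simply have re-exported the
oslo-incubator gettextutils markers (the names below are the conventional ones,
not taken from this commit):

    # sahara/i18n.py -- illustrative sketch, not shown in this diff
    from sahara.openstack.common import gettextutils

    _ = gettextutils._      # user-facing text: exception messages, API errors
    _LI = gettextutils._LI  # translatable log messages at INFO level
    _LW = gettextutils._LW  # ... at WARNING level
    _LE = gettextutils._LE  # ... at ERROR level

The split exists so that operator-facing log translations can live in separate
catalogs from user-facing message translations.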

View File

@@ -20,6 +20,9 @@ import six
 from sahara import conductor as c
 from sahara import context
 from sahara import exceptions as exc
+from sahara.i18n import _
+from sahara.i18n import _LI
+from sahara.i18n import _LW
 from sahara.openstack.common import excutils
 from sahara.openstack.common import log as logging
 from sahara.service import engine as e
@@ -76,7 +79,8 @@ class DirectEngine(e.Engine):
                     return
                 self._log_operation_exception(
-                    "Can't start cluster '%s' (reason: %s)", cluster, ex)
+                    _LW("Can't start cluster '%(cluster)s' "
+                        "(reason: %(reason)s)"), cluster, ex)

                 cluster = g.change_cluster_status(
                     cluster, "Error", status_description=six.text_type(ex))
@@ -122,7 +126,8 @@ class DirectEngine(e.Engine):
                     return []
                 self._log_operation_exception(
-                    "Can't scale cluster '%s' (reason: %s)", cluster, ex)
+                    _LW("Can't scale cluster '%(cluster)s' "
+                        "(reason: %(reason)s)"), cluster, ex)

                 cluster = conductor.cluster_get(ctx, cluster)
                 self._rollback_cluster_scaling(
@@ -272,27 +277,29 @@ class DirectEngine(e.Engine):
             context.sleep(1)

-        LOG.info("Cluster '%s': all instances are active" % cluster.id)
+        LOG.info(_LI("Cluster '%s': all instances are active"), cluster.id)

     def _check_if_active(self, instance):
         server = nova.get_instance_info(instance)
         if server.status == 'ERROR':
-            raise exc.SystemError("Node %s has error status" % server.name)
+            raise exc.SystemError(_("Node %s has error status") % server.name)

         return server.status == 'ACTIVE'

     def _rollback_cluster_creation(self, cluster, ex):
         """Shutdown all instances and update cluster status."""
-        LOG.info("Cluster '%s' creation rollback (reason: %s)",
-                 cluster.name, ex)
+        LOG.info(_LI("Cluster '%(name)s' creation rollback "
+                     "(reason: %(reason)s)"),
+                 {'name': cluster.name, 'reason': ex})

         self.shutdown_cluster(cluster)

     def _rollback_cluster_scaling(self, cluster, instances, ex):
         """Attempt to rollback cluster scaling."""
-        LOG.info("Cluster '%s' scaling rollback (reason: %s)",
-                 cluster.name, ex)
+        LOG.info(_LI("Cluster '%(name)s' scaling rollback "
+                     "(reason: %(reason)s)"),
+                 {'name': cluster.name, 'reason': ex})

         for i in instances:
             self._shutdown_instance(i)
@@ -309,21 +316,21 @@ class DirectEngine(e.Engine):
        try:
            networks.delete_floating_ip(instance.instance_id)
        except nova_exceptions.NotFound:
-            LOG.warn("Attempted to delete non-existent floating IP in "
-                     "pool %s from instancie %s",
-                     instance.node_group.floating_ip_pool,
-                     instance.instance_id)
+            LOG.warn(_LW("Attempted to delete non-existent floating IP in "
+                         "pool %(pool)s from instance %(instance)s"),
+                     {'pool': instance.node_group.floating_ip_pool,
+                      'instance': instance.instance_id})

        try:
            volumes.detach_from_instance(instance)
        except Exception:
-            LOG.warn("Detaching volumes from instance %s failed",
+            LOG.warn(_LW("Detaching volumes from instance %s failed"),
                     instance.instance_id)

        try:
            nova.client().servers.delete(instance.instance_id)
        except nova_exceptions.NotFound:
-            LOG.warn("Attempted to delete non-existent instance %s",
+            LOG.warn(_LW("Attempted to delete non-existent instance %s"),
                     instance.instance_id)

        conductor.instance_remove(ctx, instance)
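
Two things change in each of these log calls, not one: positional %s
placeholders become named %(key)s placeholders, and the interpolation moves out
of an eager % expression into arguments of the logging call. A minimal
self-contained contrast (standard logging and a stub marker stand in for
Sahara's LOG and _LW):

    import logging

    logging.basicConfig(level=logging.WARNING)
    LOG = logging.getLogger(__name__)
    _LW = lambda s: s  # stand-in for sahara.i18n._LW

    name, reason = "demo-cluster", "quota exceeded"

    # Old style: formatted before the logger ever sees it, and positional,
    # so a translation cannot reorder the fields.
    LOG.warning("Can't start cluster '%s' (reason: %s)" % (name, reason))

    # New style: the logger interpolates only if the record is emitted,
    # and named keys survive reordering in translated strings.
    LOG.warning(_LW("Can't start cluster '%(cluster)s' "
                    "(reason: %(reason)s)"),
                {'cluster': name, 'reason': reason})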

View File

@@ -20,6 +20,8 @@ from oslo.config import cfg
 from sahara import conductor as c
 from sahara import context
 from sahara import exceptions as e
+from sahara.i18n import _
+from sahara.i18n import _LE
 from sahara.openstack.common import log
 from sahara.service.edp import job_utils
 from sahara.service.edp.oozie import engine as oozie_engine
@@ -97,7 +99,7 @@ def _run_job(job_execution_id):
     eng = _get_job_engine(cluster, job_execution)
     if eng is None:
-        raise e.EDPError("Cluster does not support job type %s"
+        raise e.EDPError(_("Cluster does not support job type %s")
                          % _get_job_type(job_execution))

     job_execution = _update_job_execution_extra(cluster, job_execution)
     jid = eng.run_job(job_execution)
@@ -111,8 +113,9 @@ def run_job(job_execution_id):
     try:
         _run_job(job_execution_id)
     except Exception as ex:
-        LOG.exception("Can't run job execution '%s' (reason: %s)",
-                      job_execution_id, ex)
+        LOG.exception(
+            _LE("Can't run job execution '%(job)s' (reason: %(reason)s)"),
+            {'job': job_execution_id, 'reason': ex})

         conductor.job_execution_update(
             context.ctx(), job_execution_id,
@@ -131,8 +134,9 @@ def cancel_job(job_execution_id):
         try:
             engine.cancel_job(job_execution)
         except Exception as e:
-            LOG.exception("Error during cancel of job execution %s: %s" %
-                          (job_execution.id, e))
+            LOG.exception(
+                _LE("Error during cancel of job execution %(job)s: "
+                    "%(error)s"), {'job': job_execution.id, 'error': e})

         job_execution = _update_job_status(engine, job_execution)

     return job_execution
@@ -155,8 +159,9 @@ def update_job_statuses():
         try:
             get_job_status(je.id)
         except Exception as e:
-            LOG.exception("Error during update job execution %s: %s" %
-                          (je.id, e))
+            LOG.exception(
+                _LE("Error during update job execution %(job)s: %(error)s"),
+                {'job': je.id, 'error': e})


 def get_job_config_hints(job_type):
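
Note that this module needs both markers: _() wraps the EDPError text, which is
interpolated immediately because the exception must carry its finished message
back to the user, while _LE() wraps log text, whose interpolation is deferred
to the logger. Schematically (identifiers reused from the hunks above):

    raise e.EDPError(_("Cluster does not support job type %s")
                     % _get_job_type(job_execution))  # eager: text travels with the exception

    LOG.exception(
        _LE("Can't run job execution '%(job)s' (reason: %(reason)s)"),
        {'job': job_execution_id, 'reason': ex})      # lazy: logger interpolates on emit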

View File

@@ -22,6 +22,8 @@ import six
 from sahara import conductor as c
 from sahara import context
+from sahara.i18n import _LI
+from sahara.i18n import _LW
 from sahara.openstack.common import log as logging
 from sahara.service import networks
 from sahara.utils import general as g
@@ -66,7 +68,8 @@ class Engine:
             context.sleep(1)

-        LOG.info("Cluster '%s': all instances have IPs assigned" % cluster.id)
+        LOG.info(
+            _LI("Cluster '%s': all instances have IPs assigned"), cluster.id)

         cluster = conductor.cluster_get(context.ctx(), cluster)
         instances = g.get_instances(cluster, ips_assigned)
@@ -76,7 +79,7 @@ class Engine:
                 tg.spawn("wait-for-ssh-%s" % instance.instance_name,
                          self._wait_until_accessible, instance)

-        LOG.info("Cluster '%s': all instances are accessible" % cluster.id)
+        LOG.info(_LI("Cluster '%s': all instances are accessible"), cluster.id)

     def _wait_until_accessible(self, instance):
         while True:
def _wait_until_accessible(self, instance):
while True:
@ -159,7 +162,8 @@ echo "${public_key}" >> ${user_home}/.ssh/authorized_keys\n
def _log_operation_exception(self, message, cluster, ex):
# we want to log the initial exception even if cluster was deleted
cluster_name = cluster.name if cluster is not None else '_unknown_'
LOG.warn(message, cluster_name, ex)
LOG.warn(message, {'cluster': cluster_name, 'reason': ex})
if cluster is None:
LOG.warn("Presumably the operation failed because the cluster was"
"deleted by a user during the process.")
LOG.warn(
_LW("Presumably the operation failed because the cluster was "
"deleted by a user during the process."))

View File

@@ -19,6 +19,9 @@ import six
 from sahara import conductor as c
 from sahara import context
+from sahara.i18n import _LE
+from sahara.i18n import _LI
+from sahara.i18n import _LW
 from sahara.openstack.common import excutils
 from sahara.openstack.common import log as logging
 from sahara.service import engine as e
@@ -64,7 +67,8 @@ class HeatEngine(e.Engine):
                     LOG.info(g.format_cluster_deleted_message(cluster))
                     return
                 self._log_operation_exception(
-                    "Can't start cluster '%s' (reason: %s)", cluster, ex)
+                    _LW("Can't start cluster '%(cluster)s' "
+                        "(reason: %(reason)s)"), cluster, ex)

                 cluster = g.change_cluster_status(
                     cluster, "Error", status_description=six.text_type(ex))
@@ -97,7 +101,8 @@ class HeatEngine(e.Engine):
                     LOG.info(g.format_cluster_deleted_message(cluster))
                     return
                 self._log_operation_exception(
-                    "Can't scale cluster '%s' (reason: %s)", cluster, ex)
+                    _LW("Can't scale cluster '%(cluster)s' "
+                        "(reason: %(reason)s)"), cluster, ex)

                 cluster = conductor.cluster_get(ctx, cluster)
@@ -111,12 +116,13 @@ class HeatEngine(e.Engine):
                     # if something fails during the rollback, we stop
                     # doing anything further
                     cluster = g.change_cluster_status(cluster, "Error")
-                    LOG.error("Unable to complete rollback, aborting")
+                    LOG.error(_LE("Unable to complete rollback, aborting"))
                     raise

                 cluster = g.change_cluster_status(cluster, "Active")
                 LOG.warn(
-                    "Rollback successful. Throwing off an initial exception.")
+                    _LW("Rollback successful. "
+                        "Throwing off an initial exception."))
         finally:
             cluster = conductor.cluster_get(ctx, cluster)
             g.clean_cluster_from_empty_ng(cluster)
@@ -141,7 +147,7 @@ class HeatEngine(e.Engine):
     def _rollback_cluster_creation(self, cluster):
         """Shutdown all instances and update cluster status."""
-        LOG.info("Cluster '%s' creation rollback", cluster.name)
+        LOG.info(_LI("Cluster '%s' creation rollback"), cluster.name)

         self.shutdown_cluster(cluster)
@@ -156,7 +162,7 @@ class HeatEngine(e.Engine):
         maximize the chance of rollback success.
         """
-        LOG.info("Cluster '%s' scaling rollback", cluster.name)
+        LOG.info(_LI("Cluster '%s' scaling rollback"), cluster.name)

         for ng in rollback_count.keys():
             if rollback_count[ng] > target_count[ng]:
@@ -172,7 +178,7 @@ class HeatEngine(e.Engine):
             stack = heat.get_stack(cluster.name)
             heat.wait_stack_completion(stack)
         except heat_exc.HTTPNotFound:
-            LOG.warn('Did not found stack for cluster %s' % cluster.name)
+            LOG.warn(_LW('Did not found stack for cluster %s') % cluster.name)

         self._clean_job_executions(cluster)
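
Unlike most hunks in this commit, the last change keeps the eager % form: the
argument is interpolated into the _LW message before LOG.warn is called. With
oslo's lazy messages this still translates correctly, because the Message
object returned by _LW overrides % and records the parameters for later
translation; the deferred form would have worked equally well:

    LOG.warn(_LW('Did not found stack for cluster %s') % cluster.name)  # as committed
    LOG.warn(_LW('Did not found stack for cluster %s'), cluster.name)   # equivalent deferred form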

View File

@@ -18,6 +18,9 @@ from oslo.config import cfg
 from sahara import conductor as c
 from sahara import context
 from sahara import exceptions as ex
+from sahara.i18n import _
+from sahara.i18n import _LE
+from sahara.i18n import _LW
 from sahara.openstack.common import log as logging
 from sahara.openstack.common import timeutils as tu
 from sahara.utils.openstack import cinder
@@ -56,7 +59,7 @@ def _await_attach_volumes(instance, devices):
         timeout -= step
         context.sleep(step)

-    raise ex.SystemError("Error attach volume to instance %s" %
+    raise ex.SystemError(_("Error attach volume to instance %s") %
                          instance.instance_name)
@@ -85,7 +88,7 @@ def _create_attach_volume(ctx, instance, size, display_name=None):
     while volume.status != 'available':
         volume = cinder.get_volume(volume.id)
         if volume.status == 'error':
-            raise ex.SystemError("Volume %s has error status" % volume.id)
+            raise ex.SystemError(_("Volume %s has error status") % volume.id)

         context.sleep(1)
@@ -142,7 +145,7 @@ def _mount_volume(instance, device_path, mount_point):
         r.execute_command('sudo mkfs.ext4 %s' % device_path)
         r.execute_command('sudo mount %s %s' % (device_path, mount_point))
     except Exception:
-        LOG.error("Error mounting volume to instance %s" %
+        LOG.error(_LE("Error mounting volume to instance %s"),
                   instance.instance_id)
         raise
@@ -161,7 +164,7 @@ def _detach_volume(instance, volume_id):
         nova.client().volumes.delete_server_volume(instance.instance_id,
                                                    volume_id)
     except Exception:
-        LOG.exception("Can't detach volume %s" % volume.id)
+        LOG.exception(_LE("Can't detach volume %s"), volume.id)

     detach_timeout = CONF.detach_volume_timeout
     LOG.debug("Waiting %d seconds to detach %s volume" % (detach_timeout,
@@ -175,8 +178,9 @@ def _detach_volume(instance, volume_id):
             LOG.debug("Volume %s has been detached" % volume_id)
             return
     else:
-        LOG.warn("Can't detach volume %s. Current status of volume: %s" % (
-            volume_id, volume.status))
+        LOG.warn(_LW("Can't detach volume %(volume)s. "
+                     "Current status of volume: %(status)s"),
+                 {'volume': volume_id, 'status': volume.status})


 def _delete_volume(volume_id):
@@ -185,4 +189,4 @@ def _delete_volume(volume_id):
     try:
         volume.delete()
     except Exception:
-        LOG.exception("Can't delete volume %s" % volume.id)
+        LOG.exception(_LE("Can't delete volume %s"), volume.id)
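
Only warning- and error-level strings get markers in this file. The LOG.debug
calls in the same functions are left unmarked on purpose: per the OpenStack
logging guidelines of the time, debug output is not translated. Compare two
lines from the hunks above:

    LOG.debug("Volume %s has been detached" % volume_id)     # debug: unmarked by convention
    LOG.exception(_LE("Can't delete volume %s"), volume.id)  # error: marked for translation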

View File

@@ -19,6 +19,7 @@ import paramiko
 import six

 from sahara import exceptions as ex
+from sahara.i18n import _
 from sahara.openstack.common import processutils
 from sahara.utils import tempfiles
@@ -47,11 +48,11 @@ def generate_key_pair(key_length=2048):
             args.extend(['-b', key_length])
         processutils.execute(*args)
         if not os.path.exists(keyfile):
-            raise ex.SystemError("Private key file hasn't been created")
+            raise ex.SystemError(_("Private key file hasn't been created"))
         private_key = open(keyfile).read()
         public_key_path = keyfile + '.pub'
         if not os.path.exists(public_key_path):
-            raise ex.SystemError("Public key file hasn't been created")
+            raise ex.SystemError(_("Public key file hasn't been created"))
         public_key = open(public_key_path).read()
     return private_key, public_key

View File

@@ -17,9 +17,9 @@ import six
 from sahara import conductor as c
 from sahara import context
+from sahara.i18n import _LI
 from sahara.openstack.common import log as logging

 conductor = c.API
 LOG = logging.getLogger(__name__)
@@ -69,16 +69,20 @@ def change_cluster_status(cluster, status, status_description=None):
         update_dict["status_description"] = status_description

     cluster = conductor.cluster_update(context.ctx(), cluster, update_dict)

-    LOG.info("Cluster status has been changed: id=%s, New status=%s",
-             cluster.id, cluster.status)
+    LOG.info(_LI("Cluster status has been changed: id=%(id)s, New status="
+                 "%(status)s"), {'id': cluster.id, 'status': cluster.status})

     return cluster


 def format_cluster_deleted_message(cluster):
-    msg = "Cluster %s (id=%s) was deleted. Canceling current operation."
+    msg = _LI("Cluster %(name)s (id=%(id)s) was deleted. "
+              "Canceling current operation.")

     if cluster:
-        return msg % (cluster.name, cluster.id)
-    return msg % ("Unknown", "Unknown")
+        return (msg, {'name': cluster.name,
+                      'id': cluster.id})
+    return (msg, {'name': _LI("Unknown"),
+                  'id': _LI("Unknown")})


 def check_cluster_exists(cluster):
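
With this change format_cluster_deleted_message() stops returning a finished
string and instead returns a (message, params) pair, so a consumer is
presumably expected to splat the pair into the logging call, along the lines
of this hypothetical call shape (the unchanged
LOG.info(g.format_cluster_deleted_message(cluster)) context lines earlier in
the commit still pass the tuple through as a single argument):

    LOG.info(*g.format_cluster_deleted_message(cluster))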

View File

@@ -20,6 +20,7 @@ from six.moves.urllib import parse as urlparse
 from sahara import context
 from sahara import exceptions as ex
+from sahara.i18n import _


 CONF = cfg.CONF
@@ -41,12 +42,14 @@ def url_for(service_catalog, service_type, admin=False, endpoint_type=None):
             return _get_endpoint_url(endpoints, endpoint_type)
         except Exception:
             raise ex.SystemError(
-                "Endpoint with type %s is not found for service %s"
-                % (endpoint_type, service_type))
+                _("Endpoint with type %(type)s is not found for service "
+                  "%(service)s")
+                % {'type': endpoint_type,
+                   'service': service_type})
     else:
-        raise ex.SystemError('Service "%s" not found in service catalog'
-                             % service_type)
+        raise ex.SystemError(
+            _('Service "%s" not found in service catalog') % service_type)


 def _get_service_from_catalog(catalog, service_type):

View File

@@ -23,6 +23,7 @@ from requests import adapters
 from sahara import context
 from sahara import exceptions as ex
+from sahara.i18n import _
 from sahara.openstack.common import log as logging
 from sahara.utils.openstack import base
@@ -74,8 +75,8 @@ class NeutronClientRemoteWrapper():
                 break

         if not matching_router:
-            raise ex.SystemError('Neutron router corresponding to network {0} '
-                                 'is not found'.format(self.network))
+            raise ex.SystemError(_('Neutron router corresponding to network '
+                                   '%s is not found') % self.network)

         return matching_router['id']
@@ -181,7 +182,7 @@ class NetcatSocket:
             return self.process.stdout
         if mode.startswith('w'):
             return self.process.stdin
-        raise ex.IncorrectStateError("Unknown file mode %s" % mode)
+        raise ex.IncorrectStateError(_("Unknown file mode %s") % mode)

     def recv(self, size):
         try:

View File

@@ -20,7 +20,7 @@ from oslo.config import cfg
 import six

 from sahara import exceptions as ex
+from sahara.i18n import _


 # These options are for SSH remote only
 ssh_opts = [
@@ -120,10 +120,10 @@ def setup_remote(driver, engine):

 def _check_driver_is_loaded():
     if not DRIVER:
-        raise ex.SystemError('Remote driver is not loaded. Most probably you '
-                             'see this error because you are running Sahara '
-                             'in distributed mode and it is broken. Try '
-                             'running sahara-all instead.')
+        raise ex.SystemError(_('Remote driver is not loaded. Most probably '
+                               'you see this error because you are running '
+                               'Sahara in distributed mode and it is broken. '
+                               'Try running sahara-all instead.'))


 def get_remote(instance):

View File

@@ -18,6 +18,7 @@ import shutil
 import tempfile

 from sahara import exceptions as ex
+from sahara.i18n import _


 @contextlib.contextmanager
@@ -32,5 +33,6 @@ def tempdir(**kwargs):
         try:
             shutil.rmtree(tmpdir)
         except OSError as e:
-            raise ex.SystemError("Failed to delete temp dir %s (reason: %s)" %
-                                 (tmpdir, e))
+            raise ex.SystemError(
+                _("Failed to delete temp dir %(dir)s (reason: %(reason)s)") %
+                {'dir': tmpdir, 'reason': e})
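
The markers in this commit only defer translation; they take effect once the
service installs the translation machinery at startup. A sketch of what that
opt-in looked like for oslo-incubator projects of this era (not part of this
commit, and the exact call site in Sahara may differ):

    from sahara.openstack.common import gettextutils

    gettextutils.enable_lazy()  # _()/_L*() now return lazy Message objects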