Merge "Add translation support to service and utils modules"

commit e23efe5471
Jenkins, 2014-07-30 14:05:10 +00:00, committed by Gerrit Code Review
11 changed files with 99 additions and 62 deletions
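For reference, the `_`, `_LI`, `_LW`, and `_LE` markers imported throughout this change come from `sahara.i18n`. That module is not part of this diff; the sketch below shows how such a module was typically defined at the time, assuming the oslo-incubator `gettextutils.TranslatorFactory` pattern — a minimal sketch, not the verbatim module:

    # sahara/i18n.py -- sketch of the usual oslo-style translator module.
    from sahara.openstack.common import gettextutils

    _translators = gettextutils.TranslatorFactory(domain='sahara')

    # Primary translation marker, used for user-facing exception messages.
    _ = _translators.primary

    # Per-level markers for translated log messages: info, warning, error.
    _LI = _translators.log_info
    _LW = _translators.log_warning
    _LE = _translators.log_error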

View File

@@ -20,6 +20,9 @@ import six
 from sahara import conductor as c
 from sahara import context
 from sahara import exceptions as exc
+from sahara.i18n import _
+from sahara.i18n import _LI
+from sahara.i18n import _LW
 from sahara.openstack.common import excutils
 from sahara.openstack.common import log as logging
 from sahara.service import engine as e
@@ -76,7 +79,8 @@ class DirectEngine(e.Engine):
             return
         self._log_operation_exception(
-            "Can't start cluster '%s' (reason: %s)", cluster, ex)
+            _LW("Can't start cluster '%(cluster)s' "
+                "(reason: %(reason)s)"), cluster, ex)

         cluster = g.change_cluster_status(
             cluster, "Error", status_description=six.text_type(ex))
@@ -122,7 +126,8 @@ class DirectEngine(e.Engine):
             return []
         self._log_operation_exception(
-            "Can't scale cluster '%s' (reason: %s)", cluster, ex)
+            _LW("Can't scale cluster '%(cluster)s' "
+                "(reason: %(reason)s)"), cluster, ex)

         cluster = conductor.cluster_get(ctx, cluster)
         self._rollback_cluster_scaling(
@@ -272,27 +277,29 @@ class DirectEngine(e.Engine):
             context.sleep(1)

-        LOG.info("Cluster '%s': all instances are active" % cluster.id)
+        LOG.info(_LI("Cluster '%s': all instances are active"), cluster.id)

     def _check_if_active(self, instance):
         server = nova.get_instance_info(instance)
         if server.status == 'ERROR':
-            raise exc.SystemError("Node %s has error status" % server.name)
+            raise exc.SystemError(_("Node %s has error status") % server.name)

         return server.status == 'ACTIVE'

     def _rollback_cluster_creation(self, cluster, ex):
         """Shutdown all instances and update cluster status."""
-        LOG.info("Cluster '%s' creation rollback (reason: %s)",
-                 cluster.name, ex)
+        LOG.info(_LI("Cluster '%(name)s' creation rollback "
+                     "(reason: %(reason)s)"),
+                 {'name': cluster.name, 'reason': ex})

         self.shutdown_cluster(cluster)

     def _rollback_cluster_scaling(self, cluster, instances, ex):
         """Attempt to rollback cluster scaling."""
-        LOG.info("Cluster '%s' scaling rollback (reason: %s)",
-                 cluster.name, ex)
+        LOG.info(_LI("Cluster '%(name)s' scaling rollback "
+                     "(reason: %(reason)s)"),
+                 {'name': cluster.name, 'reason': ex})

         for i in instances:
             self._shutdown_instance(i)
@@ -309,21 +316,21 @@ class DirectEngine(e.Engine):
         try:
             networks.delete_floating_ip(instance.instance_id)
         except nova_exceptions.NotFound:
-            LOG.warn("Attempted to delete non-existent floating IP in "
-                     "pool %s from instancie %s",
-                     instance.node_group.floating_ip_pool,
-                     instance.instance_id)
+            LOG.warn(_LW("Attempted to delete non-existent floating IP in "
+                         "pool %(pool)s from instance %(instance)s"),
+                     {'pool': instance.node_group.floating_ip_pool,
+                      'instance': instance.instance_id})

         try:
             volumes.detach_from_instance(instance)
         except Exception:
-            LOG.warn("Detaching volumes from instance %s failed",
-                     instance.instance_id)
+            LOG.warn(_LW("Detaching volumes from instance %s failed"),
+                     instance.instance_id)

         try:
             nova.client().servers.delete(instance.instance_id)
         except nova_exceptions.NotFound:
-            LOG.warn("Attempted to delete non-existent instance %s",
-                     instance.instance_id)
+            LOG.warn(_LW("Attempted to delete non-existent instance %s"),
+                     instance.instance_id)

         conductor.instance_remove(ctx, instance)
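A note on the pattern above: wherever a message carries more than one substitution, the change also switches from positional `%s` to named `%(...)s` placeholders fed from a dict, because a translation may need to reorder the values; passing the dict to the logger instead of %-formatting up front also defers interpolation until the record is actually emitted. A hedged illustration (the reordered catalog entry is invented):

    # Positional placeholders break if a translated string swaps their order;
    # named placeholders stay unambiguous, e.g. a hypothetical catalog entry
    #   "(raison : %(reason)s) le cluster '%(cluster)s' n'a pas pu demarrer"
    # still picks the right values. The stdlib logging module treats a lone
    # dict argument as the mapping for %(name)s-style substitution.
    LOG.warn(_LW("Can't start cluster '%(cluster)s' "
                 "(reason: %(reason)s)"),
             {'cluster': cluster.name, 'reason': ex})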

View File

@@ -20,6 +20,8 @@ from oslo.config import cfg
 from sahara import conductor as c
 from sahara import context
 from sahara import exceptions as e
+from sahara.i18n import _
+from sahara.i18n import _LE
 from sahara.openstack.common import log
 from sahara.service.edp import job_utils
 from sahara.service.edp.oozie import engine as oozie_engine
@@ -97,7 +99,7 @@ def _run_job(job_execution_id):
     eng = _get_job_engine(cluster, job_execution)
     if eng is None:
-        raise e.EDPError("Cluster does not support job type %s"
-                         % _get_job_type(job_execution))
+        raise e.EDPError(_("Cluster does not support job type %s")
+                         % _get_job_type(job_execution))

     job_execution = _update_job_execution_extra(cluster, job_execution)
     jid = eng.run_job(job_execution)
@@ -111,8 +113,9 @@ def run_job(job_execution_id):
     try:
         _run_job(job_execution_id)
     except Exception as ex:
-        LOG.exception("Can't run job execution '%s' (reason: %s)",
-                      job_execution_id, ex)
+        LOG.exception(
+            _LE("Can't run job execution '%(job)s' (reason: %(reason)s)"),
+            {'job': job_execution_id, 'reason': ex})

         conductor.job_execution_update(
             context.ctx(), job_execution_id,
@@ -131,8 +134,9 @@ def cancel_job(job_execution_id):
     try:
         engine.cancel_job(job_execution)
     except Exception as e:
-        LOG.exception("Error during cancel of job execution %s: %s" %
-                      (job_execution.id, e))
+        LOG.exception(
+            _LE("Error during cancel of job execution %(job)s: "
+                "%(error)s"), {'job': job_execution.id, 'error': e})

     job_execution = _update_job_status(engine, job_execution)
     return job_execution
@@ -155,8 +159,9 @@ def update_job_statuses():
         try:
             get_job_status(je.id)
         except Exception as e:
-            LOG.exception("Error during update job execution %s: %s" %
-                          (je.id, e))
+            LOG.exception(
+                _LE("Error during update job execution %(job)s: %(error)s"),
+                {'job': je.id, 'error': e})


 def get_job_config_hints(job_type):

View File

@@ -22,6 +22,8 @@ import six
 from sahara import conductor as c
 from sahara import context
+from sahara.i18n import _LI
+from sahara.i18n import _LW
 from sahara.openstack.common import log as logging
 from sahara.service import networks
 from sahara.utils import general as g
@@ -66,7 +68,8 @@ class Engine:
             context.sleep(1)

-        LOG.info("Cluster '%s': all instances have IPs assigned" % cluster.id)
+        LOG.info(
+            _LI("Cluster '%s': all instances have IPs assigned"), cluster.id)

         cluster = conductor.cluster_get(context.ctx(), cluster)
         instances = g.get_instances(cluster, ips_assigned)
@@ -76,7 +79,7 @@ class Engine:
                 tg.spawn("wait-for-ssh-%s" % instance.instance_name,
                          self._wait_until_accessible, instance)

-        LOG.info("Cluster '%s': all instances are accessible" % cluster.id)
+        LOG.info(_LI("Cluster '%s': all instances are accessible"), cluster.id)

     def _wait_until_accessible(self, instance):
         while True:
@@ -159,7 +162,8 @@ echo "${public_key}" >> ${user_home}/.ssh/authorized_keys\n
     def _log_operation_exception(self, message, cluster, ex):
         # we want to log the initial exception even if cluster was deleted
         cluster_name = cluster.name if cluster is not None else '_unknown_'
-        LOG.warn(message, cluster_name, ex)
+        LOG.warn(message, {'cluster': cluster_name, 'reason': ex})
         if cluster is None:
-            LOG.warn("Presumably the operation failed because the cluster was"
-                     "deleted by a user during the process.")
+            LOG.warn(
+                _LW("Presumably the operation failed because the cluster was "
+                    "deleted by a user during the process."))

View File

@@ -19,6 +19,9 @@ import six
 from sahara import conductor as c
 from sahara import context
+from sahara.i18n import _LE
+from sahara.i18n import _LI
+from sahara.i18n import _LW
 from sahara.openstack.common import excutils
 from sahara.openstack.common import log as logging
 from sahara.service import engine as e
@@ -64,7 +67,8 @@ class HeatEngine(e.Engine):
             LOG.info(g.format_cluster_deleted_message(cluster))
             return
         self._log_operation_exception(
-            "Can't start cluster '%s' (reason: %s)", cluster, ex)
+            _LW("Can't start cluster '%(cluster)s' "
+                "(reason: %(reason)s)"), cluster, ex)

         cluster = g.change_cluster_status(
             cluster, "Error", status_description=six.text_type(ex))
@@ -97,7 +101,8 @@ class HeatEngine(e.Engine):
             LOG.info(g.format_cluster_deleted_message(cluster))
             return
         self._log_operation_exception(
-            "Can't scale cluster '%s' (reason: %s)", cluster, ex)
+            _LW("Can't scale cluster '%(cluster)s' "
+                "(reason: %(reason)s)"), cluster, ex)

         cluster = conductor.cluster_get(ctx, cluster)
@@ -111,12 +116,13 @@ class HeatEngine(e.Engine):
                 # if something fails during the rollback, we stop
                 # doing anything further
                 cluster = g.change_cluster_status(cluster, "Error")
-                LOG.error("Unable to complete rollback, aborting")
+                LOG.error(_LE("Unable to complete rollback, aborting"))
                 raise

             cluster = g.change_cluster_status(cluster, "Active")
             LOG.warn(
-                "Rollback successful. Throwing off an initial exception.")
+                _LW("Rollback successful. "
+                    "Throwing off an initial exception."))
         finally:
             cluster = conductor.cluster_get(ctx, cluster)
             g.clean_cluster_from_empty_ng(cluster)
@@ -141,7 +147,7 @@ class HeatEngine(e.Engine):
     def _rollback_cluster_creation(self, cluster):
         """Shutdown all instances and update cluster status."""
-        LOG.info("Cluster '%s' creation rollback", cluster.name)
+        LOG.info(_LI("Cluster '%s' creation rollback"), cluster.name)

         self.shutdown_cluster(cluster)
@@ -156,7 +162,7 @@ class HeatEngine(e.Engine):
         maximize the chance of rollback success.
         """
-        LOG.info("Cluster '%s' scaling rollback", cluster.name)
+        LOG.info(_LI("Cluster '%s' scaling rollback"), cluster.name)

         for ng in rollback_count.keys():
             if rollback_count[ng] > target_count[ng]:
@@ -172,7 +178,7 @@ class HeatEngine(e.Engine):
             stack = heat.get_stack(cluster.name)
             heat.wait_stack_completion(stack)
         except heat_exc.HTTPNotFound:
-            LOG.warn('Did not found stack for cluster %s' % cluster.name)
+            LOG.warn(_LW('Did not found stack for cluster %s') % cluster.name)

         self._clean_job_executions(cluster)

View File

@@ -18,6 +18,9 @@ from oslo.config import cfg
 from sahara import conductor as c
 from sahara import context
 from sahara import exceptions as ex
+from sahara.i18n import _
+from sahara.i18n import _LE
+from sahara.i18n import _LW
 from sahara.openstack.common import log as logging
 from sahara.openstack.common import timeutils as tu
 from sahara.utils.openstack import cinder
@@ -56,7 +59,7 @@ def _await_attach_volumes(instance, devices):
         timeout -= step
         context.sleep(step)

-    raise ex.SystemError("Error attach volume to instance %s" %
-                         instance.instance_name)
+    raise ex.SystemError(_("Error attach volume to instance %s") %
+                         instance.instance_name)
@@ -85,7 +88,7 @@ def _create_attach_volume(ctx, instance, size, display_name=None):
     while volume.status != 'available':
         volume = cinder.get_volume(volume.id)
         if volume.status == 'error':
-            raise ex.SystemError("Volume %s has error status" % volume.id)
+            raise ex.SystemError(_("Volume %s has error status") % volume.id)

         context.sleep(1)
@@ -142,7 +145,7 @@ def _mount_volume(instance, device_path, mount_point):
         r.execute_command('sudo mkfs.ext4 %s' % device_path)
         r.execute_command('sudo mount %s %s' % (device_path, mount_point))
     except Exception:
-        LOG.error("Error mounting volume to instance %s" %
-                  instance.instance_id)
+        LOG.error(_LE("Error mounting volume to instance %s"),
+                  instance.instance_id)
         raise
@@ -161,7 +164,7 @@ def _detach_volume(instance, volume_id):
         nova.client().volumes.delete_server_volume(instance.instance_id,
                                                    volume_id)
     except Exception:
-        LOG.exception("Can't detach volume %s" % volume.id)
+        LOG.exception(_LE("Can't detach volume %s"), volume.id)

     detach_timeout = CONF.detach_volume_timeout
     LOG.debug("Waiting %d seconds to detach %s volume" % (detach_timeout,
@@ -175,8 +178,9 @@ def _detach_volume(instance, volume_id):
             LOG.debug("Volume %s has been detached" % volume_id)
             return
     else:
-        LOG.warn("Can't detach volume %s. Current status of volume: %s" % (
-            volume_id, volume.status))
+        LOG.warn(_LW("Can't detach volume %(volume)s. "
+                     "Current status of volume: %(status)s"),
+                 {'volume': volume_id, 'status': volume.status})
@@ -185,4 +189,4 @@ def _delete_volume(volume_id):
     try:
         volume.delete()
     except Exception:
-        LOG.exception("Can't delete volume %s" % volume.id)
+        LOG.exception(_LE("Can't delete volume %s"), volume.id)
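The `LOG.debug(...)` calls in this file are left untranslated on purpose: OpenStack's i18n guidelines exempt debug-level messages from translation, since they are aimed at developers rather than operators. The resulting convention, restated with lines from this file:

    # Operator-facing levels carry a marker matching their severity:
    LOG.exception(_LE("Can't detach volume %s"), volume.id)

    # Debug output stays untranslated under the guidelines:
    LOG.debug("Volume %s has been detached" % volume_id)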

View File

@@ -19,6 +19,7 @@ import paramiko
 import six

 from sahara import exceptions as ex
+from sahara.i18n import _
 from sahara.openstack.common import processutils
 from sahara.utils import tempfiles
@@ -47,11 +48,11 @@ def generate_key_pair(key_length=2048):
             args.extend(['-b', key_length])
         processutils.execute(*args)
         if not os.path.exists(keyfile):
-            raise ex.SystemError("Private key file hasn't been created")
+            raise ex.SystemError(_("Private key file hasn't been created"))
         private_key = open(keyfile).read()
         public_key_path = keyfile + '.pub'
         if not os.path.exists(public_key_path):
-            raise ex.SystemError("Public key file hasn't been created")
+            raise ex.SystemError(_("Public key file hasn't been created"))
         public_key = open(public_key_path).read()
     return private_key, public_key

View File

@@ -17,9 +17,9 @@ import six
 from sahara import conductor as c
 from sahara import context
+from sahara.i18n import _LI
 from sahara.openstack.common import log as logging

 conductor = c.API
 LOG = logging.getLogger(__name__)
@@ -69,16 +69,20 @@ def change_cluster_status(cluster, status, status_description=None):
         update_dict["status_description"] = status_description

     cluster = conductor.cluster_update(context.ctx(), cluster, update_dict)
-    LOG.info("Cluster status has been changed: id=%s, New status=%s",
-             cluster.id, cluster.status)
+    LOG.info(_LI("Cluster status has been changed: id=%(id)s, New status="
+                 "%(status)s"), {'id': cluster.id, 'status': cluster.status})
     return cluster


 def format_cluster_deleted_message(cluster):
-    msg = "Cluster %s (id=%s) was deleted. Canceling current operation."
+    msg = _LI("Cluster %(name)s (id=%(id)s) was deleted. "
+              "Canceling current operation.")
     if cluster:
-        return msg % (cluster.name, cluster.id)
-    return msg % ("Unknown", "Unknown")
+        return (msg, {'name': cluster.name,
+                      'id': cluster.id})
+    return (msg, {'name': _LI("Unknown"),
+                  'id': _LI("Unknown")})


 def check_cluster_exists(cluster):
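`format_cluster_deleted_message` now returns the unformatted translatable string together with its parameter dict rather than a pre-formatted string, keeping interpolation lazy. The call sites shown earlier pass the result to `LOG.info` directly; for the pair to interpolate through the logger it would need to be unpacked — a sketch of such usage, not taken from this commit:

    # Unpack the (format_string, params) pair so the logging layer performs
    # the %(name)s substitution lazily.
    msg, params = g.format_cluster_deleted_message(cluster)
    LOG.info(msg, params)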

View File

@@ -20,6 +20,7 @@ from six.moves.urllib import parse as urlparse
 from sahara import context
 from sahara import exceptions as ex
+from sahara.i18n import _

 CONF = cfg.CONF
@@ -41,12 +42,14 @@ def url_for(service_catalog, service_type, admin=False, endpoint_type=None):
             return _get_endpoint_url(endpoints, endpoint_type)
         except Exception:
             raise ex.SystemError(
-                "Endpoint with type %s is not found for service %s"
-                % (endpoint_type, service_type))
+                _("Endpoint with type %(type)s is not found for service "
+                  "%(service)s")
+                % {'type': endpoint_type,
+                   'service': service_type})
     else:
-        raise ex.SystemError('Service "%s" not found in service catalog'
-                             % service_type)
+        raise ex.SystemError(
+            _('Service "%s" not found in service catalog') % service_type)


 def _get_service_from_catalog(catalog, service_type):

View File

@@ -23,6 +23,7 @@ from requests import adapters
 from sahara import context
 from sahara import exceptions as ex
+from sahara.i18n import _
 from sahara.openstack.common import log as logging
 from sahara.utils.openstack import base
@@ -74,8 +75,8 @@ class NeutronClientRemoteWrapper():
                 break

         if not matching_router:
-            raise ex.SystemError('Neutron router corresponding to network {0} '
-                                 'is not found'.format(self.network))
+            raise ex.SystemError(_('Neutron router corresponding to network '
+                                   '%s is not found') % self.network)

         return matching_router['id']
@@ -181,7 +182,7 @@ class NetcatSocket:
             return self.process.stdout
         if mode.startswith('w'):
             return self.process.stdin
-        raise ex.IncorrectStateError("Unknown file mode %s" % mode)
+        raise ex.IncorrectStateError(_("Unknown file mode %s") % mode)

     def recv(self, size):
         try:

View File

@@ -20,7 +20,7 @@ from oslo.config import cfg
 import six

 from sahara import exceptions as ex
+from sahara.i18n import _

 # These options are for SSH remote only
 ssh_opts = [
@@ -120,10 +120,10 @@ def setup_remote(driver, engine):
 def _check_driver_is_loaded():
     if not DRIVER:
-        raise ex.SystemError('Remote driver is not loaded. Most probably you '
-                             'see this error because you are running Sahara '
-                             'in distributed mode and it is broken. Try '
-                             'running sahara-all instead.')
+        raise ex.SystemError(_('Remote driver is not loaded. Most probably '
+                               'you see this error because you are running '
+                               'Sahara in distributed mode and it is broken. '
+                               'Try running sahara-all instead.'))


 def get_remote(instance):

View File

@@ -18,6 +18,7 @@ import shutil
 import tempfile

 from sahara import exceptions as ex
+from sahara.i18n import _


 @contextlib.contextmanager
@@ -32,5 +33,6 @@ def tempdir(**kwargs):
         try:
             shutil.rmtree(tmpdir)
         except OSError as e:
-            raise ex.SystemError("Failed to delete temp dir %s (reason: %s)" %
-                                 (tmpdir, e))
+            raise ex.SystemError(
+                _("Failed to delete temp dir %(dir)s (reason: %(reason)s)") %
+                {'dir': tmpdir, 'reason': e})