Merge "Add _() to fix i18N compliance (part 2)"
commit 186b5a471b
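The change is mechanical throughout: user-facing log and exception strings are wrapped in _() so they can be extracted into a translation catalogue, and messages with more than one substitution move from positional %s tuples to named %(key)s dictionaries. A minimal sketch of the pattern, using the stdlib gettext module as a stand-in for heat.openstack.common.gettextutils._ (an assumption for illustration only, not Heat's actual helper):

```python
# Minimal sketch of the i18n pattern this commit applies; assumes the
# oslo gettextutils '_' behaves like a standard gettext translation
# function.  The stdlib 'gettext' module stands in here so the example
# runs outside the Heat tree.
import gettext
import logging

_ = gettext.NullTranslations().gettext  # stand-in for gettextutils._

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

resource_id = 'vol-1234'
name, status = 'my-server', 'SUSPENDED'

# Before: a raw literal, invisible to message-extraction tools.
logger.debug('suspending server %s' % resource_id)

# After: the literal is wrapped in _() so it lands in the .pot catalogue.
logger.debug(_('suspending server %s') % resource_id)

# With more than one substitution the commit also switches to named
# placeholders, so translators can reorder the values.
logger.debug(_('%(name)s check_suspend_complete status = %(status)s')
             % {'name': name, 'status': status})
```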
@@ -17,6 +17,7 @@ from heat.openstack.common import timeutils
from heat.engine import template

from heat.openstack.common import log as logging
+from heat.openstack.common.gettextutils import _

logger = logging.getLogger(__name__)

@@ -31,7 +32,7 @@ def extract_args(params):
try:
timeout_mins = int(params.get(api.PARAM_TIMEOUT, 0))
except (ValueError, TypeError):
-logger.exception('create timeout conversion')
+logger.exception(_('create timeout conversion'))
else:
if timeout_mins > 0:
kwargs[api.PARAM_TIMEOUT] = timeout_mins
@@ -205,7 +206,7 @@ def format_watch_data(wd):
if len(metric) == 1:
metric_name, metric_data = metric[0]
else:
-logger.error("Unexpected number of keys in watch_data.data!")
+logger.error(_("Unexpected number of keys in watch_data.data!"))
return

result = {

@@ -17,6 +17,7 @@ from oslo.config import cfg

from heat.openstack.common import importutils
from heat.openstack.common import log as logging
+from heat.openstack.common.gettextutils import _

logger = logging.getLogger(__name__)

@@ -28,23 +29,23 @@ try:
from swiftclient import client as swiftclient
except ImportError:
swiftclient = None
-logger.info('swiftclient not available')
+logger.info(_('swiftclient not available'))
try:
from neutronclient.v2_0 import client as neutronclient
except ImportError:
neutronclient = None
-logger.info('neutronclient not available')
+logger.info(_('neutronclient not available'))
try:
from cinderclient import client as cinderclient
except ImportError:
cinderclient = None
-logger.info('cinderclient not available')
+logger.info(_('cinderclient not available'))

try:
from ceilometerclient.v2 import client as ceilometerclient
except ImportError:
ceilometerclient = None
-logger.info('ceilometerclient not available')
+logger.info(_('ceilometerclient not available'))

_default_backend = "heat.engine.clients.OpenStackClients"

@@ -91,7 +92,7 @@ class OpenStackClients(object):

con = self.context
if self.auth_token is None:
-logger.error("Nova connection failed, no auth_token!")
+logger.error(_("Nova connection failed, no auth_token!"))
return None

computeshell = novashell.OpenStackComputeShell()
@@ -123,7 +124,7 @@ class OpenStackClients(object):

con = self.context
if self.auth_token is None:
-logger.error("Swift connection failed, no auth_token!")
+logger.error(_("Swift connection failed, no auth_token!"))
return None

args = {
@@ -146,7 +147,7 @@ class OpenStackClients(object):

con = self.context
if self.auth_token is None:
-logger.error("Neutron connection failed, no auth_token!")
+logger.error(_("Neutron connection failed, no auth_token!"))
return None

args = {
@@ -168,7 +169,7 @@ class OpenStackClients(object):

con = self.context
if self.auth_token is None:
-logger.error("Cinder connection failed, no auth_token!")
+logger.error(_("Cinder connection failed, no auth_token!"))
return None

args = {
@@ -193,7 +194,7 @@ class OpenStackClients(object):
return self._ceilometer

if self.auth_token is None:
-logger.error("Ceilometer connection failed, no auth_token!")
+logger.error(_("Ceilometer connection failed, no auth_token!"))
return None
con = self.context
args = {

@@ -16,6 +16,7 @@
import itertools

from heat.openstack.common import log
+from heat.openstack.common.gettextutils import _
from heat.common import exception


@@ -275,7 +276,7 @@ class ResourceRegistry(object):
info = self.get_resource_info(resource_type,
resource_name=resource_name)
if info is None:
-msg = "Unknown resource Type : %s" % resource_type
+msg = _("Unknown resource Type : %s") % resource_type
raise exception.StackValidationFailed(message=msg)
return info.get_class()

@@ -17,6 +17,7 @@ from heat.db import api as db_api
from heat.common import exception
from heat.common import identifier
from heat.openstack.common import log as logging
+from heat.openstack.common.gettextutils import _

logger = logging.getLogger(__name__)

@@ -55,7 +56,7 @@ class Event(object):
ev = event if event is not None else\
db_api.event_get(context, event_id)
if ev is None:
-message = 'No event exists with id "%s"' % str(event_id)
+message = _('No event exists with id "%s"') % str(event_id)
raise exception.NotFound(message)

st = stack if stack is not None else\
@@ -83,7 +84,7 @@ class Event(object):
ev['created_at'] = self.timestamp

if self.id is not None:
-logger.warning('Duplicating event')
+logger.warning(_('Duplicating event'))

new_ev = db_api.event_create(self.context, ev)
self.id = new_ev.id

@@ -161,7 +161,7 @@ class Stack(collections.Mapping):
try:
stack_arn = self.identifier().arn()
except (AttributeError, ValueError, TypeError):
-logger.warning("Unable to set parameters StackId identifier")
+logger.warning(_("Unable to set parameters StackId identifier"))
else:
self.parameters.set_stack_id(stack_arn)

@@ -306,7 +306,7 @@ class Stack(collections.Mapping):
dup_names = set(self.parameters.keys()) & set(self.keys())

if dup_names:
-logger.debug("Duplicate names %s" % dup_names)
+logger.debug(_("Duplicate names %s") % dup_names)
raise StackValidationFailed(message=_("Duplicate names %s") %
dup_names)

@@ -427,13 +427,13 @@ class Stack(collections.Mapping):
s = db_api.stack_get_by_name(self.context, self._backup_name(),
owner_id=self.id)
if s is not None:
-logger.debug('Loaded existing backup stack')
+logger.debug(_('Loaded existing backup stack'))
return self.load(self.context, stack=s)
elif create_if_missing:
prev = type(self)(self.context, self.name, self.t, self.env,
owner_id=self.id)
prev.store(backup=True)
-logger.debug('Created new backup stack')
+logger.debug(_('Created new backup stack'))
return prev
else:
return None
@@ -456,7 +456,7 @@ class Stack(collections.Mapping):
@scheduler.wrappertask
def update_task(self, newstack, action=UPDATE):
if action not in (self.UPDATE, self.ROLLBACK):
-logger.error("Unexpected action %s passed to update!" % action)
+logger.error(_("Unexpected action %s passed to update!") % action)
self.state_set(self.UPDATE, self.FAILED,
"Invalid action %s" % action)
return
@@ -464,7 +464,7 @@ class Stack(collections.Mapping):
if self.status != self.COMPLETE:
if (action == self.ROLLBACK and
self.state == (self.UPDATE, self.IN_PROGRESS)):
-logger.debug("Starting update rollback for %s" % self.name)
+logger.debug(_("Starting update rollback for %s") % self.name)
else:
self.state_set(action, self.FAILED,
'State invalid for %s' % action)
@@ -512,7 +512,7 @@ class Stack(collections.Mapping):
yield self.update_task(oldstack, action=self.ROLLBACK)
return
else:
-logger.debug('Deleting backup stack')
+logger.debug(_('Deleting backup stack'))
backup_stack.delete()

self.state_set(action, stack_status, reason)
@@ -535,7 +535,7 @@ class Stack(collections.Mapping):
differently.
'''
if action not in (self.DELETE, self.ROLLBACK):
-logger.error("Unexpected action %s passed to delete!" % action)
+logger.error(_("Unexpected action %s passed to delete!") % action)
self.state_set(self.DELETE, self.FAILED,
"Invalid action %s" % action)
return
@@ -626,7 +626,7 @@ class Stack(collections.Mapping):
scheduler.TaskRunner(res.destroy)()
except exception.ResourceFailure as ex:
failed = True
-logger.error('delete: %s' % str(ex))
+logger.error(_('delete: %s') % str(ex))

for res in deps:
if not failed:
@@ -634,7 +634,7 @@ class Stack(collections.Mapping):
res.state_reset()
scheduler.TaskRunner(res.create)()
except exception.ResourceFailure as ex:
-logger.exception('create')
+logger.exception(_('create'))
failed = True
else:
res.state_set(res.CREATE, res.FAILED,

@@ -384,7 +384,7 @@ class Resource(object):
self.state_set(action, self.FAILED,
'%s aborted' % action)
except Exception:
-logger.exception('Error marking resource as failed')
+logger.exception(_('Error marking resource as failed'))
else:
self.state_set(action, self.COMPLETE)

@@ -395,7 +395,7 @@ class Resource(object):
'''
action = self.CREATE
if (self.action, self.status) != (self.INIT, self.COMPLETE):
-exc = exception.Error('State %s invalid for create'
+exc = exception.Error(_('State %s invalid for create')
% str(self.state))
raise exception.ResourceFailure(exc, self, action)

@@ -424,7 +424,7 @@ class Resource(object):

if (self.action, self.status) in ((self.CREATE, self.IN_PROGRESS),
(self.UPDATE, self.IN_PROGRESS)):
-exc = Exception('Resource update already requested')
+exc = Exception(_('Resource update already requested'))
raise exception.ResourceFailure(exc, self, action)

logger.info('updating %s' % str(self))
@@ -445,7 +445,8 @@ class Resource(object):
while not self.check_update_complete(handle_data):
yield
except UpdateReplace:
-logger.debug("Resource %s update requires replacement" % self.name)
+logger.debug(_("Resource %s update requires replacement") %
+self.name)
raise
except Exception as ex:
logger.exception('update %s : %s' % (str(self), str(ex)))
@@ -465,11 +466,11 @@ class Resource(object):

# Don't try to suspend the resource unless it's in a stable state
if (self.action == self.DELETE or self.status != self.COMPLETE):
-exc = exception.Error('State %s invalid for suspend'
+exc = exception.Error(_('State %s invalid for suspend')
% str(self.state))
raise exception.ResourceFailure(exc, self, action)

-logger.info('suspending %s' % str(self))
+logger.info(_('suspending %s') % str(self))
return self._do_action(action)

def resume(self):
@@ -481,11 +482,11 @@ class Resource(object):

# Can't resume a resource unless it's SUSPEND_COMPLETE
if self.state != (self.SUSPEND, self.COMPLETE):
-exc = exception.Error('State %s invalid for resume'
+exc = exception.Error(_('State %s invalid for resume')
% str(self.state))
raise exception.ResourceFailure(exc, self, action)

-logger.info('resuming %s' % str(self))
+logger.info(_('resuming %s') % str(self))
return self._do_action(action)

def physical_resource_name(self):
@@ -525,7 +526,7 @@ class Resource(object):
return name[0:2] + '-' + name[-postfix_length:]

def validate(self):
-logger.info('Validating %s' % str(self))
+logger.info(_('Validating %s') % str(self))

self.validate_deletion_policy(self.t)
return self.properties.validate()
@@ -534,11 +535,11 @@ class Resource(object):
def validate_deletion_policy(cls, template):
deletion_policy = template.get('DeletionPolicy', 'Delete')
if deletion_policy not in ('Delete', 'Retain', 'Snapshot'):
-msg = 'Invalid DeletionPolicy %s' % deletion_policy
+msg = _('Invalid DeletionPolicy %s') % deletion_policy
raise exception.StackValidationFailed(message=msg)
elif deletion_policy == 'Snapshot':
if not callable(getattr(cls, 'handle_snapshot_delete', None)):
-msg = 'Snapshot DeletionPolicy not supported'
+msg = _('Snapshot DeletionPolicy not supported')
raise exception.StackValidationFailed(message=msg)

def delete(self):
@@ -556,7 +557,7 @@ class Resource(object):

initial_state = self.state

-logger.info('deleting %s' % str(self))
+logger.info(_('deleting %s') % str(self))

try:
self.state_set(action, self.IN_PROGRESS)
@@ -578,7 +579,7 @@ class Resource(object):
yield

except Exception as ex:
-logger.exception('Delete %s', str(self))
+logger.exception(_('Delete %s'), str(self))
failure = exception.ResourceFailure(ex, self, self.action)
self.state_set(action, self.FAILED, str(failure))
raise failure
@@ -588,7 +589,8 @@ class Resource(object):
self.state_set(action, self.FAILED,
'Deletion aborted')
except Exception:
-logger.exception('Error marking resource deletion failed')
+logger.exception(_('Error marking resource deletion '
+'failed'))
else:
self.state_set(action, self.COMPLETE)

@@ -618,7 +620,7 @@ class Resource(object):
rs = db_api.resource_get(self.context, self.id)
rs.update_and_save({'nova_instance': self.resource_id})
except Exception as ex:
-logger.warn('db error %s' % str(ex))
+logger.warn(_('db error %s') % str(ex))

def _store(self):
'''Create the resource in the database.'''
@@ -639,7 +641,7 @@ class Resource(object):
self.stack.updated_time = datetime.utcnow()

except Exception as ex:
-logger.error('DB error %s' % str(ex))
+logger.error(_('DB error %s') % str(ex))

def _add_event(self, action, status, reason):
'''Add a state change event to the database.'''
@@ -650,7 +652,7 @@ class Resource(object):
try:
ev.store()
except Exception as ex:
-logger.error('DB error %s' % str(ex))
+logger.error(_('DB error %s') % str(ex))

def _store_or_update(self, action, status, reason):
self.action = action
@@ -668,7 +670,7 @@ class Resource(object):

self.stack.updated_time = datetime.utcnow()
except Exception as ex:
-logger.error('DB error %s' % str(ex))
+logger.error(_('DB error %s') % str(ex))

# store resource in DB on transition to CREATE_IN_PROGRESS
# all other transistions (other than to DELETE_COMPLETE)
@@ -696,10 +698,10 @@ class Resource(object):

def state_set(self, action, status, reason="state changed"):
if action not in self.ACTIONS:
-raise ValueError("Invalid action %s" % action)
+raise ValueError(_("Invalid action %s") % action)

if status not in self.STATUSES:
-raise ValueError("Invalid status %s" % status)
+raise ValueError(_("Invalid status %s") % status)

old_state = (self.action, self.status)
new_state = (action, status)
@@ -766,17 +768,19 @@ class Resource(object):

try:
if self.action in (self.SUSPEND, self.DELETE):
-msg = 'Cannot signal resource during %s' % self.action
+msg = _('Cannot signal resource during %s') % self.action
raise Exception(msg)

if not callable(getattr(self, 'handle_signal', None)):
-msg = 'Resource %s is not able to receive a signal' % str(self)
+msg = (_('Resource %s is not able to receive a signal') %
+str(self))
raise Exception(msg)

self._add_event('signal', self.status, get_string_details())
self.handle_signal(details)
except Exception as ex:
-logger.exception('signal %s : %s' % (str(self), str(ex)))
+logger.exception(_('signal %(name)s : %(msg)s') %
+{'name': str(self), 'msg': str(ex)})
failure = exception.ResourceFailure(ex, self)
raise failure

@@ -788,8 +792,8 @@ class Resource(object):
No-op for resources which don't explicitly override this method
'''
if new_metadata:
-logger.warning("Resource %s does not implement metadata update" %
-self.name)
+logger.warning(_("Resource %s does not implement metadata update")
+% self.name)

@classmethod
def resource_to_template(cls, resource_type):

@@ -84,8 +84,9 @@ class S3Bucket(resource.Resource):
"""Create a bucket."""
container = self.physical_resource_name()
headers = self.tags_to_headers()
-logger.debug('S3Bucket create container %s with headers %s' %
-(container, headers))
+logger.debug(_('S3Bucket create container %(container)s with headers '
+'%(headers)s') % {
+'container': container, 'headers': headers})
if self.properties['WebsiteConfiguration'] is not None:
sc = self.properties['WebsiteConfiguration']
# we will assume that swift is configured for the staticweb
@@ -113,12 +114,12 @@ class S3Bucket(resource.Resource):

def handle_delete(self):
"""Perform specified delete policy."""
-logger.debug('S3Bucket delete container %s' % self.resource_id)
+logger.debug(_('S3Bucket delete container %s') % self.resource_id)
if self.resource_id is not None:
try:
self.swift().delete_container(self.resource_id)
except clients.swiftclient.ClientException as ex:
-logger.warn("Delete container failed: %s" % str(ex))
+logger.warn(_("Delete container failed: %s") % str(ex))

def FnGetRefId(self):
return unicode(self.resource_id)

@@ -493,7 +493,7 @@ class Server(resource.Resource):
raise exception.NotFound(_('Failed to find server %s') %
self.resource_id)
else:
-logger.debug('suspending server %s' % self.resource_id)
+logger.debug(_('suspending server %s') % self.resource_id)
# We want the server.suspend to happen after the volume
# detachement has finished, so pass both tasks and the server
suspend_runner = scheduler.TaskRunner(server.suspend)
@@ -510,8 +510,9 @@ class Server(resource.Resource):
return True

server.get()
-logger.debug('%s check_suspend_complete status = %s' %
-(self.name, server.status))
+logger.debug(_('%(name)s check_suspend_complete status '
+'= %(status)s') % {
+'name': self.name, 'status': server.status})
if server.status in list(nova_utils.deferred_server_statuses +
['ACTIVE']):
return server.status == 'SUSPENDED'
@@ -538,7 +539,7 @@ class Server(resource.Resource):
raise exception.NotFound(_('Failed to find server %s') %
self.resource_id)
else:
-logger.debug('resuming server %s' % self.resource_id)
+logger.debug(_('resuming server %s') % self.resource_id)
server.resume()
return server

@@ -60,8 +60,10 @@ class NestedStack(stack_resource.StackResource):
try:
template_data = urlfetch.get(self.properties[PROP_TEMPLATE_URL])
except (exceptions.RequestException, IOError) as r_exc:
-raise ValueError("Could not fetch remote template '%s': %s" %
-(self.properties[PROP_TEMPLATE_URL], str(r_exc)))
+raise ValueError(_("Could not fetch remote template '%(url)s': "
+"%(exc)s") %
+{'url': self.properties[PROP_TEMPLATE_URL],
+'exc': str(r_exc)})

template = template_format.parse(template_data)

@@ -91,8 +93,10 @@ class NestedStack(stack_resource.StackResource):
try:
template_data = urlfetch.get(self.properties[PROP_TEMPLATE_URL])
except (exceptions.RequestException, IOError) as r_exc:
-raise ValueError("Could not fetch remote template '%s': %s" %
-(self.properties[PROP_TEMPLATE_URL], str(r_exc)))
+raise ValueError(_("Could not fetch remote template '%(url)s': "
+"%(exc)s") %
+{'url': self.properties[PROP_TEMPLATE_URL],
+'exc': str(r_exc)})

template = template_format.parse(template_data)

@@ -90,20 +90,22 @@ class SwiftContainer(resource.Resource):
headers['X-Container-Read'] = self.properties['X-Container-Read']
if 'X-Container-Write' in self.properties.keys():
headers['X-Container-Write'] = self.properties['X-Container-Write']
-logger.debug('SwiftContainer create container %s with headers %s' %
-(container, headers))
+logger.debug(_('SwiftContainer create container %(container)s with '
+'headers %(headers)s') % {
+'container': container, 'headers': headers})

self.swift().put_container(container, headers)
self.resource_id_set(container)

def handle_delete(self):
"""Perform specified delete policy."""
-logger.debug('SwiftContainer delete container %s' % self.resource_id)
+logger.debug(_('SwiftContainer delete container %s') %
+self.resource_id)
if self.resource_id is not None:
try:
self.swift().delete_container(self.resource_id)
except clients.swiftclient.ClientException as ex:
-logger.warn("Delete container failed: %s" % str(ex))
+logger.warn(_("Delete container failed: %s") % str(ex))

def FnGetRefId(self):
return unicode(self.resource_id)
@@ -123,7 +125,7 @@ class SwiftContainer(resource.Resource):
try:
headers = self.swift().head_container(self.resource_id)
except clients.swiftclient.ClientException as ex:
-logger.warn("Head container failed: %s" % str(ex))
+logger.warn(_("Head container failed: %s") % str(ex))
return None
else:
if key == 'ObjectCount':

@@ -115,8 +115,10 @@ class TemplateResource(stack_resource.StackResource):
t_data = urlfetch.get(self.template_name,
allowed_schemes=self.allowed_schemes)
except (exceptions.RequestException, IOError) as r_exc:
-raise ValueError("Could not fetch remote template '%s': %s" %
-(self.template_name, str(r_exc)))
+raise ValueError(_("Could not fetch remote template "
+"'%(name)s': %(exc)s") % {
+'name': self.template_name,
+'exc': str(r_exc)})
else:
# TODO(Randall) Whoops, misunderstanding on my part; this
# doesn't actually persist to the db like I thought.
@@ -129,30 +131,33 @@ class TemplateResource(stack_resource.StackResource):

for n, fs in facade_schemata.items():
if fs.required and n not in self.properties_schema:
-msg = ("Required property %s for facade %s "
-"missing in provider") % (n, self.type())
+msg = (_("Required property %(n)s for facade %(type)s "
+"missing in provider") % {'n': n, 'type': self.type()})
raise exception.StackValidationFailed(message=msg)

ps = self.properties_schema.get(n)
if (n in self.properties_schema and
(fs.type != ps.type)):
# Type mismatch
-msg = ("Property %s type mismatch between facade %s (%s) "
-"and provider (%s)") % (n, self.type(),
-fs.type, ps.type)
+msg = (_("Property %(n)s type mismatch between facade %(type)s"
+" (%(fs_type)s) and provider (%(ps_type)s)") % {
+'n': n, 'type': self.type(),
+'fs_type': fs.type, 'ps_type': ps.type})
raise exception.StackValidationFailed(message=msg)

for n, ps in self.properties_schema.items():
if ps.required and n not in facade_schemata:
# Required property for template not present in facade
-msg = ("Provider requires property %s "
-"unknown in facade %s") % (n, self.type())
+msg = (_("Provider requires property %(n)s "
+"unknown in facade %(type)s") % {
+'n': n, 'type': self.type()})
raise exception.StackValidationFailed(message=msg)

for attr in facade_cls.attributes_schema:
if attr not in self.attributes_schema:
-msg = ("Attribute %s for facade %s "
-"missing in provider") % (attr, self.type())
+msg = (_("Attribute %(attr)s for facade %(type)s "
+"missing in provider") % {
+'attr': attr, 'type': self.type()})
raise exception.StackValidationFailed(message=msg)

def validate(self):

@@ -55,19 +55,20 @@ class User(resource.Resource):
# ignore the policy (don't reject it because we previously ignored
# and we don't want to break templates which previously worked
if not isinstance(policy, basestring):
-logger.warning("Ignoring policy %s, " % policy
-+ "must be string resource name")
+logger.warning(_("Ignoring policy %s, must be string "
+"resource name") % policy)
continue

try:
policy_rsrc = self.stack[policy]
except KeyError:
-logger.error("Policy %s does not exist in stack %s" %
-(policy, self.stack.name))
+logger.error(_("Policy %(policy)s does not exist in stack "
+"%(stack)s") % {
+'policy': policy, 'stack': self.stack.name})
return False

if not callable(getattr(policy_rsrc, 'access_allowed', None)):
-logger.error("Policy %s is not an AccessPolicy resource" %
+logger.error(_("Policy %s is not an AccessPolicy resource") %
policy)
return False

@@ -90,7 +91,8 @@ class User(resource.Resource):

def handle_delete(self):
if self.resource_id is None:
-logger.error("Cannot delete User resource before user created!")
+logger.error(_("Cannot delete User resource before user "
+"created!"))
return
try:
self.keystone().delete_stack_user(self.resource_id)
@@ -99,13 +101,15 @@ class User(resource.Resource):

def handle_suspend(self):
if self.resource_id is None:
-logger.error("Cannot suspend User resource before user created!")
+logger.error(_("Cannot suspend User resource before user "
+"created!"))
return
self.keystone().disable_stack_user(self.resource_id)

def handle_resume(self):
if self.resource_id is None:
-logger.error("Cannot resume User resource before user created!")
+logger.error(_("Cannot resume User resource before user "
+"created!"))
return
self.keystone().enable_stack_user(self.resource_id)

@@ -121,8 +125,8 @@ class User(resource.Resource):
policies = (self.properties['Policies'] or [])
for policy in policies:
if not isinstance(policy, basestring):
-logger.warning("Ignoring policy %s, " % policy
-+ "must be string resource name")
+logger.warning(_("Ignoring policy %s, must be string "
+"resource name") % policy)
continue
policy_rsrc = self.stack[policy]
if not policy_rsrc.access_allowed(resource_name):
@@ -168,12 +172,12 @@ class AccessKey(resource.Resource):
def handle_create(self):
user = self._get_user()
if user is None:
-raise exception.NotFound('could not find user %s' %
+raise exception.NotFound(_('could not find user %s') %
self.properties['UserName'])

kp = self.keystone().get_ec2_keypair(user.resource_id)
if not kp:
-raise exception.Error("Error creating ec2 keypair for user %s" %
+raise exception.Error(_("Error creating ec2 keypair for user %s") %
user)

self.resource_id_set(kp.access)
@@ -186,7 +190,7 @@ class AccessKey(resource.Resource):

user = self._get_user()
if user is None:
-logger.warning('Error deleting %s - user not found' % str(self))
+logger.warning(_('Error deleting %s - user not found') % str(self))
return
user_id = user.resource_id
if user_id:
@@ -203,23 +207,26 @@ class AccessKey(resource.Resource):
'''
if self._secret is None:
if not self.resource_id:
-logger.warn('could not get secret for %s Error:%s' %
-(self.properties['UserName'],
-"resource_id not yet set"))
+logger.warn(_('could not get secret for %(username)s '
+'Error:%(msg)s') % {
+'username': self.properties['UserName'],
+'msg': "resource_id not yet set"})
else:
try:
user_id = self._get_user().resource_id
kp = self.keystone().get_ec2_keypair(user_id)
except Exception as ex:
-logger.warn('could not get secret for %s Error:%s' %
-(self.properties['UserName'],
-str(ex)))
+logger.warn(_('could not get secret for %(username)s '
+'Error:%(msg)s') % {
+'username': self.properties['UserName'],
+'msg': str(ex)})
else:
if kp.access == self.resource_id:
self._secret = kp.secret
else:
-msg = ("Unexpected ec2 keypair, for %s access %s" %
-(user_id, kp.access))
+msg = (_("Unexpected ec2 keypair, for %(id)s access "
+"%(access)s") % {
+'id': user_id, 'access': kp.access})
logger.error(msg)

return self._secret or '000-000-000'
@@ -258,7 +265,7 @@ class AccessPolicy(resource.Resource):
# All of the provided resource names must exist in this stack
for resource in resources:
if resource not in self.stack:
-logger.error("AccessPolicy resource %s not in stack" %
+logger.error(_("AccessPolicy resource %s not in stack") %
resource)
raise exception.ResourceNotFound(resource_name=resource,
stack_name=self.stack.name)

@@ -128,8 +128,8 @@ class Volume(resource.Resource):
vol.get()

if vol.status == 'in-use':
-logger.warn('cant delete volume when in-use')
-raise exception.Error('Volume in use')
+logger.warn(_('cant delete volume when in-use'))
+raise exception.Error(_('Volume in use'))

vol.delete()
while True:
@@ -196,14 +196,15 @@ class VolumeAttachTask(object):

vol = self.clients.cinder().volumes.get(self.volume_id)
while vol.status == 'available' or vol.status == 'attaching':
-logger.debug('%s - volume status: %s' % (str(self), vol.status))
+logger.debug(_('%(name)s - volume status: %(status)s') % {
+'name': str(self), 'status': vol.status})
yield
vol.get()

if vol.status != 'in-use':
raise exception.Error(vol.status)

-logger.info('%s - complete' % str(self))
+logger.info(_('%s - complete') % str(self))


class VolumeDetachTask(object):
@@ -236,7 +237,7 @@ class VolumeDetachTask(object):
try:
vol = self.clients.cinder().volumes.get(self.volume_id)
except clients.cinderclient.exceptions.NotFound:
-logger.warning('%s - volume not found' % str(self))
+logger.warning(_('%s - volume not found') % str(self))
return

server_api = self.clients.nova().volumes
@@ -252,7 +253,7 @@ class VolumeDetachTask(object):
try:
vol.get()
while vol.status in ('in-use', 'detaching'):
-logger.debug('%s - volume still in use' % str(self))
+logger.debug(_('%s - volume still in use') % str(self))
yield

try:
@@ -263,12 +264,13 @@ class VolumeDetachTask(object):
pass
vol.get()

-logger.info('%s - status: %s' % (str(self), vol.status))
+logger.info(_('%(name)s - status: %(status)s') % {
+'name': str(self), 'status': vol.status})
if vol.status != 'available':
raise exception.Error(vol.status)

except clients.cinderclient.exceptions.NotFound:
-logger.warning('%s - volume not found' % str(self))
+logger.warning(_('%s - volume not found') % str(self))


class VolumeAttachment(resource.Resource):

@@ -71,8 +71,8 @@ class WaitConditionHandle(signal_responder.SignalResponder):
if self._metadata_format_ok(new_metadata):
rsrc_metadata = self.metadata
if new_metadata['UniqueId'] in rsrc_metadata:
-logger.warning("Overwriting Metadata item for UniqueId %s!" %
-new_metadata['UniqueId'])
+logger.warning(_("Overwriting Metadata item for UniqueId %s!")
+% new_metadata['UniqueId'])
safe_metadata = {}
for k in ('Data', 'Reason', 'Status'):
safe_metadata[k] = new_metadata[k]
@@ -81,8 +81,8 @@ class WaitConditionHandle(signal_responder.SignalResponder):
rsrc_metadata.update({new_metadata['UniqueId']: safe_metadata})
self.metadata = rsrc_metadata
else:
-logger.error("Metadata failed validation for %s" % self.name)
-raise ValueError("Metadata format invalid")
+logger.error(_("Metadata failed validation for %s") % self.name)
+raise ValueError(_("Metadata format invalid"))

def get_status(self):
'''
@@ -120,7 +120,8 @@ class WaitConditionFailure(Exception):
class WaitConditionTimeout(Exception):
def __init__(self, wait_condition, handle):
reasons = handle.get_status_reason(STATUS_SUCCESS)
-message = '%d of %d received' % (len(reasons), wait_condition.count)
+message = (_('%(len)d of %(count)d received') % {
+'len': len(reasons), 'count': wait_condition.count})
if reasons:
message += ' - %s' % reasons

@@ -158,20 +159,20 @@ class WaitCondition(resource.Resource):
handle_url = self.properties['Handle']
handle_id = identifier.ResourceIdentifier.from_arn_url(handle_url)
if handle_id.tenant != self.stack.context.tenant_id:
-raise ValueError("WaitCondition invalid Handle tenant %s" %
+raise ValueError(_("WaitCondition invalid Handle tenant %s") %
handle_id.tenant)
if handle_id.stack_name != self.stack.name:
-raise ValueError("WaitCondition invalid Handle stack %s" %
+raise ValueError(_("WaitCondition invalid Handle stack %s") %
handle_id.stack_name)
if handle_id.stack_id != self.stack.id:
-raise ValueError("WaitCondition invalid Handle stack %s" %
+raise ValueError(_("WaitCondition invalid Handle stack %s") %
handle_id.stack_id)
if handle_id.resource_name not in self.stack:
-raise ValueError("WaitCondition invalid Handle %s" %
+raise ValueError(_("WaitCondition invalid Handle %s") %
handle_id.resource_name)
if not isinstance(self.stack[handle_id.resource_name],
WaitConditionHandle):
-raise ValueError("WaitCondition invalid Handle %s" %
+raise ValueError(_("WaitCondition invalid Handle %s") %
handle_id.resource_name)

def _get_handle_resource_name(self):
@@ -185,18 +186,20 @@ class WaitCondition(resource.Resource):
yield
except scheduler.Timeout:
timeout = WaitConditionTimeout(self, handle)
-logger.info('%s Timed out (%s)' % (str(self), str(timeout)))
+logger.info(_('%(name)s Timed out (%(timeout)s)') % {
+'name': str(self), 'timeout': str(timeout)})
raise timeout

handle_status = handle.get_status()

if any(s != STATUS_SUCCESS for s in handle_status):
failure = WaitConditionFailure(self, handle)
-logger.info('%s Failed (%s)' % (str(self), str(failure)))
+logger.info(_('%(name)s Failed (%(failure)s)') % {
+'name': str(self), 'failure': str(failure)})
raise failure

if len(handle_status) >= self.count:
-logger.info("%s Succeeded" % str(self))
+logger.info(_("%s Succeeded") % str(self))
return

def handle_create(self):

@@ -105,7 +105,7 @@ class TaskRunner(object):
def _sleep(self, wait_time):
"""Sleep for the specified number of seconds."""
if ENABLE_SLEEP and wait_time is not None:
-logger.debug('%s sleeping' % str(self))
+logger.debug(_('%s sleeping') % str(self))
eventlet.sleep(wait_time)

def __call__(self, wait_time=1, timeout=None):
@@ -128,7 +128,7 @@ class TaskRunner(object):
"""
assert self._runner is None, "Task already started"

-logger.debug('%s starting' % str(self))
+logger.debug(_('%s starting') % str(self))

if timeout is not None:
self._timeout = Timeout(self, timeout)
@@ -140,7 +140,7 @@ class TaskRunner(object):
else:
self._runner = False
self._done = True
-logger.debug('%s done (not resumable)' % str(self))
+logger.debug(_('%s done (not resumable)') % str(self))

def step(self):
"""
@@ -151,7 +151,7 @@ class TaskRunner(object):
assert self._runner is not None, "Task not started"

if self._timeout is not None and self._timeout.expired():
-logger.info('%s timed out' % str(self))
+logger.info(_('%s timed out') % str(self))

try:
self._runner.throw(self._timeout)
@@ -161,13 +161,13 @@ class TaskRunner(object):
# Clean up in case task swallows exception without exiting
self.cancel()
else:
-logger.debug('%s running' % str(self))
+logger.debug(_('%s running') % str(self))

try:
next(self._runner)
except StopIteration:
self._done = True
-logger.debug('%s complete' % str(self))
+logger.debug(_('%s complete') % str(self))

return self._done

@@ -184,7 +184,7 @@ class TaskRunner(object):
def cancel(self):
"""Cancel the task if it is running."""
if self.started() and not self.done():
-logger.debug('%s cancelled' % str(self))
+logger.debug(_('%s cancelled') % str(self))
self._runner.close()
self._done = True

@@ -246,7 +246,7 @@ class EngineService(service.Service):
:param files: Files referenced from the template
:param args: Request parameters/args passed from API
"""
-logger.info('template is %s' % template)
+logger.info(_('template is %s') % template)

def _stack_create(stack):
# Create the stack, and create the periodic task if successful
@@ -255,7 +255,8 @@ class EngineService(service.Service):
# Schedule a periodic watcher task for this stack
self._start_watch_task(stack.id, cnxt)
else:
-logger.warning("Stack create failed, status %s" % stack.status)
+logger.warning(_("Stack create failed, status %s") %
+stack.status)

if db_api.stack_get_by_name(cnxt, stack_name):
raise exception.StackExists(stack_name=stack_name)
@@ -301,7 +302,7 @@ class EngineService(service.Service):
arg4 -> Stack Input Params
arg4 -> Request parameters/args passed from API
"""
-logger.info('template is %s' % template)
+logger.info(_('template is %s') % template)

# Get the database representation of the existing stack
db_stack = self._get_stack(cnxt, stack_identity)
@@ -345,7 +346,7 @@ class EngineService(service.Service):
arg3 -> Template of stack you want to create.
arg4 -> Stack Input Params
"""
-logger.info('validate_template')
+logger.info(_('validate_template'))
if template is None:
msg = _("No Template provided.")
return webob.exc.HTTPBadRequest(explanation=msg)
@@ -425,7 +426,7 @@ class EngineService(service.Service):
"""
st = self._get_stack(cnxt, stack_identity)

-logger.info('deleting stack %s' % st.name)
+logger.info(_('deleting stack %s') % st.name)

stack = parser.Stack.load(cnxt, stack=st)

@@ -530,7 +531,7 @@ class EngineService(service.Service):
try:
akey_rsrc = self.find_physical_resource(cnxt, access_key)
except exception.PhysicalResourceNotFound:
-logger.warning("access_key % not found!" % access_key)
+logger.warning(_("access_key % not found!") % access_key)
return False

akey_rsrc_id = identifier.ResourceIdentifier(**akey_rsrc)
@@ -540,9 +541,9 @@ class EngineService(service.Service):
ak_akey_rsrc = stack[akey_rsrc_id.resource_name]
return ak_akey_rsrc.access_allowed(resource_name)
else:
-logger.warning("Cannot access resource from wrong stack!")
+logger.warning(_("Cannot access resource from wrong stack!"))
else:
-logger.warning("Cannot access resource, invalid credentials!")
+logger.warning(_("Cannot access resource, invalid credentials!"))

return False

@@ -553,7 +554,8 @@ class EngineService(service.Service):

if cfg.CONF.heat_stack_user_role in cnxt.roles:
if not self._authorize_stack_user(cnxt, stack, resource_name):
-logger.warning("Access denied to resource %s" % resource_name)
+logger.warning(_("Access denied to resource %s")
+% resource_name)
raise exception.Forbidden()

if resource_name not in stack:
@@ -632,7 +634,7 @@ class EngineService(service.Service):
Handle request to perform suspend action on a stack
'''
def _stack_suspend(stack):
-logger.debug("suspending stack %s" % stack.name)
+logger.debug(_("suspending stack %s") % stack.name)
stack.suspend()

s = self._get_stack(cnxt, stack_identity)
@@ -646,7 +648,7 @@ class EngineService(service.Service):
Handle request to perform a resume action on a stack
'''
def _stack_resume(stack):
-logger.debug("resuming stack %s" % stack.name)
+logger.debug(_("resuming stack %s") % stack.name)
stack.resume()

s = self._get_stack(cnxt, stack_identity)
@@ -700,11 +702,11 @@ class EngineService(service.Service):
# Retrieve the stored credentials & create context
# Require tenant_safe=False to the stack_get to defeat tenant
# scoping otherwise we fail to retrieve the stack
-logger.debug("Periodic watcher task for stack %s" % sid)
+logger.debug(_("Periodic watcher task for stack %s") % sid)
admin_context = context.get_admin_context()
stack = db_api.stack_get(admin_context, sid, tenant_safe=False)
if not stack:
-logger.error("Unable to retrieve stack %s for periodic task" %
+logger.error(_("Unable to retrieve stack %s for periodic task") %
sid)
return
stack_context = self._load_user_creds(stack.user_creds_id)
@@ -718,8 +720,8 @@ class EngineService(service.Service):
try:
wrs = db_api.watch_rule_get_all_by_stack(stack_context, sid)
except Exception as ex:
-logger.warn('periodic_task db error (%s) %s' %
-('watch rule removed?', str(ex)))
+logger.warn(_('periodic_task db error (%(msg)s) %(ex)s') % {
+'msg': 'watch rule removed?', 'ex': str(ex)})
return

def run_alarm_action(actions, details):
@@ -784,7 +786,7 @@ class EngineService(service.Service):
try:
wrn = [w.name for w in db_api.watch_rule_get_all(cnxt)]
except Exception as ex:
-logger.warn('show_watch (all) db error %s' % str(ex))
+logger.warn(_('show_watch (all) db error %s') % str(ex))
return

wrs = [watchrule.WatchRule.load(cnxt, w) for w in wrn]
@@ -804,13 +806,13 @@ class EngineService(service.Service):
# namespace/metric, but we will want this at some point
# for now, the API can query all metric data and filter locally
if metric_namespace is not None or metric_name is not None:
-logger.error("Filtering by namespace/metric not yet supported")
+logger.error(_("Filtering by namespace/metric not yet supported"))
return

try:
wds = db_api.watch_data_get_all(cnxt)
except Exception as ex:
-logger.warn('show_metric (all) db error %s' % str(ex))
+logger.warn(_('show_metric (all) db error %s') % str(ex))
return

result = [api.format_watch_data(w) for w in wds]

@@ -26,6 +26,7 @@ from heat.engine import clients
from heat.engine import resource

from heat.openstack.common import log
+from heat.openstack.common.gettextutils import _

LOG = log.getLogger(__name__)
SIGNAL_TYPES = (
@@ -49,7 +50,7 @@ class SignalResponder(resource.Resource):
self.physical_resource_name())
kp = self.keystone().get_ec2_keypair(user_id)
if not kp:
-raise exception.Error("Error creating ec2 keypair for user %s" %
+raise exception.Error(_("Error creating ec2 keypair for user %s") %
user_id)
else:
self.resource_id_set(user_id)

@@ -24,6 +24,7 @@ from heat.engine import scheduler
from heat.engine import template as tmpl

from heat.openstack.common import log as logging
+from heat.openstack.common.gettextutils import _

logger = logging.getLogger(__name__)

@@ -67,7 +68,7 @@ class StackResource(resource.Resource):
parent_resource=self)

if self._nested is None:
-raise exception.NotFound('Nested stack not found in DB')
+raise exception.NotFound(_('Nested stack not found in DB'))

return self._nested

@@ -170,7 +171,7 @@ class StackResource(resource.Resource):
nested_stack = self.nested()
if nested_stack.state != (nested_stack.UPDATE,
nested_stack.COMPLETE):
-raise exception.Error("Nested stack update failed: %s" %
+raise exception.Error(_("Nested stack update failed: %s") %
nested_stack.status_reason)
return True

@@ -181,7 +182,7 @@ class StackResource(resource.Resource):
try:
stack = self.nested()
except exception.NotFound:
-logger.info("Stack not found to delete")
+logger.info(_("Stack not found to delete"))
else:
if stack is not None:
delete_task = scheduler.TaskRunner(stack.delete)

@@ -20,6 +20,7 @@ from heat.engine import resource
from heat.engine import scheduler

from heat.openstack.common import log as logging
+from heat.openstack.common.gettextutils import _

logger = logging.getLogger(__name__)

@@ -77,7 +78,7 @@ class StackUpdate(object):
def _remove_backup_resource(self, prev_res):
if prev_res.state not in ((prev_res.INIT, prev_res.COMPLETE),
(prev_res.DELETE, prev_res.COMPLETE)):
-logger.debug("Deleting backup resource %s" % prev_res.name)
+logger.debug(_("Deleting backup resource %s") % prev_res.name)
yield prev_res.destroy()

@staticmethod
@@ -101,17 +102,18 @@ class StackUpdate(object):
# Swap in the backup resource if it is in a valid state,
# instead of creating a new resource
if prev_res.status == prev_res.COMPLETE:
-logger.debug("Swapping in backup Resource %s" % res_name)
+logger.debug(_("Swapping in backup Resource %s") %
+res_name)
self._exchange_stacks(self.existing_stack[res_name],
prev_res)
return

-logger.debug("Deleting backup Resource %s" % res_name)
+logger.debug(_("Deleting backup Resource %s") % res_name)
yield prev_res.destroy()

# Back up existing resource
if res_name in self.existing_stack:
-logger.debug("Backing up existing Resource %s" % res_name)
+logger.debug(_("Backing up existing Resource %s") % res_name)
existing_res = self.existing_stack[res_name]
self.previous_stack[res_name] = existing_res
existing_res.state_set(existing_res.UPDATE, existing_res.COMPLETE)
@@ -131,8 +133,10 @@ class StackUpdate(object):
except resource.UpdateReplace:
pass
else:
-logger.info("Resource %s for stack %s updated" %
-(res_name, self.existing_stack.name))
+logger.info(_("Resource %(res_name)s for stack %(stack_name)s"
+" updated") % {
+'res_name': res_name,
+'stack_name': self.existing_stack.name})
return

yield self._create_resource(new_res)

@@ -17,6 +17,7 @@
import datetime
from heat.common import exception
from heat.openstack.common import log as logging
+from heat.openstack.common.gettextutils import _
from heat.openstack.common import timeutils
from heat.engine import timestamp
from heat.db import api as db_api
@@ -75,8 +76,9 @@ class WatchRule(object):
try:
watch = db_api.watch_rule_get_by_name(context, watch_name)
except Exception as ex:
-logger.warn('WatchRule.load (%s) db error %s' %
-(watch_name, str(ex)))
+logger.warn(_('WatchRule.load (%(watch_name)s) db error '
+'%(ex)s') % {
+'watch_name': watch_name, 'ex': str(ex)})
if watch is None:
raise exception.WatchRuleNotFound(watch_name=watch_name)
else:
@@ -209,7 +211,7 @@ class WatchRule(object):
data = 0
for d in self.watch_data:
if d.created_at < self.now - self.timeperiod:
-logger.debug('ignoring %s' % str(d.data))
+logger.debug(_('ignoring %s') % str(d.data))
continue
data = data + float(d.data[self.rule['MetricName']]['Value'])

@@ -250,7 +252,7 @@ class WatchRule(object):
self.stack_id, self.name, new_state)
actions = []
if self.ACTION_MAP[new_state] not in self.rule:
-logger.info('no action for new state %s',
+logger.info(_('no action for new state %s'),
new_state)
else:
s = db_api.stack_get(self.context, self.stack_id)
@@ -260,8 +262,8 @@ class WatchRule(object):
for refid in self.rule[self.ACTION_MAP[new_state]]:
actions.append(stack.resource_by_refid(refid).signal)
else:
-logger.warning("Could not process watch state %s for stack" %
-new_state)
+logger.warning(_("Could not process watch state %s for stack")
+% new_state)
return actions

def _to_ceilometer(self, data):
@@ -281,7 +283,8 @@ class WatchRule(object):
dims = dims[0]
sample['resource_metadata'] = dims
sample['resource_id'] = dims.get('InstanceId')
-logger.debug('new sample:%s data:%s' % (k, sample))
+logger.debug(_('new sample:%(k)s data:%(sample)s') % {
+'k': k, 'sample': sample})
clients.ceilometer().samples.create(**sample)

def create_watch_data(self, data):
@@ -293,7 +296,7 @@ class WatchRule(object):
return

if self.state == self.SUSPENDED:
-logger.debug('Ignoring metric data for %s, SUSPENDED state'
+logger.debug(_('Ignoring metric data for %s, SUSPENDED state')
% self.name)
return []

@@ -303,8 +306,9 @@ class WatchRule(object):
# options, e.g --haproxy try to push multiple metrics when we
# actually only care about one (the one we're alarming on)
# so just ignore any data which doesn't contain MetricName
-logger.debug('Ignoring metric data (only accept %s) : %s' %
-(self.rule['MetricName'], data))
+logger.debug(_('Ignoring metric data (only accept %(metric)s) '
+': %(data)s') % {
+'metric': self.rule['MetricName'], 'data': data})
return

watch_data = {
@@ -312,7 +316,8 @@ class WatchRule(object):
'watch_rule_id': self.id
}
wd = db_api.watch_data_create(None, watch_data)
-logger.debug('new watch:%s data:%s' % (self.name, str(wd.data)))
+logger.debug(_('new watch:%(name)s data:%(data)s')
+% {'name': self.name, 'data': str(wd.data)})

def state_set(self, state):
'''
@@ -337,11 +342,14 @@ class WatchRule(object):
if state != self.state:
actions = self.rule_actions(state)
if actions:
-logger.debug("Overriding state %s for watch %s with %s" %
-(self.state, self.name, state))
+logger.debug(_("Overriding state %(self_state)s for watch "
+"%(name)s with %(state)s") % {
+'self_state': self.state, 'name': self.name,
+'state': state})
else:
-logger.warning("Unable to override state %s for watch %s" %
-(self.state, self.name))
+logger.warning(_("Unable to override state %(state)s for "
+"watch %(name)s") % {
+'state': self.state, 'name': self.name})
return actions


@@ -16,6 +16,7 @@ from heat.engine import resource
from heat.engine import signal_responder

from heat.openstack.common import log as logging
+from heat.openstack.common.gettextutils import _

logger = logging.getLogger(__name__)

@@ -29,22 +30,27 @@ class GenericResource(resource.Resource):
'Foo': 'Another generic attribute'}

def handle_create(self):
-logger.warning('Creating generic resource (Type "%s")' % self.type())
+logger.warning(_('Creating generic resource (Type "%s")') %
+self.type())

def handle_update(self, json_snippet, tmpl_diff, prop_diff):
-logger.warning('Updating generic resource (Type "%s")' % self.type())
+logger.warning(_('Updating generic resource (Type "%s")') %
+self.type())

def handle_delete(self):
-logger.warning('Deleting generic resource (Type "%s")' % self.type())
+logger.warning(_('Deleting generic resource (Type "%s")') %
+self.type())

def _resolve_attribute(self, name):
return self.name

def handle_suspend(self):
-logger.warning('Suspending generic resource (Type "%s")' % self.type())
+logger.warning(_('Suspending generic resource (Type "%s")') %
+self.type())

def handle_resume(self):
-logger.warning('Resuming generic resource (Type "%s")' % self.type())
+logger.warning(_('Resuming generic resource (Type "%s")') %
+self.type())


class ResourceWithProps(GenericResource):
@@ -61,8 +67,8 @@ class SignalResource(signal_responder.SignalResponder):
attributes_schema = {'AlarmUrl': 'Get a signed webhook'}

def handle_signal(self, details=None):
-logger.warning('Signaled resource (Type "%s") %s' % (self.type(),
-details))
+logger.warning(_('Signaled resource (Type "%(type)s") %(details)s')
+% {'type': self.type(), 'details': details})

def _resolve_attribute(self, name):
if name == 'AlarmUrl' and self.resource_id is not None:
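For reference, the from heat.openstack.common.gettextutils import _ line added near the top of each file above binds _ to a gettext translation function for Heat's message catalogue. A hedged sketch of what such a module might provide; the real oslo-incubator gettextutils may differ in detail (for example lazy translation support and how the domain is configured):

```python
# Hedged sketch of a minimal gettextutils-style module; illustrative only.
import gettext

# 'heat' is assumed to be the translation domain; fallback=True returns a
# NullTranslations object when no compiled catalogue is installed, so _()
# degrades to returning the original English string.
_translations = gettext.translation('heat', fallback=True)


def _(msg):
    """Translate msg via the catalogue, or return it unchanged."""
    return _translations.gettext(msg)
```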