Remove context object from oslo.log calls

Removed the context object from logging calls, since Nova uses
oslo.context's RequestContext, which means the context is already in
scope when logging happens. Added a hacking check (N353) to warn if
someone passes it in logging statements in the future.

Change-Id: I5aaa869f2e6155964827e659d18e2bcaad9d866b
Closes-Bug: #1500896
Author: Sivasathurappan Radhakrishnan
Date:   2016-05-16 22:09:44 +00:00
parent 69784047ea
commit b3d58ed718
8 changed files with 111 additions and 79 deletions
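
Before the per-file diffs, the pattern being enforced looks like this, as a
minimal standalone sketch (the reboot_instance function and its arguments are
hypothetical, not part of the commit; the premise is that oslo.context's
RequestContext stores itself in thread-local state, where oslo.log picks it up
without an explicit kwarg):

    from oslo_log import log as logging

    from nova.i18n import _LI

    LOG = logging.getLogger(__name__)


    def reboot_instance(context, instance):
        # Before this change the request context was passed explicitly:
        #     LOG.info(_LI("Rebooting instance"), context=context,
        #              instance=instance)
        # After it: RequestContext registers itself in thread-local storage
        # on creation, oslo.log reads it from there, and the redundant kwarg
        # is flagged by the new hacking check N353.
        LOG.info(_LI("Rebooting instance"), instance=instance)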

HACKING.rst

@@ -63,6 +63,7 @@ Nova Specific Commandments
 - [N350] Policy registration should be in the central location ``nova/policies/``
 - [N351] Do not use the oslo_policy.policy.Enforcer.enforce() method.
 - [N352] LOG.warn is deprecated. Enforce use of LOG.warning.
+- [N353] Validate that context objects are not passed in logging calls.

 Creating Unit Tests
 -------------------

nova/compute/api.py

@@ -1827,14 +1827,14 @@ class API(base.Base):
                              'id: %(id)s, status: %(status)s'),
                          {'id': migration.id,
                           'status': migration.status},
-                         context=context, instance=instance)
+                         instance=instance)
                 break
             except exception.MigrationNotFoundByStatus:
                 pass
         if not migration:
             LOG.info(_LI('Instance may have been confirmed during delete'),
-                     context=context, instance=instance)
+                     instance=instance)
             return

         src_host = migration.source_compute
@@ -1851,8 +1851,7 @@ class API(base.Base):
             deltas = compute_utils.downsize_quota_delta(context, instance)
         except KeyError:
             LOG.info(_LI('Migration %s may have been confirmed during '
-                         'delete'),
-                     migration.id, context=context, instance=instance)
+                         'delete'), migration.id, instance=instance)
             return

         quotas = compute_utils.reserve_quota_delta(context, deltas, instance)
@@ -2500,7 +2499,7 @@ class API(base.Base):
         if compute_utils.is_volume_backed_instance(context, instance):
             LOG.info(_LI("It's not supported to backup volume backed "
-                         "instance."), context=context, instance=instance)
+                         "instance."), instance=instance)
             raise exception.InvalidRequest()
         else:
             image_meta = self._create_image(context, instance,
@@ -2646,7 +2645,7 @@ class API(base.Base):
         else:
             LOG.info(_LI('Skipping quiescing instance: '
                          '%(reason)s.'), {'reason': err},
-                     context=context, instance=instance)
+                     instance=instance)

         bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                 context, instance.uuid)
@@ -3312,7 +3311,7 @@ class API(base.Base):
             return
         context = context.elevated()
-        LOG.debug('Locking', context=context, instance=instance)
+        LOG.debug('Locking', instance=instance)
         instance.locked = True
         instance.locked_by = 'owner' if is_owner else 'admin'
         instance.save()
@@ -3328,7 +3327,7 @@ class API(base.Base):
     def unlock(self, context, instance):
         """Unlock the given instance."""
         context = context.elevated()
-        LOG.debug('Unlocking', context=context, instance=instance)
+        LOG.debug('Unlocking', instance=instance)
         instance.locked = False
         instance.locked_by = None
         instance.save()
@@ -4443,7 +4442,7 @@ class SecurityGroupAPI(base.Base, security_group_base.SecurityGroupBase):
             msg = _("Quota exceeded, too many security groups.")
             self.raise_over_quota(msg)

-        LOG.info(_LI("Create Security Group %s"), name, context=context)
+        LOG.info(_LI("Create Security Group %s"), name)

         try:
             self.ensure_default(context)
@@ -4551,8 +4550,7 @@ class SecurityGroupAPI(base.Base, security_group_base.SecurityGroupBase):
                 LOG.exception(_LE("Failed to update usages deallocating "
                                   "security group"))

-        LOG.info(_LI("Delete security group %s"), security_group['name'],
-                 context=context)
+        LOG.info(_LI("Delete security group %s"), security_group['name'])
         self.db.security_group_destroy(context, security_group['id'])

         # Commit the reservations

nova/compute/manager.py

@@ -1746,8 +1746,7 @@ class ComputeManager(manager.Manager):
                              node=None, limits=None):
         try:
-            LOG.debug('Starting instance...', context=context,
-                      instance=instance)
+            LOG.debug('Starting instance...', instance=instance)
             instance.vm_state = vm_states.BUILDING
             instance.task_state = None
             instance.save(expected_task_state=
@@ -2170,8 +2169,7 @@ class ComputeManager(manager.Manager):
            trying to teardown networking
         """
         context = context.elevated()
-        LOG.info(_LI('Terminating instance'),
-                 context=context, instance=instance)
+        LOG.info(_LI('Terminating instance'), instance=instance)

         if notify:
             self._notify_about_instance_usage(context, instance,
@@ -2653,8 +2651,7 @@ class ComputeManager(manager.Manager):
         """
         context = context.elevated()

-        LOG.info(_LI("Rebuilding instance"), context=context,
-                 instance=instance)
+        LOG.info(_LI("Rebuilding instance"), instance=instance)

         if scheduled_node is not None:
             rt = self._get_resource_tracker(scheduled_node)
             rebuild_claim = rt.rebuild_claim
@@ -2909,7 +2906,7 @@ class ComputeManager(manager.Manager):
                            task_states.REBOOT_PENDING_HARD,
                            task_states.REBOOT_STARTED_HARD)
         context = context.elevated()
-        LOG.info(_LI("Rebooting instance"), context=context, instance=instance)
+        LOG.info(_LI("Rebooting instance"), instance=instance)

         block_device_info = self._get_instance_block_device_info(context,
                                                                  instance)
@@ -2927,7 +2924,7 @@ class ComputeManager(manager.Manager):
             LOG.warning(_LW('trying to reboot a non-running instance:'
                             ' (state: %(state)s expected: %(running)s)'),
                         {'state': state, 'running': running},
-                        context=context, instance=instance)
+                        instance=instance)

         def bad_volumes_callback(bad_devices):
             self._handle_bad_volumes_detached(
@@ -2961,7 +2958,7 @@ class ComputeManager(manager.Manager):
                 new_power_state = self._get_power_state(context, instance)
                 if new_power_state == power_state.RUNNING:
                     LOG.warning(_LW('Reboot failed but instance is running'),
-                                context=context, instance=instance)
+                                instance=instance)
                     compute_utils.add_instance_fault_from_exc(context,
                             instance, error, exc_info)
                     self._notify_about_instance_usage(context, instance,
@@ -2969,7 +2966,7 @@ class ComputeManager(manager.Manager):
                     ctxt.reraise = False
                 else:
                     LOG.error(_LE('Cannot reboot instance: %s'), error,
-                              context=context, instance=instance)
+                              instance=instance)
                     self._set_instance_obj_error_state(context, instance)

         if not new_power_state:
@@ -2981,7 +2978,7 @@ class ComputeManager(manager.Manager):
             instance.save()
         except exception.InstanceNotFound:
             LOG.warning(_LW("Instance disappeared during reboot"),
-                        context=context, instance=instance)
+                        instance=instance)

         self._notify_about_instance_usage(context, instance, "reboot.end")
@@ -3045,8 +3042,7 @@ class ComputeManager(manager.Manager):
         try:
             instance.save()

-            LOG.info(_LI('instance snapshotting'), context=context,
-                     instance=instance)
+            LOG.info(_LI('instance snapshotting'), instance=instance)

             if instance.power_state != power_state.RUNNING:
                 state = instance.power_state
@@ -3275,7 +3271,7 @@ class ComputeManager(manager.Manager):
     def rescue_instance(self, context, instance, rescue_password,
                         rescue_image_ref, clean_shutdown):
         context = context.elevated()
-        LOG.info(_LI('Rescuing'), context=context, instance=instance)
+        LOG.info(_LI('Rescuing'), instance=instance)

         admin_password = (rescue_password if rescue_password else
                           utils.generate_password())
@@ -3324,7 +3320,7 @@ class ComputeManager(manager.Manager):
     @wrap_instance_fault
     def unrescue_instance(self, context, instance):
         context = context.elevated()
-        LOG.info(_LI('Unrescuing'), context=context, instance=instance)
+        LOG.info(_LI('Unrescuing'), instance=instance)

         network_info = self.network_api.get_instance_nw_info(context, instance)
         self._notify_about_instance_usage(context, instance,
@@ -3365,7 +3361,7 @@ class ComputeManager(manager.Manager):
         # NOTE(wangpan): Get the migration status from db, if it has been
         #                confirmed, we do nothing and return here
         LOG.debug("Going to confirm migration %s", migration_id,
-                  context=context, instance=instance)
+                  instance=instance)
         try:
             # TODO(russellb) Why are we sending the migration object just
             # to turn around and look it up from the db again?
@@ -3373,13 +3369,13 @@ class ComputeManager(manager.Manager):
                 context.elevated(), migration_id)
         except exception.MigrationNotFound:
             LOG.error(_LE("Migration %s is not found during confirmation"),
-                      migration_id, context=context, instance=instance)
+                      migration_id, instance=instance)
             quotas.rollback()
             return

         if migration.status == 'confirmed':
             LOG.info(_LI("Migration %s is already confirmed"),
-                     migration_id, context=context, instance=instance)
+                     migration_id, instance=instance)
             quotas.rollback()
             return
         elif migration.status not in ('finished', 'confirming'):
@@ -3387,7 +3383,7 @@ class ComputeManager(manager.Manager):
                         "of migration %(id)s, exit confirmation "
                         "process"),
                         {"status": migration.status, "id": migration_id},
-                        context=context, instance=instance)
+                        instance=instance)
             quotas.rollback()
             return
@@ -3400,7 +3396,7 @@ class ComputeManager(manager.Manager):
                 expected_attrs=expected_attrs)
         except exception.InstanceNotFound:
             LOG.info(_LI("Instance is not found during confirmation"),
-                     context=context, instance=instance)
+                     instance=instance)
             quotas.rollback()
             return
@@ -3645,7 +3641,7 @@ class ComputeManager(manager.Manager):
         rt = self._get_resource_tracker(node)
         with rt.resize_claim(context, instance, instance_type,
                              image_meta=image, limits=limits) as claim:
-            LOG.info(_LI('Migrating'), context=context, instance=instance)
+            LOG.info(_LI('Migrating'), instance=instance)
             self.compute_rpcapi.resize_instance(
                 context, instance, claim.migration, image,
                 instance_type, quotas.reservations,
@@ -4010,7 +4006,7 @@ class ComputeManager(manager.Manager):
     def pause_instance(self, context, instance):
         """Pause an instance on this host."""
         context = context.elevated()
-        LOG.info(_LI('Pausing'), context=context, instance=instance)
+        LOG.info(_LI('Pausing'), instance=instance)
         self._notify_about_instance_usage(context, instance, 'pause.start')
         compute_utils.notify_about_instance_action(context, instance,
                self.host, action=fields.NotificationAction.PAUSE,
@@ -4032,7 +4028,7 @@ class ComputeManager(manager.Manager):
     def unpause_instance(self, context, instance):
         """Unpause a paused instance on this host."""
         context = context.elevated()
-        LOG.info(_LI('Unpausing'), context=context, instance=instance)
+        LOG.info(_LI('Unpausing'), instance=instance)
         self._notify_about_instance_usage(context, instance, 'unpause.start')
         self.driver.unpause(instance)
         instance.power_state = self._get_power_state(context, instance)
@@ -4069,8 +4065,7 @@ class ComputeManager(manager.Manager):
         """Retrieve diagnostics for an instance on this host."""
         current_power_state = self._get_power_state(context, instance)
         if current_power_state == power_state.RUNNING:
-            LOG.info(_LI("Retrieving diagnostics"), context=context,
-                     instance=instance)
+            LOG.info(_LI("Retrieving diagnostics"), instance=instance)
             return self.driver.get_diagnostics(instance)
         else:
             raise exception.InstanceInvalidState(
@@ -4087,8 +4082,7 @@ class ComputeManager(manager.Manager):
         """Retrieve diagnostics for an instance on this host."""
         current_power_state = self._get_power_state(context, instance)
         if current_power_state == power_state.RUNNING:
-            LOG.info(_LI("Retrieving diagnostics"), context=context,
-                     instance=instance)
+            LOG.info(_LI("Retrieving diagnostics"), instance=instance)
             diags = self.driver.get_instance_diagnostics(instance)
             return diags.serialize()
         else:
@@ -4131,7 +4125,7 @@ class ComputeManager(manager.Manager):
     def resume_instance(self, context, instance):
         """Resume the given suspended instance."""
         context = context.elevated()
-        LOG.info(_LI('Resuming'), context=context, instance=instance)
+        LOG.info(_LI('Resuming'), instance=instance)

         self._notify_about_instance_usage(context, instance, 'resume.start')
         network_info = self.network_api.get_instance_nw_info(context, instance)
@@ -4381,12 +4375,12 @@ class ComputeManager(manager.Manager):
     @wrap_instance_fault
     def reset_network(self, context, instance):
         """Reset networking on the given instance."""
-        LOG.debug('Reset network', context=context, instance=instance)
+        LOG.debug('Reset network', instance=instance)
         self.driver.reset_network(instance)

     def _inject_network_info(self, context, instance, network_info):
         """Inject network info for the given instance."""
-        LOG.debug('Inject network info', context=context, instance=instance)
+        LOG.debug('Inject network info', instance=instance)
         LOG.debug('network_info to inject: |%s|', network_info,
                   instance=instance)
@@ -4407,8 +4401,7 @@ class ComputeManager(manager.Manager):
     def get_console_output(self, context, instance, tail_length):
         """Send the console output for the given instance."""
         context = context.elevated()
-        LOG.info(_LI("Get console output"), context=context,
-                 instance=instance)
+        LOG.info(_LI("Get console output"), instance=instance)
         output = self.driver.get_console_output(context, instance)

         if type(output) is six.text_type:
@@ -4677,7 +4670,7 @@ class ComputeManager(manager.Manager):
         LOG.info(_LI('Attaching volume %(volume_id)s to %(mountpoint)s'),
                  {'volume_id': bdm.volume_id,
                   'mountpoint': bdm['mount_device']},
-                 context=context, instance=instance)
+                 instance=instance)
         try:
             bdm.attach(context, instance, self.volume_api, self.driver,
                        do_check_attach=False, do_driver_attach=True)
@@ -4687,7 +4680,7 @@ class ComputeManager(manager.Manager):
                                   "at %(mountpoint)s"),
                               {'volume_id': bdm.volume_id,
                                'mountpoint': bdm['mount_device']},
-                              context=context, instance=instance)
+                              instance=instance)
                 self.volume_api.unreserve_volume(context, bdm.volume_id)

         info = {'volume_id': bdm.volume_id}
@@ -4701,12 +4694,12 @@ class ComputeManager(manager.Manager):
         LOG.info(_LI('Detach volume %(volume_id)s from mountpoint %(mp)s'),
                  {'volume_id': volume_id, 'mp': mp},
-                 context=context, instance=instance)
+                 instance=instance)
         try:
             if not self.driver.instance_exists(instance):
                 LOG.warning(_LW('Detaching volume from unknown instance'),
-                            context=context, instance=instance)
+                            instance=instance)

             encryption = encryptors.get_encryption_metadata(
                 context, self.volume_api, volume_id, connection_info)
@@ -4725,7 +4718,7 @@ class ComputeManager(manager.Manager):
                 LOG.exception(_LE('Failed to detach volume %(volume_id)s '
                                   'from %(mp)s'),
                               {'volume_id': volume_id, 'mp': mp},
-                              context=context, instance=instance)
+                              instance=instance)
                 self.volume_api.roll_detaching(context, volume_id)

     def _detach_volume(self, context, volume_id, instance, destroy_bdm=True,
@@ -4872,7 +4865,7 @@ class ComputeManager(manager.Manager):
                       "connection infos: new: %(new_cinfo)s; "
                       "old: %(old_cinfo)s",
                       {'new_cinfo': new_cinfo, 'old_cinfo': old_cinfo},
-                      contex=context, instance=instance)
+                      instance=instance)
             self.driver.swap_volume(old_cinfo, new_cinfo, instance, mountpoint,
                                     resize_to)
         except Exception:
@@ -4883,14 +4876,12 @@ class ComputeManager(manager.Manager):
                           "for %(new_volume_id)s")
                 LOG.exception(msg, {'old_volume_id': old_volume_id,
                                     'new_volume_id': new_volume_id},
-                              context=context,
                               instance=instance)
             else:
                 msg = _LE("Failed to connect to volume %(volume_id)s "
                           "with volume at %(mountpoint)s")
                 LOG.exception(msg, {'volume_id': new_volume_id,
                                     'mountpoint': bdm['device_name']},
-                              context=context,
                               instance=instance)
             self.volume_api.roll_detaching(context, old_volume_id)
             self.volume_api.unreserve_volume(context, new_volume_id)
@@ -4899,7 +4890,7 @@ class ComputeManager(manager.Manager):
             if new_cinfo:
                 LOG.debug("swap_volume: calling Cinder terminate_connection "
                           "for %(volume)s", {'volume': conn_volume},
-                          context=context, instance=instance)
+                          instance=instance)
                 self.volume_api.terminate_connection(context,
                                                      conn_volume,
                                                      connector)
@@ -4912,7 +4903,7 @@ class ComputeManager(manager.Manager):
                 error=failed)
         LOG.debug("swap_volume: Cinder migrate_volume_completion "
                   "returned: %(comp_ret)s", {'comp_ret': comp_ret},
-                  context=context, instance=instance)
+                  instance=instance)

         return (comp_ret, new_cinfo)
@@ -4935,7 +4926,7 @@ class ComputeManager(manager.Manager):
         LOG.info(_LI('Swapping volume %(old_volume)s for %(new_volume)s'),
                  {'old_volume': old_volume_id, 'new_volume': new_volume_id},
-                 context=context, instance=instance)
+                 instance=instance)
         comp_ret, new_cinfo = self._swap_volume(context, instance,
                                                 bdm,
                                                 connector,
@@ -4960,7 +4951,7 @@ class ComputeManager(manager.Manager):
         LOG.debug("swap_volume: Updating volume %(volume_id)s BDM record with "
                   "%(updates)s", {'volume_id': bdm.volume_id,
                                   'updates': values},
-                  context=context, instance=instance)
+                  instance=instance)
         bdm.update(values)
         bdm.save()
@@ -6786,7 +6777,7 @@ class ComputeManager(manager.Manager):
                     migration.save()
                 except exception.MigrationNotFound:
                     LOG.warning(_LW("Migration %s is not found."),
-                                migration.id, context=context,
+                                migration.id,
                                 instance=instance)
                     break

nova/hacking/checks.py

@@ -103,6 +103,8 @@ spawn_re = re.compile(
 contextlib_nested = re.compile(r"^with (contextlib\.)?nested\(")
 doubled_words_re = re.compile(
     r"\b(then?|[iao]n|i[fst]|but|f?or|at|and|[dt]o)\s+\1\b")
+log_remove_context = re.compile(
+    r"(.)*LOG\.(.*)\(.*(context=[_a-zA-Z0-9].*)+.*\)")


 class BaseASTChecker(ast.NodeVisitor):
@@ -769,6 +771,29 @@ def no_log_warn(logical_line):
         yield (0, msg)


+def check_context_log(logical_line, physical_line, filename):
+    """Check whether context is being passed to the logs
+
+    Not correct: LOG.info(_LI("Rebooting instance"), context=context)
+    Correct: LOG.info(_LI("Rebooting instance"))
+
+    https://bugs.launchpad.net/nova/+bug/1500896
+
+    N353
+    """
+    if "nova/tests" in filename:
+        return
+
+    if pep8.noqa(physical_line):
+        return
+
+    if log_remove_context.match(logical_line):
+        yield (0,
+               "N353: Nova is using oslo.context's RequestContext "
+               "which means the context object is in scope when "
+               "doing logging using oslo.log, so no need to pass it as "
+               "kwarg.")
+
+
 def factory(register):
     register(import_no_db_in_virt)
     register(no_db_session_in_public_api)
@@ -808,3 +833,4 @@ def factory(register):
     register(no_os_popen)
     register(no_log_warn)
     register(CheckForUncalledTestClosure)
+    register(check_context_log)
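
Taken in isolation, the new regex behaves roughly as follows (a standalone
sketch: the pattern is copied verbatim from the check above, the sample lines
are hypothetical):

    import re

    log_remove_context = re.compile(
        r"(.)*LOG\.(.*)\(.*(context=[_a-zA-Z0-9].*)+.*\)")

    # A context kwarg anywhere inside a LOG.<level>() call matches...
    assert log_remove_context.match(
        'LOG.info(_LI("Rebooting instance"), context=context)')
    # ...while the same call without the kwarg does not.
    assert not log_remove_context.match(
        'LOG.info(_LI("Rebooting instance"), instance=instance)')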

nova/network/floating_ips.py

@@ -97,7 +97,7 @@ class FloatingIP(object):
                                               True)
             LOG.debug("floating IP allocation for instance "
                       "|%s|", floating_address,
-                      instance_uuid=instance_uuid, context=context)
+                      instance_uuid=instance_uuid)

         # get the first fixed address belonging to the instance
         fixed_ips = nw_info.fixed_ips()

nova/network/manager.py

@@ -273,7 +273,7 @@ class NetworkManager(manager.Manager):
             network_ref = objects.Network._from_db_object(
                 context, objects.Network(), network_ref)
             LOG.debug('Setting host %s for network %s', self.host,
-                      network_ref.uuid, context=context)
+                      network_ref.uuid)
             network_ref.host = self.host
             network_ref.save()
             return self.host
@@ -402,7 +402,7 @@ class NetworkManager(manager.Manager):
         networks_list = [self._get_network_dict(network)
                          for network in networks]
         LOG.debug('Networks retrieved for instance: |%s|',
-                  networks_list, context=context, instance_uuid=instance_uuid)
+                  networks_list, instance_uuid=instance_uuid)

         try:
             self._allocate_mac_addresses(admin_context, instance_uuid,
@@ -425,8 +425,7 @@ class NetworkManager(manager.Manager):
         net_info = self.get_instance_nw_info(admin_context, instance_uuid,
                                              rxtx_factor, host)
         LOG.info(_LI("Allocated network: '%s' for instance"), net_info,
-                 instance_uuid=instance_uuid,
-                 context=context)
+                 instance_uuid=instance_uuid)
         return net_info

     def deallocate_for_instance(self, context, **kwargs):
@@ -475,7 +474,7 @@ class NetworkManager(manager.Manager):
             network_ids = set([])
             fixed_ips = []
         LOG.debug("Network deallocation for instance",
-                  context=context, instance_uuid=instance_uuid)
+                  instance_uuid=instance_uuid)
         # deallocate fixed ips
         for fixed_ip in fixed_ips:
             self.deallocate_fixed_ip(context, fixed_ip, host=host,
@@ -488,7 +487,7 @@ class NetworkManager(manager.Manager):
         objects.VirtualInterface.delete_by_instance_uuid(
             read_deleted_context, instance_uuid)
         LOG.info(_LI("Network deallocated for instance (fixed IPs: '%s')"),
-                 fixed_ips, context=context, instance_uuid=instance_uuid)
+                 fixed_ips, instance_uuid=instance_uuid)

     @messaging.expected_exceptions(exception.InstanceNotFound)
     def get_instance_nw_info(self, context, instance_id, rxtx_factor,
@@ -1029,31 +1028,29 @@ class NetworkManager(manager.Manager):
     def lease_fixed_ip(self, context, address):
         """Called by dhcp-bridge when IP is leased."""
-        LOG.debug('Leased IP |%s|', address, context=context)
+        LOG.debug('Leased IP |%s|', address)
         fixed_ip = objects.FixedIP.get_by_address(context, address)

         if fixed_ip.instance_uuid is None:
-            LOG.warning(_LW('IP %s leased that is not associated'), fixed_ip,
-                        context=context)
+            LOG.warning(_LW('IP %s leased that is not associated'), fixed_ip)
             return

         fixed_ip.leased = True
         fixed_ip.save()
         if not fixed_ip.allocated:
             LOG.warning(_LW('IP |%s| leased that isn\'t allocated'), fixed_ip,
-                        context=context, instance_uuid=fixed_ip.instance_uuid)
+                        instance_uuid=fixed_ip.instance_uuid)

     def release_fixed_ip(self, context, address, mac=None):
         """Called by dhcp-bridge when IP is released."""
-        LOG.debug('Released IP |%s|', address, context=context)
+        LOG.debug('Released IP |%s|', address)
         fixed_ip = objects.FixedIP.get_by_address(context, address)

         if fixed_ip.instance_uuid is None:
-            LOG.warning(_LW('IP %s released that is not associated'), fixed_ip,
-                        context=context)
+            LOG.warning(_LW('IP %s released that is not associated'), fixed_ip)
             return
         if not fixed_ip.leased:
             LOG.warning(_LW('IP %s released that was not leased'), fixed_ip,
-                        context=context, instance_uuid=fixed_ip.instance_uuid)
+                        instance_uuid=fixed_ip.instance_uuid)
         else:
             fixed_ip.leased = False
         fixed_ip.save()

nova/tests/unit/test_hacking.py

@@ -770,3 +770,24 @@ class HackingTestCase(test.NoDBTestCase):
         self.assertEqual(1, len(list(func('for i in xrange (10)'))))
         self.assertEqual(0, len(list(func('for i in range(10)'))))
         self.assertEqual(0, len(list(func('for i in six.moves.range(10)'))))
+
+    def test_log_context(self):
+        code = """
+                  LOG.info(_LI("Rebooting instance"),
+                           context=context, instance=instance)
+               """
+        errors = [(1, 0, 'N353')]
+        self._assert_has_errors(code, checks.check_context_log,
+                                expected_errors=errors)
+        code = """
+                  LOG.info(_LI("Rebooting instance"),
+                           context=admin_context, instance=instance)
+               """
+        errors = [(1, 0, 'N353')]
+        self._assert_has_errors(code, checks.check_context_log,
+                                expected_errors=errors)
+        code = """
+                  LOG.info(_LI("Rebooting instance"),
+                           instance=instance)
+               """
+        self._assert_has_no_errors(code, checks.check_context_log)
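
Outside the test harness, the check can also be exercised directly as a
generator (a hypothetical snippet assuming the module layout in this commit:
check_context_log takes the logical line, the physical line, and the filename,
and yields an (offset, message) tuple on a hit):

    from nova.hacking import checks

    line = 'LOG.info(_LI("Rebooting instance"), context=context)'
    # Files under nova/tests are exempt, so use a path outside that tree.
    errors = list(checks.check_context_log(line, line, 'nova/compute/api.py'))
    assert errors and errors[0][1].startswith("N353")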

nova/virt/block_device.py

@@ -278,7 +278,7 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
                               "%(volume_id)s at %(mountpoint)s"),
                           {'volume_id': volume_id,
                            'mountpoint': self['mount_device']},
-                          context=context, instance=instance)
+                          instance=instance)
                 volume_api.terminate_connection(context, volume_id,
                                                 connector)
         self['connection_info'] = connection_info
@@ -310,8 +310,7 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
                               "%(volume_id)s at %(mount_point)s."),
                           {'volume_id': volume_id,
                            'mount_point': self['mount_device']},
-                          exc_info=True, context=context,
-                          instance=instance)
+                          exc_info=True, instance=instance)
                 volume_api.terminate_connection(context, volume_id,
                                                 connector)
@@ -484,30 +483,29 @@ def convert_volume(volume_bdm):

 def attach_block_devices(block_device_mapping, *attach_args, **attach_kwargs):
     def _log_and_attach(bdm):
-        context = attach_args[0]
         instance = attach_args[1]
         if bdm.get('volume_id'):
             LOG.info(_LI('Booting with volume %(volume_id)s at '
                          '%(mountpoint)s'),
                      {'volume_id': bdm.volume_id,
                       'mountpoint': bdm['mount_device']},
-                     context=context, instance=instance)
+                     instance=instance)
         elif bdm.get('snapshot_id'):
             LOG.info(_LI('Booting with volume snapshot %(snapshot_id)s at '
                          '%(mountpoint)s'),
                      {'snapshot_id': bdm.snapshot_id,
                       'mountpoint': bdm['mount_device']},
-                     context=context, instance=instance)
+                     instance=instance)
         elif bdm.get('image_id'):
             LOG.info(_LI('Booting with volume-backed-image %(image_id)s at '
                          '%(mountpoint)s'),
                      {'image_id': bdm.image_id,
                       'mountpoint': bdm['mount_device']},
-                     context=context, instance=instance)
+                     instance=instance)
         else:
             LOG.info(_LI('Booting with blank volume at %(mountpoint)s'),
                      {'mountpoint': bdm['mount_device']},
-                     context=context, instance=instance)
+                     instance=instance)

         bdm.attach(*attach_args, **attach_kwargs)