Enable some off-by-default checks
Some of the available checks are disabled by default, such as: [H106] Don't put vim configuration in source files; [H203] Use assertIs(Not)None to check for None; [H904] Use ',' instead of '%' — string interpolation should be delayed so it is handled by the logging code, rather than being done at the point of the logging call. Change-Id: Ie985fcf78997a86d41e40eacbb4a5ace8592a348
This commit is contained in:
parent
5757655f1d
commit
fb17422c86
@ -78,7 +78,7 @@ class ZaqarNotification(hook.HookBase):
|
|||||||
|
|
||||||
def _execute_pre_hook(self, context, func_name, *args, **kwargs):
|
def _execute_pre_hook(self, context, func_name, *args, **kwargs):
|
||||||
LOG.debug("\n PRE zaqar notification has been called for "
|
LOG.debug("\n PRE zaqar notification has been called for "
|
||||||
"method '%s'.\n" % func_name)
|
"method '%s'.\n", func_name)
|
||||||
if func_name == "deny_access":
|
if func_name == "deny_access":
|
||||||
LOG.debug("\nSending notification about denied access.\n")
|
LOG.debug("\nSending notification about denied access.\n")
|
||||||
data = self._access_changed_trigger(
|
data = self._access_changed_trigger(
|
||||||
@ -92,7 +92,7 @@ class ZaqarNotification(hook.HookBase):
|
|||||||
def _execute_post_hook(self, context, func_name, pre_hook_data,
|
def _execute_post_hook(self, context, func_name, pre_hook_data,
|
||||||
driver_action_results, *args, **kwargs):
|
driver_action_results, *args, **kwargs):
|
||||||
LOG.debug("\n POST zaqar notification has been called for "
|
LOG.debug("\n POST zaqar notification has been called for "
|
||||||
"method '%s'.\n" % func_name)
|
"method '%s'.\n", func_name)
|
||||||
if func_name == "allow_access":
|
if func_name == "allow_access":
|
||||||
LOG.debug("\nSending notification about allowed access.\n")
|
LOG.debug("\nSending notification about allowed access.\n")
|
||||||
data = self._access_changed_trigger(
|
data = self._access_changed_trigger(
|
||||||
@ -115,7 +115,7 @@ class ZaqarNotification(hook.HookBase):
|
|||||||
}
|
}
|
||||||
LOG.debug(
|
LOG.debug(
|
||||||
"\n Sending message %(m)s to '%(q)s' queue using '%(u)s' user "
|
"\n Sending message %(m)s to '%(q)s' queue using '%(u)s' user "
|
||||||
"and '%(p)s' project." % {
|
"and '%(p)s' project.", {
|
||||||
'm': message,
|
'm': message,
|
||||||
'q': queue_name,
|
'q': queue_name,
|
||||||
'u': CONF.zaqar.zaqar_username,
|
'u': CONF.zaqar.zaqar_username,
|
||||||
|
@ -733,8 +733,8 @@ class Resource(wsgi.Application):
|
|||||||
def __call__(self, request):
|
def __call__(self, request):
|
||||||
"""WSGI method that controls (de)serialization and method dispatch."""
|
"""WSGI method that controls (de)serialization and method dispatch."""
|
||||||
|
|
||||||
LOG.info("%(method)s %(url)s" % {"method": request.method,
|
LOG.info("%(method)s %(url)s", {"method": request.method,
|
||||||
"url": request.url})
|
"url": request.url})
|
||||||
if self.support_api_request_version:
|
if self.support_api_request_version:
|
||||||
# Set the version of the API requested based on the header
|
# Set the version of the API requested based on the header
|
||||||
try:
|
try:
|
||||||
@ -900,7 +900,7 @@ class Resource(wsgi.Application):
|
|||||||
# OK, it's an action; figure out which action...
|
# OK, it's an action; figure out which action...
|
||||||
mtype = _MEDIA_TYPE_MAP.get(content_type)
|
mtype = _MEDIA_TYPE_MAP.get(content_type)
|
||||||
action_name = self.action_peek[mtype](body)
|
action_name = self.action_peek[mtype](body)
|
||||||
LOG.debug("Action body: %s" % body)
|
LOG.debug("Action body: %s", body)
|
||||||
else:
|
else:
|
||||||
action_name = action
|
action_name = action
|
||||||
|
|
||||||
|
@ -182,7 +182,7 @@ def downgrade():
|
|||||||
'shares',
|
'shares',
|
||||||
type_='foreignkey')
|
type_='foreignkey')
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception("Error Dropping '%s' constraint." %
|
LOG.exception("Error Dropping '%s' constraint.",
|
||||||
SHARES_CG_FK_CONSTRAINT_NAME)
|
SHARES_CG_FK_CONSTRAINT_NAME)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
|
@ -141,5 +141,5 @@ def downgrade():
|
|||||||
try:
|
try:
|
||||||
op.drop_table(table_name)
|
op.drop_table(table_name)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.error("%s table not dropped" % table_name)
|
LOG.error("%s table not dropped", table_name)
|
||||||
raise
|
raise
|
||||||
|
@ -76,7 +76,7 @@ def downgrade():
|
|||||||
)
|
)
|
||||||
|
|
||||||
LOG.info("Copying data from %(from_table)s to the migration "
|
LOG.info("Copying data from %(from_table)s to the migration "
|
||||||
"table %(migration_table)s" % {
|
"table %(migration_table)s", {
|
||||||
'from_table': TABLE_NAME,
|
'from_table': TABLE_NAME,
|
||||||
'migration_table': migration_table_name
|
'migration_table': migration_table_name
|
||||||
})
|
})
|
||||||
@ -94,13 +94,13 @@ def downgrade():
|
|||||||
})
|
})
|
||||||
op.bulk_insert(migration_table, rows)
|
op.bulk_insert(migration_table, rows)
|
||||||
|
|
||||||
LOG.info("Dropping table %(from_table)s" % {
|
LOG.info("Dropping table %(from_table)s", {
|
||||||
'from_table': TABLE_NAME
|
'from_table': TABLE_NAME
|
||||||
})
|
})
|
||||||
op.drop_table(TABLE_NAME)
|
op.drop_table(TABLE_NAME)
|
||||||
|
|
||||||
LOG.info("Rename the migration table %(migration_table)s to "
|
LOG.info("Rename the migration table %(migration_table)s to "
|
||||||
"the original table %(from_table)s" % {
|
"the original table %(from_table)s", {
|
||||||
'migration_table': migration_table_name,
|
'migration_table': migration_table_name,
|
||||||
'from_table': TABLE_NAME
|
'from_table': TABLE_NAME
|
||||||
})
|
})
|
||||||
|
@ -45,7 +45,7 @@ class API(base.Base):
|
|||||||
resource_id=None, exception=None, detail=None,
|
resource_id=None, exception=None, detail=None,
|
||||||
level=message_levels.ERROR):
|
level=message_levels.ERROR):
|
||||||
"""Create a message with the specified information."""
|
"""Create a message with the specified information."""
|
||||||
LOG.info("Creating message record for request_id = %s" %
|
LOG.info("Creating message record for request_id = %s",
|
||||||
context.request_id)
|
context.request_id)
|
||||||
|
|
||||||
# Updates expiry time for message as per message_ttl config.
|
# Updates expiry time for message as per message_ttl config.
|
||||||
@ -66,8 +66,8 @@ class API(base.Base):
|
|||||||
try:
|
try:
|
||||||
self.db.message_create(context, message_record)
|
self.db.message_create(context, message_record)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception("Failed to create message record "
|
LOG.exception(("Failed to create message record "
|
||||||
"for request_id %s" % context.request_id)
|
"for request_id %s"), context.request_id)
|
||||||
|
|
||||||
def get(self, context, id):
|
def get(self, context, id):
|
||||||
"""Return message with the specified message id."""
|
"""Return message with the specified message id."""
|
||||||
|
@ -306,7 +306,7 @@ class FilterScheduler(base.Scheduler):
|
|||||||
def schedule_create_share_group(self, context, share_group_id,
|
def schedule_create_share_group(self, context, share_group_id,
|
||||||
request_spec, filter_properties):
|
request_spec, filter_properties):
|
||||||
|
|
||||||
LOG.info("Scheduling share group %s." % share_group_id)
|
LOG.info("Scheduling share group %s.", share_group_id)
|
||||||
host = self._get_best_host_for_share_group(context, request_spec)
|
host = self._get_best_host_for_share_group(context, request_spec)
|
||||||
|
|
||||||
if not host:
|
if not host:
|
||||||
@ -314,7 +314,7 @@ class FilterScheduler(base.Scheduler):
|
|||||||
raise exception.NoValidHost(reason=msg)
|
raise exception.NoValidHost(reason=msg)
|
||||||
|
|
||||||
msg = "Chose host %(host)s for create_share_group %(group)s."
|
msg = "Chose host %(host)s for create_share_group %(group)s."
|
||||||
LOG.info(msg % {'host': host, 'group': share_group_id})
|
LOG.info(msg, {'host': host, 'group': share_group_id})
|
||||||
|
|
||||||
updated_share_group = base.share_group_update_db(
|
updated_share_group = base.share_group_update_db(
|
||||||
context, share_group_id, host)
|
context, share_group_id, host)
|
||||||
@ -361,7 +361,7 @@ class FilterScheduler(base.Scheduler):
|
|||||||
if not hosts:
|
if not hosts:
|
||||||
return []
|
return []
|
||||||
|
|
||||||
LOG.debug("Filtered %s" % hosts)
|
LOG.debug("Filtered %s", hosts)
|
||||||
|
|
||||||
# weighted_host = WeightedHost() ... the best host for the job.
|
# weighted_host = WeightedHost() ... the best host for the job.
|
||||||
weighed_hosts = self.host_manager.get_weighed_hosts(
|
weighed_hosts = self.host_manager.get_weighed_hosts(
|
||||||
@ -396,7 +396,7 @@ class FilterScheduler(base.Scheduler):
|
|||||||
if not hosts:
|
if not hosts:
|
||||||
return []
|
return []
|
||||||
|
|
||||||
LOG.debug("Filtered %s" % hosts)
|
LOG.debug("Filtered %s", hosts)
|
||||||
|
|
||||||
weighed_hosts = self.host_manager.get_weighed_hosts(
|
weighed_hosts = self.host_manager.get_weighed_hosts(
|
||||||
hosts,
|
hosts,
|
||||||
|
@ -51,7 +51,7 @@ class IgnoreAttemptedHostsFilter(base_host.BaseHostFilter):
|
|||||||
pass_msg = "passes" if passes else "fails"
|
pass_msg = "passes" if passes else "fails"
|
||||||
|
|
||||||
LOG.debug("Host %(host)s %(pass_msg)s. Previously tried hosts: "
|
LOG.debug("Host %(host)s %(pass_msg)s. Previously tried hosts: "
|
||||||
"%(hosts)s" % {'host': host,
|
"%(hosts)s", {'host': host,
|
||||||
'pass_msg': pass_msg,
|
'pass_msg': pass_msg,
|
||||||
'hosts': hosts})
|
'hosts': hosts})
|
||||||
return passes
|
return passes
|
||||||
|
@ -562,7 +562,7 @@ class HostManager(object):
|
|||||||
self.service_states[host] = capability_copy
|
self.service_states[host] = capability_copy
|
||||||
|
|
||||||
LOG.debug("Received %(service_name)s service update from "
|
LOG.debug("Received %(service_name)s service update from "
|
||||||
"%(host)s: %(cap)s" %
|
"%(host)s: %(cap)s",
|
||||||
{'service_name': service_name, 'host': host,
|
{'service_name': service_name, 'host': host,
|
||||||
'cap': capabilities})
|
'cap': capabilities})
|
||||||
|
|
||||||
@ -578,7 +578,7 @@ class HostManager(object):
|
|||||||
|
|
||||||
# Warn about down services and remove them from host_state_map
|
# Warn about down services and remove them from host_state_map
|
||||||
if not utils.service_is_up(service) or service['disabled']:
|
if not utils.service_is_up(service) or service['disabled']:
|
||||||
LOG.warning("Share service is down. (host: %s)." % host)
|
LOG.warning("Share service is down. (host: %s).", host)
|
||||||
continue
|
continue
|
||||||
|
|
||||||
# Create and register host_state if not in host_state_map
|
# Create and register host_state if not in host_state_map
|
||||||
|
@ -293,7 +293,7 @@ class WSGIService(service.ServiceBase):
|
|||||||
if self.workers is not None and self.workers < 1:
|
if self.workers is not None and self.workers < 1:
|
||||||
LOG.warning(
|
LOG.warning(
|
||||||
"Value of config option %(name)s_workers must be integer "
|
"Value of config option %(name)s_workers must be integer "
|
||||||
"greater than 1. Input value ignored." % {'name': name})
|
"greater than 1. Input value ignored.", {'name': name})
|
||||||
# Reset workers to default
|
# Reset workers to default
|
||||||
self.workers = None
|
self.workers = None
|
||||||
self.server = wsgi.Server(
|
self.server = wsgi.Server(
|
||||||
|
@ -1323,7 +1323,7 @@ class API(base.Base):
|
|||||||
constants.TASK_STATE_DATA_COPYING_IN_PROGRESS):
|
constants.TASK_STATE_DATA_COPYING_IN_PROGRESS):
|
||||||
data_rpc = data_rpcapi.DataAPI()
|
data_rpc = data_rpcapi.DataAPI()
|
||||||
LOG.info("Sending request to get share migration information"
|
LOG.info("Sending request to get share migration information"
|
||||||
" of share %s." % share['id'])
|
" of share %s.", share['id'])
|
||||||
|
|
||||||
services = self.db.service_get_all_by_topic(context, 'manila-data')
|
services = self.db.service_get_all_by_topic(context, 'manila-data')
|
||||||
|
|
||||||
@ -1423,7 +1423,7 @@ class API(base.Base):
|
|||||||
|
|
||||||
data_rpc = data_rpcapi.DataAPI()
|
data_rpc = data_rpcapi.DataAPI()
|
||||||
LOG.info("Sending request to cancel migration of "
|
LOG.info("Sending request to cancel migration of "
|
||||||
"share %s." % share['id'])
|
"share %s.", share['id'])
|
||||||
|
|
||||||
services = self.db.service_get_all_by_topic(context, 'manila-data')
|
services = self.db.service_get_all_by_topic(context, 'manila-data')
|
||||||
|
|
||||||
@ -1873,8 +1873,8 @@ class API(base.Base):
|
|||||||
self.update(context, share, {'status': constants.STATUS_SHRINKING})
|
self.update(context, share, {'status': constants.STATUS_SHRINKING})
|
||||||
self.share_rpcapi.shrink_share(context, share, new_size)
|
self.share_rpcapi.shrink_share(context, share, new_size)
|
||||||
LOG.info("Shrink share (id=%(id)s) request issued successfully."
|
LOG.info("Shrink share (id=%(id)s) request issued successfully."
|
||||||
" New size: %(size)s" % {'id': share['id'],
|
" New size: %(size)s", {'id': share['id'],
|
||||||
'size': new_size})
|
'size': new_size})
|
||||||
|
|
||||||
def snapshot_allow_access(self, context, snapshot, access_type, access_to):
|
def snapshot_allow_access(self, context, snapshot, access_type, access_to):
|
||||||
"""Allow access to a share snapshot."""
|
"""Allow access to a share snapshot."""
|
||||||
|
@ -1310,7 +1310,7 @@ class ShareDriver(object):
|
|||||||
except exception.ManilaException:
|
except exception.ManilaException:
|
||||||
msg = ('Could not delete share group snapshot member %(snap)s '
|
msg = ('Could not delete share group snapshot member %(snap)s '
|
||||||
'for share %(share)s.')
|
'for share %(share)s.')
|
||||||
LOG.error(msg % {
|
LOG.error(msg, {
|
||||||
'snap': share_snapshot['id'],
|
'snap': share_snapshot['id'],
|
||||||
'share': share_snapshot['share_id'],
|
'share': share_snapshot['share_id'],
|
||||||
})
|
})
|
||||||
@ -1402,7 +1402,7 @@ class ShareDriver(object):
|
|||||||
msg = ('Could not create share group snapshot. Failed '
|
msg = ('Could not create share group snapshot. Failed '
|
||||||
'to create share snapshot %(snap)s for '
|
'to create share snapshot %(snap)s for '
|
||||||
'share %(share)s.')
|
'share %(share)s.')
|
||||||
LOG.exception(msg % {
|
LOG.exception(msg, {
|
||||||
'snap': share_snapshot['id'],
|
'snap': share_snapshot['id'],
|
||||||
'share': share_snapshot['share_id']
|
'share': share_snapshot['share_id']
|
||||||
})
|
})
|
||||||
@ -1466,7 +1466,7 @@ class ShareDriver(object):
|
|||||||
This value may be None.
|
This value may be None.
|
||||||
"""
|
"""
|
||||||
snapshot_members = snap_dict.get('share_group_snapshot_members', [])
|
snapshot_members = snap_dict.get('share_group_snapshot_members', [])
|
||||||
LOG.debug('Deleting share group snapshot %s.' % snap_dict['id'])
|
LOG.debug('Deleting share group snapshot %s.', snap_dict['id'])
|
||||||
for member in snapshot_members:
|
for member in snapshot_members:
|
||||||
share_snapshot = {
|
share_snapshot = {
|
||||||
'snapshot_id': member['share_group_snapshot_id'],
|
'snapshot_id': member['share_group_snapshot_id'],
|
||||||
@ -1482,7 +1482,7 @@ class ShareDriver(object):
|
|||||||
self.delete_snapshot(
|
self.delete_snapshot(
|
||||||
context, share_snapshot, share_server=share_server)
|
context, share_snapshot, share_server=share_server)
|
||||||
|
|
||||||
LOG.debug('Deleted share group snapshot %s.' % snap_dict['id'])
|
LOG.debug('Deleted share group snapshot %s.', snap_dict['id'])
|
||||||
return None, None
|
return None, None
|
||||||
|
|
||||||
def _collate_share_group_snapshot_info(self, share_group_dict,
|
def _collate_share_group_snapshot_info(self, share_group_dict,
|
||||||
|
@ -117,7 +117,7 @@ class ContainerShareDriver(driver.ShareDriver, driver.ExecuteMixin):
|
|||||||
super(ContainerShareDriver, self)._update_share_stats(data)
|
super(ContainerShareDriver, self)._update_share_stats(data)
|
||||||
|
|
||||||
def create_share(self, context, share, share_server=None):
|
def create_share(self, context, share, share_server=None):
|
||||||
LOG.debug("Create share on server '%s'." % share_server["id"])
|
LOG.debug("Create share on server '%s'.", share_server["id"])
|
||||||
server_id = self._get_container_name(share_server["id"])
|
server_id = self._get_container_name(share_server["id"])
|
||||||
share_name = share.share_id
|
share_name = share.share_id
|
||||||
self.container.execute(
|
self.container.execute(
|
||||||
@ -135,7 +135,7 @@ class ContainerShareDriver(driver.ShareDriver, driver.ExecuteMixin):
|
|||||||
|
|
||||||
@utils.synchronized('container_driver_delete_share_lock', external=True)
|
@utils.synchronized('container_driver_delete_share_lock', external=True)
|
||||||
def delete_share(self, context, share, share_server=None):
|
def delete_share(self, context, share, share_server=None):
|
||||||
LOG.debug("Deleting share %(share)s on server '%(server)s'." %
|
LOG.debug("Deleting share %(share)s on server '%(server)s'.",
|
||||||
{"server": share_server["id"],
|
{"server": share_server["id"],
|
||||||
"share": share.share_id})
|
"share": share.share_id})
|
||||||
server_id = self._get_container_name(share_server["id"])
|
server_id = self._get_container_name(share_server["id"])
|
||||||
@ -255,7 +255,7 @@ class ContainerShareDriver(driver.ShareDriver, driver.ExecuteMixin):
|
|||||||
self.configuration.container_ovs_bridge_name, host_veth,
|
self.configuration.container_ovs_bridge_name, host_veth,
|
||||||
*(e_mac + e_id + e_status + e_mcid), run_as_root=True)
|
*(e_mac + e_id + e_status + e_mcid), run_as_root=True)
|
||||||
LOG.debug("Now container %(id)s should be accessible from network "
|
LOG.debug("Now container %(id)s should be accessible from network "
|
||||||
"%(network)s and subnet %(subnet)s by address %(ip)s." %
|
"%(network)s and subnet %(subnet)s by address %(ip)s.",
|
||||||
msg_helper)
|
msg_helper)
|
||||||
|
|
||||||
@utils.synchronized("container_driver_teardown_lock", external=True)
|
@utils.synchronized("container_driver_teardown_lock", external=True)
|
||||||
@ -305,7 +305,7 @@ class ContainerShareDriver(driver.ShareDriver, driver.ExecuteMixin):
|
|||||||
def _setup_server(self, network_info, metadata=None):
|
def _setup_server(self, network_info, metadata=None):
|
||||||
msg = "Creating share server '%s'."
|
msg = "Creating share server '%s'."
|
||||||
server_id = self._get_container_name(network_info["server_id"])
|
server_id = self._get_container_name(network_info["server_id"])
|
||||||
LOG.debug(msg % server_id)
|
LOG.debug(msg, server_id)
|
||||||
|
|
||||||
veths_before = self._get_veth_state()
|
veths_before = self._get_veth_state()
|
||||||
try:
|
try:
|
||||||
|
@ -110,7 +110,7 @@ class DockerCIFSHelper(object):
|
|||||||
existing_users = self._get_existing_users(server_id, share_name,
|
existing_users = self._get_existing_users(server_id, share_name,
|
||||||
access)
|
access)
|
||||||
except TypeError:
|
except TypeError:
|
||||||
LOG.warning("Can't access smbd at share %s." % share_name)
|
LOG.warning("Can't access smbd at share %s.", share_name)
|
||||||
return
|
return
|
||||||
else:
|
else:
|
||||||
allowed_users = " ".join(sorted(set(existing_users.split()) -
|
allowed_users = " ".join(sorted(set(existing_users.split()) -
|
||||||
|
@ -94,7 +94,7 @@ class LVMHelper(driver.ExecuteMixin):
|
|||||||
self._execute("lvremove", "-f", "--autobackup", "n",
|
self._execute("lvremove", "-f", "--autobackup", "n",
|
||||||
to_remove, run_as_root=True)
|
to_remove, run_as_root=True)
|
||||||
except exception.ProcessExecutionError as e:
|
except exception.ProcessExecutionError as e:
|
||||||
LOG.warning("Failed to remove logical volume %s." % to_remove)
|
LOG.warning("Failed to remove logical volume %s.", to_remove)
|
||||||
LOG.error(e)
|
LOG.error(e)
|
||||||
|
|
||||||
def extend_share(self, share, new_size, share_server=None):
|
def extend_share(self, share, new_size, share_server=None):
|
||||||
|
@ -873,7 +873,7 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
|||||||
|
|
||||||
def _setup_server(self, network_info, metadata=None):
|
def _setup_server(self, network_info, metadata=None):
|
||||||
msg = "Creating share server '%s'."
|
msg = "Creating share server '%s'."
|
||||||
LOG.debug(msg % network_info['server_id'])
|
LOG.debug(msg, network_info['server_id'])
|
||||||
server = self.service_instance_manager.set_up_service_instance(
|
server = self.service_instance_manager.set_up_service_instance(
|
||||||
self.admin_context, network_info)
|
self.admin_context, network_info)
|
||||||
for helper in self._helpers.values():
|
for helper in self._helpers.values():
|
||||||
@ -945,7 +945,7 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
|||||||
|
|
||||||
linked_volume_name = self._get_volume_name(share['id'])
|
linked_volume_name = self._get_volume_name(share['id'])
|
||||||
if share_volume['name'] != linked_volume_name:
|
if share_volume['name'] != linked_volume_name:
|
||||||
LOG.debug('Manage: volume_id = %s' % share_volume['id'])
|
LOG.debug('Manage: volume_id = %s', share_volume['id'])
|
||||||
self.volume_api.update(self.admin_context, share_volume['id'],
|
self.volume_api.update(self.admin_context, share_volume['id'],
|
||||||
{'name': linked_volume_name})
|
{'name': linked_volume_name})
|
||||||
|
|
||||||
|
@ -205,7 +205,7 @@ class GlusterManager(object):
|
|||||||
exc.exit_code in error_policy):
|
exc.exit_code in error_policy):
|
||||||
return
|
return
|
||||||
if logmsg:
|
if logmsg:
|
||||||
LOG.error("%s: GlusterFS instrumentation failed." %
|
LOG.error("%s: GlusterFS instrumentation failed.",
|
||||||
logmsg)
|
logmsg)
|
||||||
raise exception.GlusterfsException(
|
raise exception.GlusterfsException(
|
||||||
_("GlusterFS management command '%(cmd)s' failed "
|
_("GlusterFS management command '%(cmd)s' failed "
|
||||||
|
@ -266,9 +266,9 @@ class NFSHelper(NASHelperBase):
|
|||||||
LOG.warning(
|
LOG.warning(
|
||||||
"Unsupported access level %(level)s or access type "
|
"Unsupported access level %(level)s or access type "
|
||||||
"%(type)s, skipping removal of access rule to "
|
"%(type)s, skipping removal of access rule to "
|
||||||
"%(to)s." % {'level': access['access_level'],
|
"%(to)s.", {'level': access['access_level'],
|
||||||
'type': access['access_type'],
|
'type': access['access_type'],
|
||||||
'to': access['access_to']})
|
'to': access['access_to']})
|
||||||
continue
|
continue
|
||||||
self._ssh_exec(server, ['sudo', 'exportfs', '-u',
|
self._ssh_exec(server, ['sudo', 'exportfs', '-u',
|
||||||
':'.join((access['access_to'], local_path))])
|
':'.join((access['access_to'], local_path))])
|
||||||
@ -282,7 +282,7 @@ class NFSHelper(NASHelperBase):
|
|||||||
access['access_to']), out)
|
access['access_to']), out)
|
||||||
if found_item is not None:
|
if found_item is not None:
|
||||||
LOG.warning("Access rule %(type)s:%(to)s already "
|
LOG.warning("Access rule %(type)s:%(to)s already "
|
||||||
"exists for share %(name)s" % {
|
"exists for share %(name)s", {
|
||||||
'to': access['access_to'],
|
'to': access['access_to'],
|
||||||
'type': access['access_type'],
|
'type': access['access_type'],
|
||||||
'name': share_name
|
'name': share_name
|
||||||
|
@ -313,7 +313,7 @@ class HNASSSHBackend(object):
|
|||||||
command = ['tree-clone-job-abort', job_id]
|
command = ['tree-clone-job-abort', job_id]
|
||||||
self._execute(command)
|
self._execute(command)
|
||||||
LOG.error("Timeout in snapshot creation from "
|
LOG.error("Timeout in snapshot creation from "
|
||||||
"source path %s." % src_path)
|
"source path %s.", src_path)
|
||||||
msg = _("Share snapshot of source path %s "
|
msg = _("Share snapshot of source path %s "
|
||||||
"was not created.") % src_path
|
"was not created.") % src_path
|
||||||
raise exception.HNASBackendException(msg=msg)
|
raise exception.HNASBackendException(msg=msg)
|
||||||
|
@ -214,7 +214,7 @@ class RestHelper(object):
|
|||||||
utils.execute('chmod', '666', filepath, run_as_root=True)
|
utils.execute('chmod', '666', filepath, run_as_root=True)
|
||||||
|
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
LOG.error('Bad response from change file: %s.' % err)
|
LOG.error('Bad response from change file: %s.', err)
|
||||||
raise
|
raise
|
||||||
|
|
||||||
def create_share(self, share_name, fs_id, share_proto):
|
def create_share(self, share_name, fs_id, share_proto):
|
||||||
|
@ -226,7 +226,7 @@ class GPFSShareDriver(driver.ExecuteMixin, driver.GaneshaMixin,
|
|||||||
|
|
||||||
# exit_status == -1 if no exit code was returned
|
# exit_status == -1 if no exit code was returned
|
||||||
if exit_status != -1:
|
if exit_status != -1:
|
||||||
LOG.debug('Result was %s' % exit_status)
|
LOG.debug('Result was %s', exit_status)
|
||||||
if ((check_exit_code and exit_status != 0)
|
if ((check_exit_code and exit_status != 0)
|
||||||
and
|
and
|
||||||
(ignore_exit_code is None or
|
(ignore_exit_code is None or
|
||||||
@ -692,7 +692,7 @@ class GPFSShareDriver(driver.ExecuteMixin, driver.GaneshaMixin,
|
|||||||
LOG.exception(msg)
|
LOG.exception(msg)
|
||||||
raise exception.GPFSException(msg)
|
raise exception.GPFSException(msg)
|
||||||
LOG.info('Existing share %(shr)s has size %(size)s KB '
|
LOG.info('Existing share %(shr)s has size %(size)s KB '
|
||||||
'which is below 1GiB, so extended it to 1GiB.' %
|
'which is below 1GiB, so extended it to 1GiB.',
|
||||||
{'shr': new_share_name, 'size': share_size})
|
{'shr': new_share_name, 'size': share_size})
|
||||||
share_size = 1
|
share_size = 1
|
||||||
else:
|
else:
|
||||||
|
@ -121,7 +121,7 @@ class LVMMixin(driver.ExecuteMixin):
|
|||||||
if "not found" not in exc.stderr:
|
if "not found" not in exc.stderr:
|
||||||
LOG.exception("Error deleting volume")
|
LOG.exception("Error deleting volume")
|
||||||
raise
|
raise
|
||||||
LOG.warning("Volume not found: %s" % exc.stderr)
|
LOG.warning("Volume not found: %s", exc.stderr)
|
||||||
|
|
||||||
def _create_snapshot(self, context, snapshot):
|
def _create_snapshot(self, context, snapshot):
|
||||||
"""Creates a snapshot."""
|
"""Creates a snapshot."""
|
||||||
|
@ -108,7 +108,7 @@ class NetAppDriver(object):
|
|||||||
|
|
||||||
fmt = {'storage_family': storage_family, 'driver_mode': driver_mode}
|
fmt = {'storage_family': storage_family, 'driver_mode': driver_mode}
|
||||||
LOG.info('Requested unified config: %(storage_family)s and '
|
LOG.info('Requested unified config: %(storage_family)s and '
|
||||||
'%(driver_mode)s.' % fmt)
|
'%(driver_mode)s.', fmt)
|
||||||
|
|
||||||
family_meta = NETAPP_UNIFIED_DRIVER_REGISTRY.get(storage_family)
|
family_meta = NETAPP_UNIFIED_DRIVER_REGISTRY.get(storage_family)
|
||||||
if family_meta is None:
|
if family_meta is None:
|
||||||
@ -125,5 +125,5 @@ class NetAppDriver(object):
|
|||||||
kwargs['netapp_mode'] = 'proxy'
|
kwargs['netapp_mode'] = 'proxy'
|
||||||
driver = importutils.import_object(driver_loc, *args, **kwargs)
|
driver = importutils.import_object(driver_loc, *args, **kwargs)
|
||||||
LOG.info('NetApp driver of family %(storage_family)s and mode '
|
LOG.info('NetApp driver of family %(storage_family)s and mode '
|
||||||
'%(driver_mode)s loaded.' % fmt)
|
'%(driver_mode)s loaded.', fmt)
|
||||||
return driver
|
return driver
|
||||||
|
@ -2649,7 +2649,7 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
|
|||||||
try:
|
try:
|
||||||
client.delete_nfs_export_policy(policy)
|
client.delete_nfs_export_policy(policy)
|
||||||
except netapp_api.NaApiError:
|
except netapp_api.NaApiError:
|
||||||
LOG.debug('Could not delete export policy %s.' % policy)
|
LOG.debug('Could not delete export policy %s.', policy)
|
||||||
|
|
||||||
@na_utils.trace
|
@na_utils.trace
|
||||||
def _get_deleted_nfs_export_policies(self):
|
def _get_deleted_nfs_export_policies(self):
|
||||||
@ -2717,7 +2717,7 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
|
|||||||
node_client.send_request('ems-autosupport-log', message_dict)
|
node_client.send_request('ems-autosupport-log', message_dict)
|
||||||
LOG.debug('EMS executed successfully.')
|
LOG.debug('EMS executed successfully.')
|
||||||
except netapp_api.NaApiError as e:
|
except netapp_api.NaApiError as e:
|
||||||
LOG.warning('Failed to invoke EMS. %s' % e)
|
LOG.warning('Failed to invoke EMS. %s', e)
|
||||||
|
|
||||||
@na_utils.trace
|
@na_utils.trace
|
||||||
def get_aggregate(self, aggregate_name):
|
def get_aggregate(self, aggregate_name):
|
||||||
@ -2741,7 +2741,7 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
|
|||||||
desired_attributes=desired_attributes)
|
desired_attributes=desired_attributes)
|
||||||
except netapp_api.NaApiError:
|
except netapp_api.NaApiError:
|
||||||
msg = _('Failed to get info for aggregate %s.')
|
msg = _('Failed to get info for aggregate %s.')
|
||||||
LOG.exception(msg % aggregate_name)
|
LOG.exception(msg, aggregate_name)
|
||||||
return {}
|
return {}
|
||||||
|
|
||||||
if len(aggrs) < 1:
|
if len(aggrs) < 1:
|
||||||
@ -2814,7 +2814,7 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
|
|||||||
result = self.send_iter_request('storage-disk-get-iter', api_args)
|
result = self.send_iter_request('storage-disk-get-iter', api_args)
|
||||||
except netapp_api.NaApiError:
|
except netapp_api.NaApiError:
|
||||||
msg = _('Failed to get disk info for aggregate %s.')
|
msg = _('Failed to get disk info for aggregate %s.')
|
||||||
LOG.exception(msg % aggregate_name)
|
LOG.exception(msg, aggregate_name)
|
||||||
return disk_types
|
return disk_types
|
||||||
|
|
||||||
attributes_list = result.get_child_by_name(
|
attributes_list = result.get_child_by_name(
|
||||||
|
@ -173,7 +173,7 @@ class NetAppCmodeFileStorageLibrary(object):
|
|||||||
if 'nfs' not in self._licenses and 'cifs' not in self._licenses:
|
if 'nfs' not in self._licenses and 'cifs' not in self._licenses:
|
||||||
msg = 'Neither NFS nor CIFS is licensed on %(backend)s'
|
msg = 'Neither NFS nor CIFS is licensed on %(backend)s'
|
||||||
msg_args = {'backend': self._backend_name}
|
msg_args = {'backend': self._backend_name}
|
||||||
LOG.error(msg % msg_args)
|
LOG.error(msg, msg_args)
|
||||||
|
|
||||||
return self._licenses
|
return self._licenses
|
||||||
|
|
||||||
@ -1064,7 +1064,7 @@ class NetAppCmodeFileStorageLibrary(object):
|
|||||||
msg = _('Could not determine snapshot %(snap)s size from '
|
msg = _('Could not determine snapshot %(snap)s size from '
|
||||||
'volume %(vol)s.')
|
'volume %(vol)s.')
|
||||||
msg_args = {'snap': existing_snapshot_name, 'vol': share_name}
|
msg_args = {'snap': existing_snapshot_name, 'vol': share_name}
|
||||||
LOG.exception(msg % msg_args)
|
LOG.exception(msg, msg_args)
|
||||||
raise exception.ShareNotFound(share_id=snapshot['share_id'])
|
raise exception.ShareNotFound(share_id=snapshot['share_id'])
|
||||||
|
|
||||||
# Ensure there aren't any mirrors on this volume
|
# Ensure there aren't any mirrors on this volume
|
||||||
@ -2139,7 +2139,7 @@ class NetAppCmodeFileStorageLibrary(object):
|
|||||||
math.ceil(float(backend_volume['size']) / units.Gi))
|
math.ceil(float(backend_volume['size']) / units.Gi))
|
||||||
|
|
||||||
LOG.debug("Checking for a pre-existing QoS policy group that "
|
LOG.debug("Checking for a pre-existing QoS policy group that "
|
||||||
"is exclusive to the volume %s." % backend_share_name)
|
"is exclusive to the volume %s.", backend_share_name)
|
||||||
|
|
||||||
# Does the volume have an exclusive QoS policy that we can rename?
|
# Does the volume have an exclusive QoS policy that we can rename?
|
||||||
if backend_volume['qos-policy-group-name'] is not None:
|
if backend_volume['qos-policy-group-name'] is not None:
|
||||||
|
@ -247,7 +247,7 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
|
|||||||
|
|
||||||
network_allocations = network_info.get('admin_network_allocations')
|
network_allocations = network_info.get('admin_network_allocations')
|
||||||
if not network_allocations:
|
if not network_allocations:
|
||||||
LOG.info('No admin network defined for Vserver %s.' %
|
LOG.info('No admin network defined for Vserver %s.',
|
||||||
vserver_name)
|
vserver_name)
|
||||||
return
|
return
|
||||||
|
|
||||||
|
@ -77,7 +77,7 @@ class NetAppCmodeSingleSVMFileStorageLibrary(
|
|||||||
msg_args = {'vserver': self._vserver, 'backend': self._backend_name}
|
msg_args = {'vserver': self._vserver, 'backend': self._backend_name}
|
||||||
msg_args['creds'] = ('cluster' if self._have_cluster_creds
|
msg_args['creds'] = ('cluster' if self._have_cluster_creds
|
||||||
else 'Vserver')
|
else 'Vserver')
|
||||||
LOG.info(msg % msg_args)
|
LOG.info(msg, msg_args)
|
||||||
|
|
||||||
(super(NetAppCmodeSingleSVMFileStorageLibrary, self).
|
(super(NetAppCmodeSingleSVMFileStorageLibrary, self).
|
||||||
check_for_setup_error())
|
check_for_setup_error())
|
||||||
|
@ -89,14 +89,14 @@ class NetAppCmodeNFSHelper(base.NetAppBaseHelper):
|
|||||||
|
|
||||||
# Rename policy currently in force
|
# Rename policy currently in force
|
||||||
LOG.info('Renaming NFS export policy for share %(share)s to '
|
LOG.info('Renaming NFS export policy for share %(share)s to '
|
||||||
'%(policy)s.' %
|
'%(policy)s.',
|
||||||
{'share': share_name, 'policy': temp_old_export_policy_name})
|
{'share': share_name, 'policy': temp_old_export_policy_name})
|
||||||
self._client.rename_nfs_export_policy(export_policy_name,
|
self._client.rename_nfs_export_policy(export_policy_name,
|
||||||
temp_old_export_policy_name)
|
temp_old_export_policy_name)
|
||||||
|
|
||||||
# Switch share to the new policy
|
# Switch share to the new policy
|
||||||
LOG.info('Setting NFS export policy for share %(share)s to '
|
LOG.info('Setting NFS export policy for share %(share)s to '
|
||||||
'%(policy)s.' %
|
'%(policy)s.',
|
||||||
{'share': share_name, 'policy': temp_new_export_policy_name})
|
{'share': share_name, 'policy': temp_new_export_policy_name})
|
||||||
self._client.set_nfs_export_policy_for_volume(
|
self._client.set_nfs_export_policy_for_volume(
|
||||||
share_name, temp_new_export_policy_name)
|
share_name, temp_new_export_policy_name)
|
||||||
@ -106,7 +106,7 @@ class NetAppCmodeNFSHelper(base.NetAppBaseHelper):
|
|||||||
|
|
||||||
# Rename new policy to its final name
|
# Rename new policy to its final name
|
||||||
LOG.info('Renaming NFS export policy for share %(share)s to '
|
LOG.info('Renaming NFS export policy for share %(share)s to '
|
||||||
'%(policy)s.' %
|
'%(policy)s.',
|
||||||
{'share': share_name, 'policy': export_policy_name})
|
{'share': share_name, 'policy': export_policy_name})
|
||||||
self._client.rename_nfs_export_policy(temp_new_export_policy_name,
|
self._client.rename_nfs_export_policy(temp_new_export_policy_name,
|
||||||
export_policy_name)
|
export_policy_name)
|
||||||
|
@ -74,7 +74,7 @@ def setup_tracing(trace_flags_string):
|
|||||||
flags = trace_flags_string.split(',')
|
flags = trace_flags_string.split(',')
|
||||||
flags = [flag.strip() for flag in flags]
|
flags = [flag.strip() for flag in flags]
|
||||||
for invalid_flag in list(set(flags) - set(VALID_TRACE_FLAGS)):
|
for invalid_flag in list(set(flags) - set(VALID_TRACE_FLAGS)):
|
||||||
LOG.warning('Invalid trace flag: %s' % invalid_flag)
|
LOG.warning('Invalid trace flag: %s', invalid_flag)
|
||||||
TRACE_METHOD = 'method' in flags
|
TRACE_METHOD = 'method' in flags
|
||||||
TRACE_API = 'api' in flags
|
TRACE_API = 'api' in flags
|
||||||
|
|
||||||
@ -164,7 +164,7 @@ class OpenStackInfo(object):
|
|||||||
"'%{version}\t%{release}\t%{vendor}'",
|
"'%{version}\t%{release}\t%{vendor}'",
|
||||||
self.PACKAGE_NAME)
|
self.PACKAGE_NAME)
|
||||||
if not out:
|
if not out:
|
||||||
LOG.info('No rpm info found for %(pkg)s package.' % {
|
LOG.info('No rpm info found for %(pkg)s package.', {
|
||||||
'pkg': self.PACKAGE_NAME})
|
'pkg': self.PACKAGE_NAME})
|
||||||
return False
|
return False
|
||||||
parts = out.split()
|
parts = out.split()
|
||||||
@ -173,7 +173,7 @@ class OpenStackInfo(object):
|
|||||||
self._vendor = ' '.join(parts[2::])
|
self._vendor = ' '.join(parts[2::])
|
||||||
return True
|
return True
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.info('Could not run rpm command: %(msg)s.' % {
|
LOG.info('Could not run rpm command: %(msg)s.', {
|
||||||
'msg': e})
|
'msg': e})
|
||||||
return False
|
return False
|
||||||
|
|
||||||
@ -186,7 +186,7 @@ class OpenStackInfo(object):
|
|||||||
self.PACKAGE_NAME)
|
self.PACKAGE_NAME)
|
||||||
if not out:
|
if not out:
|
||||||
LOG.info(
|
LOG.info(
|
||||||
'No dpkg-query info found for %(pkg)s package.' % {
|
'No dpkg-query info found for %(pkg)s package.', {
|
||||||
'pkg': self.PACKAGE_NAME})
|
'pkg': self.PACKAGE_NAME})
|
||||||
return False
|
return False
|
||||||
# Debian format: [epoch:]upstream_version[-debian_revision]
|
# Debian format: [epoch:]upstream_version[-debian_revision]
|
||||||
@ -204,7 +204,7 @@ class OpenStackInfo(object):
|
|||||||
self._vendor = _vendor
|
self._vendor = _vendor
|
||||||
return True
|
return True
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.info('Could not run dpkg-query command: %(msg)s.' % {
|
LOG.info('Could not run dpkg-query command: %(msg)s.', {
|
||||||
'msg': e})
|
'msg': e})
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
@ -97,7 +97,7 @@ class JsonRpc(object):
|
|||||||
return self._checked_for_application_error(response)
|
return self._checked_for_application_error(response)
|
||||||
|
|
||||||
# If things did not work out provide error info
|
# If things did not work out provide error info
|
||||||
LOG.debug("Backend request resulted in error: %s" % result.text)
|
LOG.debug("Backend request resulted in error: %s", result.text)
|
||||||
result.raise_for_status()
|
result.raise_for_status()
|
||||||
|
|
||||||
def _checked_for_application_error(self, result):
|
def _checked_for_application_error(self, result):
|
||||||
|
@ -245,9 +245,9 @@ class QuobyteShareDriver(driver.ExecuteMixin, driver.ShareDriver,):
|
|||||||
self._get_project_name(context, share['project_id']))
|
self._get_project_name(context, share['project_id']))
|
||||||
if not volume_uuid:
|
if not volume_uuid:
|
||||||
LOG.warning("No volume found for "
|
LOG.warning("No volume found for "
|
||||||
"share %(project_id)s/%(name)s"
|
"share %(project_id)s/%(name)s",
|
||||||
% {"project_id": share['project_id'],
|
{"project_id": share['project_id'],
|
||||||
"name": share['name']})
|
"name": share['name']})
|
||||||
return
|
return
|
||||||
|
|
||||||
if self.configuration.quobyte_delete_shares:
|
if self.configuration.quobyte_delete_shares:
|
||||||
@ -274,7 +274,7 @@ class QuobyteShareDriver(driver.ExecuteMixin, driver.ShareDriver,):
|
|||||||
share['name'],
|
share['name'],
|
||||||
self._get_project_name(context, share['project_id']))
|
self._get_project_name(context, share['project_id']))
|
||||||
|
|
||||||
LOG.debug("Ensuring Quobyte share %s" % share['name'])
|
LOG.debug("Ensuring Quobyte share %s", share['name'])
|
||||||
|
|
||||||
if not volume_uuid:
|
if not volume_uuid:
|
||||||
raise (exception.ShareResourceNotFound(
|
raise (exception.ShareResourceNotFound(
|
||||||
|
@ -642,7 +642,7 @@ class ServiceInstanceManager(object):
|
|||||||
break
|
break
|
||||||
|
|
||||||
LOG.debug("Waiting for instance %(instance_id)s to be active. "
|
LOG.debug("Waiting for instance %(instance_id)s to be active. "
|
||||||
"Current status: %(instance_status)s." %
|
"Current status: %(instance_status)s.",
|
||||||
dict(instance_id=instance_id,
|
dict(instance_id=instance_id,
|
||||||
instance_status=instance_status))
|
instance_status=instance_status))
|
||||||
time.sleep(1)
|
time.sleep(1)
|
||||||
@ -960,7 +960,7 @@ class NeutronNetworkHelper(BaseNetworkhelper):
|
|||||||
except Exception as e:
|
except Exception as e:
|
||||||
if 'does not exist' in six.text_type(e):
|
if 'does not exist' in six.text_type(e):
|
||||||
LOG.warning(
|
LOG.warning(
|
||||||
"Device %s does not exist anymore." % device.name)
|
"Device %s does not exist anymore.", device.name)
|
||||||
else:
|
else:
|
||||||
raise
|
raise
|
||||||
for addr in addr_list:
|
for addr in addr_list:
|
||||||
|
@ -209,9 +209,9 @@ class WindowsSMBHelper(helpers.CIFSHelperBase):
|
|||||||
LOG.warning(
|
LOG.warning(
|
||||||
"Unsupported access level %(level)s or access type "
|
"Unsupported access level %(level)s or access type "
|
||||||
"%(type)s, skipping removal of access rule to "
|
"%(type)s, skipping removal of access rule to "
|
||||||
"%(to)s." % {'level': deleted_rule['access_level'],
|
"%(to)s.", {'level': deleted_rule['access_level'],
|
||||||
'type': deleted_rule['access_type'],
|
'type': deleted_rule['access_type'],
|
||||||
'to': deleted_rule['access_to']})
|
'to': deleted_rule['access_to']})
|
||||||
continue
|
continue
|
||||||
self._revoke_share_access(server, share_name,
|
self._revoke_share_access(server, share_name,
|
||||||
deleted_rule['access_to'])
|
deleted_rule['access_to'])
|
||||||
|
@ -1550,5 +1550,5 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
|
|||||||
e)
|
e)
|
||||||
|
|
||||||
LOG.debug(
|
LOG.debug(
|
||||||
"Migration of share with ID '%s' has been canceled." %
|
"Migration of share with ID '%s' has been canceled.",
|
||||||
source_share["id"])
|
source_share["id"])
|
||||||
|
@ -1956,7 +1956,7 @@ class ShareManager(manager.SchedulerDependentManager):
|
|||||||
msg = _("The driver was unable to delete access rules "
|
msg = _("The driver was unable to delete access rules "
|
||||||
"for the replica: %s. Will attempt to delete "
|
"for the replica: %s. Will attempt to delete "
|
||||||
"the replica anyway.")
|
"the replica anyway.")
|
||||||
LOG.exception(msg % share_replica['id'])
|
LOG.exception(msg, share_replica['id'])
|
||||||
exc_context.reraise = False
|
exc_context.reraise = False
|
||||||
|
|
||||||
try:
|
try:
|
||||||
@ -3265,7 +3265,7 @@ class ShareManager(manager.SchedulerDependentManager):
|
|||||||
share_server = self._get_share_server(context, share_instance)
|
share_server = self._get_share_server(context, share_instance)
|
||||||
|
|
||||||
LOG.debug("Received request to update access for share instance"
|
LOG.debug("Received request to update access for share instance"
|
||||||
" %s." % share_instance_id)
|
" %s.", share_instance_id)
|
||||||
|
|
||||||
self.access_helper.update_access_rules(
|
self.access_helper.update_access_rules(
|
||||||
context,
|
context,
|
||||||
|
@ -77,7 +77,7 @@ def fake_execute(*cmd_parts, **kwargs):
|
|||||||
for fake_replier in _fake_execute_repliers:
|
for fake_replier in _fake_execute_repliers:
|
||||||
if re.match(fake_replier[0], cmd_str):
|
if re.match(fake_replier[0], cmd_str):
|
||||||
reply_handler = fake_replier[1]
|
reply_handler = fake_replier[1]
|
||||||
LOG.debug('Faked command matched %s' % fake_replier[0])
|
LOG.debug('Faked command matched %s', fake_replier[0])
|
||||||
break
|
break
|
||||||
|
|
||||||
if isinstance(reply_handler, six.string_types):
|
if isinstance(reply_handler, six.string_types):
|
||||||
|
@ -71,7 +71,8 @@ class MessageApiTest(test.TestCase):
|
|||||||
self.message_api.db.message_create.assert_called_once_with(
|
self.message_api.db.message_create.assert_called_once_with(
|
||||||
self.ctxt, mock.ANY)
|
self.ctxt, mock.ANY)
|
||||||
exception_log.assert_called_once_with(
|
exception_log.assert_called_once_with(
|
||||||
'Failed to create message record for request_id fakerequestid')
|
'Failed to create message record for request_id %s',
|
||||||
|
self.ctxt.request_id)
|
||||||
|
|
||||||
def test_get(self):
|
def test_get(self):
|
||||||
self.message_api.get(self.ctxt, 'fake_id')
|
self.message_api.get(self.ctxt, 'fake_id')
|
||||||
|
@ -416,13 +416,13 @@ class DummyDriver(driver.ShareDriver):
|
|||||||
@slow_me_down
|
@slow_me_down
|
||||||
def create_cgsnapshot(self, context, snap_dict, share_server=None):
|
def create_cgsnapshot(self, context, snap_dict, share_server=None):
|
||||||
"""Create a consistency group snapshot."""
|
"""Create a consistency group snapshot."""
|
||||||
LOG.debug("Successfully created CG snapshot %s." % snap_dict["id"])
|
LOG.debug("Successfully created CG snapshot %s.", snap_dict["id"])
|
||||||
return None, None
|
return None, None
|
||||||
|
|
||||||
@slow_me_down
|
@slow_me_down
|
||||||
def delete_cgsnapshot(self, context, snap_dict, share_server=None):
|
def delete_cgsnapshot(self, context, snap_dict, share_server=None):
|
||||||
"""Delete a consistency group snapshot."""
|
"""Delete a consistency group snapshot."""
|
||||||
LOG.debug("Successfully deleted CG snapshot %s." % snap_dict["id"])
|
LOG.debug("Successfully deleted CG snapshot %s.", snap_dict["id"])
|
||||||
return None, None
|
return None, None
|
||||||
|
|
||||||
@slow_me_down
|
@slow_me_down
|
||||||
@ -545,7 +545,7 @@ class DummyDriver(driver.ShareDriver):
|
|||||||
|
|
||||||
"""
|
"""
|
||||||
LOG.debug(
|
LOG.debug(
|
||||||
"Migration of dummy share with ID '%s' has been started." %
|
"Migration of dummy share with ID '%s' has been started.",
|
||||||
source_share["id"])
|
source_share["id"])
|
||||||
self.migration_progress[source_share['share_id']] = 0
|
self.migration_progress[source_share['share_id']] = 0
|
||||||
|
|
||||||
@ -561,9 +561,9 @@ class DummyDriver(driver.ShareDriver):
|
|||||||
self.migration_progress[source_share["id"]] += 50
|
self.migration_progress[source_share["id"]] += 50
|
||||||
|
|
||||||
LOG.debug(
|
LOG.debug(
|
||||||
"Migration of dummy share with ID '%s' is continuing, %s." %
|
"Migration of dummy share with ID '%s' is continuing, %s.",
|
||||||
(source_share["id"],
|
source_share["id"],
|
||||||
self.migration_progress[source_share["id"]]))
|
self.migration_progress[source_share["id"]])
|
||||||
|
|
||||||
return self.migration_progress[source_share["id"]] == 100
|
return self.migration_progress[source_share["id"]] == 100
|
||||||
|
|
||||||
@ -596,7 +596,7 @@ class DummyDriver(driver.ShareDriver):
|
|||||||
}
|
}
|
||||||
)
|
)
|
||||||
LOG.debug(
|
LOG.debug(
|
||||||
"Migration of dummy share with ID '%s' has been completed." %
|
"Migration of dummy share with ID '%s' has been completed.",
|
||||||
source_share_ref["id"])
|
source_share_ref["id"])
|
||||||
self.migration_progress.pop(source_share_ref["id"], None)
|
self.migration_progress.pop(source_share_ref["id"], None)
|
||||||
|
|
||||||
@ -610,7 +610,7 @@ class DummyDriver(driver.ShareDriver):
|
|||||||
destination_share_server=None):
|
destination_share_server=None):
|
||||||
"""Is called to cancel driver migration."""
|
"""Is called to cancel driver migration."""
|
||||||
LOG.debug(
|
LOG.debug(
|
||||||
"Migration of dummy share with ID '%s' has been canceled." %
|
"Migration of dummy share with ID '%s' has been canceled.",
|
||||||
source_share["id"])
|
source_share["id"])
|
||||||
self.migration_progress.pop(source_share["id"], None)
|
self.migration_progress.pop(source_share["id"], None)
|
||||||
|
|
||||||
|
@ -192,7 +192,8 @@ class QuobyteShareDriverTestCase(test.TestCase):
|
|||||||
self._driver.delete_share(self._context, self.share)
|
self._driver.delete_share(self._context, self.share)
|
||||||
|
|
||||||
mock_warning.assert_called_with(
|
mock_warning.assert_called_with(
|
||||||
'No volume found for share fake_project_uuid/fakename')
|
'No volume found for share %(project_id)s/%(name)s',
|
||||||
|
{'project_id': 'fake_project_uuid', 'name': 'fakename'})
|
||||||
|
|
||||||
def test_allow_access(self):
|
def test_allow_access(self):
|
||||||
def rpc_handler(name, *args):
|
def rpc_handler(name, *args):
|
||||||
|
@ -80,7 +80,7 @@ class RemoteClient(object):
|
|||||||
# Shell options below add more clearness on failures,
|
# Shell options below add more clearness on failures,
|
||||||
# path is extended for some non-cirros guest oses (centos7)
|
# path is extended for some non-cirros guest oses (centos7)
|
||||||
cmd = CONF.validation.ssh_shell_prologue + " " + cmd
|
cmd = CONF.validation.ssh_shell_prologue + " " + cmd
|
||||||
LOG.debug("Remote command: %s" % cmd)
|
LOG.debug("Remote command: %s", cmd)
|
||||||
return self.ssh_client.exec_command(cmd)
|
return self.ssh_client.exec_command(cmd)
|
||||||
|
|
||||||
@debug_ssh
|
@debug_ssh
|
||||||
|
@ -91,7 +91,7 @@ class handle_cleanup_exceptions(object):
|
|||||||
return False # Do not suppress error if any
|
return False # Do not suppress error if any
|
||||||
if exc_traceback:
|
if exc_traceback:
|
||||||
LOG.error("Suppressed cleanup error in Manila: "
|
LOG.error("Suppressed cleanup error in Manila: "
|
||||||
"\n%s" % traceback.format_exc())
|
"\n%s", traceback.format_exc())
|
||||||
return True # Suppress error if any
|
return True # Suppress error if any
|
||||||
|
|
||||||
|
|
||||||
@ -925,7 +925,7 @@ class BaseSharesTest(test.BaseTestCase):
|
|||||||
client.wait_for_resource_deletion(replica_id=res_id)
|
client.wait_for_resource_deletion(replica_id=res_id)
|
||||||
else:
|
else:
|
||||||
LOG.warning("Provided unsupported resource type for "
|
LOG.warning("Provided unsupported resource type for "
|
||||||
"cleanup '%s'. Skipping." % res["type"])
|
"cleanup '%s'. Skipping.", res["type"])
|
||||||
res["deleted"] = True
|
res["deleted"] = True
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
|
@ -183,7 +183,7 @@ class SecurityServicesTest(base.BaseSharesTest,
|
|||||||
LOG.warning("Caught exception. It is expected in case backend "
|
LOG.warning("Caught exception. It is expected in case backend "
|
||||||
"fails having security-service with improper data "
|
"fails having security-service with improper data "
|
||||||
"that leads to share-server creation error. "
|
"that leads to share-server creation error. "
|
||||||
"%s" % six.text_type(e))
|
"%s", six.text_type(e))
|
||||||
|
|
||||||
update_data = {
|
update_data = {
|
||||||
"name": "name",
|
"name": "name",
|
||||||
|
@ -115,7 +115,7 @@ class SecServicesMappingNegativeTest(base.BaseSharesTest):
|
|||||||
LOG.warning("Caught exception. It is expected in case backend "
|
LOG.warning("Caught exception. It is expected in case backend "
|
||||||
"fails having security-service with improper data "
|
"fails having security-service with improper data "
|
||||||
"that leads to share-server creation error. "
|
"that leads to share-server creation error. "
|
||||||
"%s" % six.text_type(e))
|
"%s", six.text_type(e))
|
||||||
|
|
||||||
self.assertRaises(lib_exc.Forbidden,
|
self.assertRaises(lib_exc.Forbidden,
|
||||||
self.cl.remove_sec_service_from_share_network,
|
self.cl.remove_sec_service_from_share_network,
|
||||||
|
@ -101,7 +101,7 @@ class SecurityServicesNegativeTest(base.BaseSharesTest):
|
|||||||
LOG.warning("Caught exception. It is expected in case backend "
|
LOG.warning("Caught exception. It is expected in case backend "
|
||||||
"fails having security-service with improper data "
|
"fails having security-service with improper data "
|
||||||
"that leads to share-server creation error. "
|
"that leads to share-server creation error. "
|
||||||
"%s" % six.text_type(e))
|
"%s", six.text_type(e))
|
||||||
|
|
||||||
self.assertRaises(lib_exc.Forbidden,
|
self.assertRaises(lib_exc.Forbidden,
|
||||||
self.shares_client.update_security_service,
|
self.shares_client.update_security_service,
|
||||||
|
@ -228,7 +228,7 @@ class ShareScenarioTest(manager.NetworkScenarioTest):
|
|||||||
try:
|
try:
|
||||||
linux_client.validate_authentication()
|
linux_client.validate_authentication()
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception('Initializing SSH connection to %s failed' % ip)
|
LOG.exception('Initializing SSH connection to %s failed', ip)
|
||||||
self._log_console_output()
|
self._log_console_output()
|
||||||
raise
|
raise
|
||||||
|
|
||||||
|
@ -189,7 +189,7 @@ class ShareBasicOpsBase(manager.ShareScenarioTest):
|
|||||||
first_address = net_addresses.values()[0][0]
|
first_address = net_addresses.values()[0][0]
|
||||||
ip = first_address['addr']
|
ip = first_address['addr']
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.debug("Instance: %s" % instance)
|
LOG.debug("Instance: %s", instance)
|
||||||
# In case on an error ip will be still none
|
# In case on an error ip will be still none
|
||||||
LOG.exception("Instance does not have a valid IP address."
|
LOG.exception("Instance does not have a valid IP address."
|
||||||
"Falling back to default")
|
"Falling back to default")
|
||||||
|
5
tox.ini
5
tox.ini
@ -112,6 +112,11 @@ commands = alembic -c manila/db/migrations/alembic.ini revision -m ""{posargs}
|
|||||||
# Following checks are ignored on purpose:
|
# Following checks are ignored on purpose:
|
||||||
ignore =
|
ignore =
|
||||||
builtins = _
|
builtins = _
|
||||||
|
# [H106] Don't put vim configuration in source files.
|
||||||
|
# [H203] Use assertIs(Not)None to check for None.
|
||||||
|
# [H904] Use ',' instead of '%', String interpolation should be delayed to be handled by the logging code,
|
||||||
|
# rather than being done at the point of the logging call..
|
||||||
|
enable-extensions = H106,H203,H904
|
||||||
exclude = .git,.tox,.testrepository,.venv,build,cover,dist,doc,*egg,api-ref/build,*/source/conf.py
|
exclude = .git,.tox,.testrepository,.venv,build,cover,dist,doc,*egg,api-ref/build,*/source/conf.py
|
||||||
|
|
||||||
[hacking]
|
[hacking]
|
||||||
|
Loading…
Reference in New Issue
Block a user