Merge "Handle log message interpolation by the logger part 8"

This commit is contained in:
Jenkins 2017-06-07 05:56:02 +00:00 committed by Gerrit Code Review
commit ea9f8aec5c
23 changed files with 99 additions and 88 deletions

View File

@ -83,33 +83,34 @@ class API(object):
serializer=serializer) serializer=serializer)
def _call(self, method_name, timeout_sec, version, **kwargs): def _call(self, method_name, timeout_sec, version, **kwargs):
LOG.debug("Calling %s with timeout %s" % (method_name, timeout_sec)) LOG.debug("Calling %(name)s with timeout %(timeout)s",
{'name': method_name, 'timeout': timeout_sec})
try: try:
cctxt = self.client.prepare(version=version, timeout=timeout_sec) cctxt = self.client.prepare(version=version, timeout=timeout_sec)
result = cctxt.call(self.context, method_name, **kwargs) result = cctxt.call(self.context, method_name, **kwargs)
LOG.debug("Result is %s." % result) LOG.debug("Result is %s.", result)
return result return result
except RemoteError as r: except RemoteError as r:
LOG.exception(_("Error calling %s") % method_name) LOG.exception(_("Error calling %s"), method_name)
raise exception.GuestError(original_message=r.value) raise exception.GuestError(original_message=r.value)
except Exception as e: except Exception as e:
LOG.exception(_("Error calling %s") % method_name) LOG.exception(_("Error calling %s"), method_name)
raise exception.GuestError(original_message=str(e)) raise exception.GuestError(original_message=str(e))
except Timeout: except Timeout:
raise exception.GuestTimeout() raise exception.GuestTimeout()
def _cast(self, method_name, version, **kwargs): def _cast(self, method_name, version, **kwargs):
LOG.debug("Casting %s" % method_name) LOG.debug("Casting %s", method_name)
try: try:
with NotificationCastWrapper(self.context, 'guest'): with NotificationCastWrapper(self.context, 'guest'):
cctxt = self.client.prepare(version=version) cctxt = self.client.prepare(version=version)
cctxt.cast(self.context, method_name, **kwargs) cctxt.cast(self.context, method_name, **kwargs)
except RemoteError as r: except RemoteError as r:
LOG.exception(_("Error calling %s") % method_name) LOG.exception(_("Error calling %s"), method_name)
raise exception.GuestError(original_message=r.value) raise exception.GuestError(original_message=r.value)
except Exception as e: except Exception as e:
LOG.exception(_("Error calling %s") % method_name) LOG.exception(_("Error calling %s"), method_name)
raise exception.GuestError(original_message=str(e)) raise exception.GuestError(original_message=str(e))
def _get_routing_key(self): def _get_routing_key(self):
@ -199,7 +200,7 @@ class API(object):
def delete_user(self, user): def delete_user(self, user):
"""Make an asynchronous call to delete an existing database user.""" """Make an asynchronous call to delete an existing database user."""
LOG.debug("Deleting user %(user)s for instance %(instance_id)s." % LOG.debug("Deleting user %(user)s for instance %(instance_id)s.",
{'user': user, 'instance_id': self.id}) {'user': user, 'instance_id': self.id})
version = self.API_BASE_VERSION version = self.API_BASE_VERSION
@ -229,8 +230,8 @@ class API(object):
within the specified container within the specified container
""" """
LOG.debug("Deleting database %(database)s for " LOG.debug("Deleting database %(database)s for "
"instance %(instance_id)s." % {'database': database, "instance %(instance_id)s.", {'database': database,
'instance_id': self.id}) 'instance_id': self.id})
version = self.API_BASE_VERSION version = self.API_BASE_VERSION
self._cast("delete_database", version=version, database=database) self._cast("delete_database", version=version, database=database)
@ -377,7 +378,7 @@ class API(object):
def start_db_with_conf_changes(self, config_contents): def start_db_with_conf_changes(self, config_contents):
"""Start the database server.""" """Start the database server."""
LOG.debug("Sending the call to start the database process on " LOG.debug("Sending the call to start the database process on "
"the Guest with a timeout of %s." % AGENT_HIGH_TIMEOUT) "the Guest with a timeout of %s.", AGENT_HIGH_TIMEOUT)
version = self.API_BASE_VERSION version = self.API_BASE_VERSION
self._call("start_db_with_conf_changes", AGENT_HIGH_TIMEOUT, self._call("start_db_with_conf_changes", AGENT_HIGH_TIMEOUT,
@ -388,7 +389,7 @@ class API(object):
the config file to a new flavor. the config file to a new flavor.
""" """
LOG.debug("Sending the call to change the database conf file on the " LOG.debug("Sending the call to change the database conf file on the "
"Guest with a timeout of %s." % AGENT_HIGH_TIMEOUT) "Guest with a timeout of %s.", AGENT_HIGH_TIMEOUT)
version = self.API_BASE_VERSION version = self.API_BASE_VERSION
self._call("reset_configuration", AGENT_HIGH_TIMEOUT, self._call("reset_configuration", AGENT_HIGH_TIMEOUT,
@ -431,7 +432,7 @@ class API(object):
def create_backup(self, backup_info): def create_backup(self, backup_info):
"""Make async call to create a full backup of this instance.""" """Make async call to create a full backup of this instance."""
LOG.debug("Create Backup %(backup_id)s " LOG.debug("Create Backup %(backup_id)s "
"for instance %(instance_id)s." % "for instance %(instance_id)s.",
{'backup_id': backup_info['id'], 'instance_id': self.id}) {'backup_id': backup_info['id'], 'instance_id': self.id})
version = self.API_BASE_VERSION version = self.API_BASE_VERSION
@ -440,7 +441,7 @@ class API(object):
def mount_volume(self, device_path=None, mount_point=None): def mount_volume(self, device_path=None, mount_point=None):
"""Mount the volume.""" """Mount the volume."""
LOG.debug("Mount volume %(mount)s on instance %(id)s." % { LOG.debug("Mount volume %(mount)s on instance %(id)s.", {
'mount': mount_point, 'id': self.id}) 'mount': mount_point, 'id': self.id})
version = self.API_BASE_VERSION version = self.API_BASE_VERSION
@ -449,7 +450,7 @@ class API(object):
def unmount_volume(self, device_path=None, mount_point=None): def unmount_volume(self, device_path=None, mount_point=None):
"""Unmount the volume.""" """Unmount the volume."""
LOG.debug("Unmount volume %(device)s on instance %(id)s." % { LOG.debug("Unmount volume %(device)s on instance %(id)s.", {
'device': device_path, 'id': self.id}) 'device': device_path, 'id': self.id})
version = self.API_BASE_VERSION version = self.API_BASE_VERSION
@ -458,7 +459,7 @@ class API(object):
def resize_fs(self, device_path=None, mount_point=None): def resize_fs(self, device_path=None, mount_point=None):
"""Resize the filesystem.""" """Resize the filesystem."""
LOG.debug("Resize device %(device)s on instance %(id)s." % { LOG.debug("Resize device %(device)s on instance %(id)s.", {
'device': device_path, 'id': self.id}) 'device': device_path, 'id': self.id})
version = self.API_BASE_VERSION version = self.API_BASE_VERSION
@ -522,14 +523,14 @@ class API(object):
AGENT_HIGH_TIMEOUT, version=version) AGENT_HIGH_TIMEOUT, version=version)
def attach_replica(self, replica_info, slave_config): def attach_replica(self, replica_info, slave_config):
LOG.debug("Attaching replica %s." % replica_info) LOG.debug("Attaching replica %s.", replica_info)
version = self.API_BASE_VERSION version = self.API_BASE_VERSION
self._call("attach_replica", AGENT_HIGH_TIMEOUT, version=version, self._call("attach_replica", AGENT_HIGH_TIMEOUT, version=version,
replica_info=replica_info, slave_config=slave_config) replica_info=replica_info, slave_config=slave_config)
def make_read_only(self, read_only): def make_read_only(self, read_only):
LOG.debug("Executing make_read_only(%s)" % read_only) LOG.debug("Executing make_read_only(%s)", read_only)
version = self.API_BASE_VERSION version = self.API_BASE_VERSION
self._call("make_read_only", AGENT_HIGH_TIMEOUT, version=version, self._call("make_read_only", AGENT_HIGH_TIMEOUT, version=version,

View File

@ -158,10 +158,13 @@ class GuestLog(object):
(self.status == LogStatus.Restart_Required and (self.status == LogStatus.Restart_Required and
status == LogStatus.Restart_Completed)): status == LogStatus.Restart_Completed)):
self._status = status self._status = status
LOG.debug("Log status for '%s' set to %s" % (self._name, status)) LOG.debug("Log status for '%(name)s' set to %(status)s",
{'name': self._name, 'status': status})
else: else:
LOG.debug("Log status for '%s' *not* set to %s (currently %s)" % LOG.debug("Log status for '%(name)s' *not* set to %(status)s "
(self._name, status, self.status)) "(currently %(current_status)s)",
{'name': self._name, 'status': status,
'current_status': self.status})
def get_container_name(self, force=False): def get_container_name(self, force=False):
if not self._container_name or force: if not self._container_name or force:
@ -170,12 +173,12 @@ class GuestLog(object):
self.swift_client.get_container(container_name, prefix='dummy') self.swift_client.get_container(container_name, prefix='dummy')
except ClientException as ex: except ClientException as ex:
if ex.http_status == 404: if ex.http_status == 404:
LOG.debug("Container '%s' not found; creating now" % LOG.debug("Container '%s' not found; creating now",
container_name) container_name)
self.swift_client.put_container( self.swift_client.put_container(
container_name, headers=self._get_headers()) container_name, headers=self._get_headers())
else: else:
LOG.exception(_("Could not retrieve container '%s'") % LOG.exception(_("Could not retrieve container '%s'"),
container_name) container_name)
raise raise
self._container_name = container_name self._container_name = container_name
@ -223,12 +226,12 @@ class GuestLog(object):
meta_details[self.MF_LABEL_LOG_HEADER]) meta_details[self.MF_LABEL_LOG_HEADER])
except ClientException as ex: except ClientException as ex:
if ex.http_status == 404: if ex.http_status == 404:
LOG.debug("No published metadata found for log '%s'" % LOG.debug("No published metadata found for log '%s'",
self._name) self._name)
self._published_size = 0 self._published_size = 0
else: else:
LOG.exception(_("Could not get meta details for log '%s'") LOG.exception(_("Could not get meta details for log '%s'"),
% self._name) self._name)
raise raise
except ConnectionError as e: except ConnectionError as e:
# A bad endpoint will cause a ConnectionError # A bad endpoint will cause a ConnectionError
@ -237,8 +240,10 @@ class GuestLog(object):
raise exc raise exc
self._update_details() self._update_details()
LOG.debug("Log size for '%s' set to %d (published %d)" % ( LOG.debug("Log size for '%(name)s' set to %(size)d "
self._name, self._size, self._published_size)) "(published %(published)d)",
{'name': self._name, 'size': self._size,
'published': self._published_size})
def _update_details(self): def _update_details(self):
# Make sure we can read the file # Make sure we can read the file
@ -299,7 +304,7 @@ class GuestLog(object):
if self.exposed: if self.exposed:
if self._log_rotated(): if self._log_rotated():
LOG.debug("Log file rotation detected for '%s' - " LOG.debug("Log file rotation detected for '%s' - "
"discarding old log" % self._name) "discarding old log", self._name)
self._delete_log_components() self._delete_log_components()
if os.path.isfile(self._file): if os.path.isfile(self._file):
self._publish_to_container(self._file) self._publish_to_container(self._file)
@ -402,10 +407,10 @@ class GuestLog(object):
return 'log-%s' % str(datetime.utcnow()).replace(' ', 'T') return 'log-%s' % str(datetime.utcnow()).replace(' ', 'T')
def _get_meta_details(self): def _get_meta_details(self):
LOG.debug("Getting meta details for '%s'" % self._name) LOG.debug("Getting meta details for '%s'", self._name)
metafile_name = self._metafile_name() metafile_name = self._metafile_name()
container_name = self.get_container_name() container_name = self.get_container_name()
headers, metafile_details = self.swift_client.get_object( headers, metafile_details = self.swift_client.get_object(
container_name, metafile_name) container_name, metafile_name)
LOG.debug("Found meta details for '%s'" % self._name) LOG.debug("Found meta details for '%s'", self._name)
return self._codec.deserialize(metafile_details) return self._codec.deserialize(metafile_details)

View File

@ -57,7 +57,7 @@ class AgentHeartBeat(dbmodels.DatabaseModelBase):
if not self.is_valid(): if not self.is_valid():
raise exception.InvalidModelError(errors=self.errors) raise exception.InvalidModelError(errors=self.errors)
self['updated_at'] = utils.utcnow() self['updated_at'] = utils.utcnow()
LOG.debug("Saving %(name)s: %(dict)s" % LOG.debug("Saving %(name)s: %(dict)s",
{'name': self.__class__.__name__, 'dict': self.__dict__}) {'name': self.__class__.__name__, 'dict': self.__dict__})
return get_db_api().save(self) return get_db_api().save(self)
@ -84,7 +84,7 @@ class AgentHeartBeat(dbmodels.DatabaseModelBase):
return cls.find_by(instance_id=instance_id) return cls.find_by(instance_id=instance_id)
except exception.NotFound: except exception.NotFound:
LOG.exception(_("Error finding instance %s") % instance_id) LOG.exception(_("Error finding instance %s"), instance_id)
raise exception.ModelNotFoundError(instance_id=instance_id) raise exception.ModelNotFoundError(instance_id=instance_id)
@staticmethod @staticmethod

View File

@ -168,7 +168,7 @@ def output(log_message=None, success_message=None,
fail_msg = fail_msg % message_args fail_msg = fail_msg % message_args
except Exception: except Exception:
# if there's a problem, just log it and drive on # if there's a problem, just log it and drive on
LOG.warning(_("Could not apply message args: %s") % LOG.warning(_("Could not apply message args: %s"),
message_args) message_args)
pass pass

View File

@ -68,7 +68,7 @@ class NewRelicLicenseDriver(module_driver.ModuleDriver):
cmd[-1] = cmd[-1] % license_key cmd[-1] = cmd[-1] % license_key
utils.execute_with_timeout(*cmd, **exec_args) utils.execute_with_timeout(*cmd, **exec_args)
except Exception: except Exception:
LOG.exception(_("Could not install license key '%s'") % LOG.exception(_("Could not install license key '%s'"),
license_key) license_key)
raise raise
@ -81,7 +81,7 @@ class NewRelicLicenseDriver(module_driver.ModuleDriver):
cmd.append(command) cmd.append(command)
utils.execute_with_timeout(*cmd, **exec_args) utils.execute_with_timeout(*cmd, **exec_args)
except Exception: except Exception:
LOG.exception(_("Could not %s New Relic server") % command) LOG.exception(_("Could not %s New Relic server"), command)
raise raise
@module_driver.output( @module_driver.output(

View File

@ -66,7 +66,7 @@ class ModuleManager(object):
applied, message = driver.apply( applied, message = driver.apply(
name, datastore, ds_version, data_file, admin_module) name, datastore, ds_version, data_file, admin_module)
except Exception as ex: except Exception as ex:
LOG.exception(_("Could not apply module '%s'") % name) LOG.exception(_("Could not apply module '%s'"), name)
applied = False applied = False
message = ex.message message = ex.message
finally: finally:
@ -149,7 +149,7 @@ class ModuleManager(object):
result_file, codec=stream_codecs.JsonCodec()) result_file, codec=stream_codecs.JsonCodec())
except Exception: except Exception:
if not result: if not result:
LOG.exception(_("Could not find module result in %s") % LOG.exception(_("Could not find module result in %s"),
result_file) result_file)
raise raise
return result return result
@ -217,7 +217,7 @@ class ModuleManager(object):
name, datastore, ds_version, contents_file) name, datastore, ds_version, contents_file)
cls.remove_module_result(module_dir) cls.remove_module_result(module_dir)
except Exception: except Exception:
LOG.exception(_("Could not remove module '%s'") % name) LOG.exception(_("Could not remove module '%s'"), name)
raise raise
return removed, message return removed, message

View File

@ -134,7 +134,7 @@ class RPMPackagerMixin(BasePackagerMixin):
utils.execute("rpm", "-e", "--nodeps", package_name, utils.execute("rpm", "-e", "--nodeps", package_name,
run_as_root=True, root_helper="sudo") run_as_root=True, root_helper="sudo")
except ProcessExecutionError: except ProcessExecutionError:
LOG.exception(_("Error removing conflict %(package)s") % LOG.exception(_("Error removing conflict %(package)s"),
package_name) package_name)
def _install(self, packages, time_out): def _install(self, packages, time_out):
@ -179,7 +179,7 @@ class RPMPackagerMixin(BasePackagerMixin):
line = matches.group() line = matches.group()
return line return line
LOG.error(_("Unexpected output from rpm command. (%(output)s)") % LOG.error(_("Unexpected output from rpm command. (%(output)s)"),
{'output': std_out}) {'output': std_out})
def pkg_remove(self, package_name, time_out): def pkg_remove(self, package_name, time_out):
@ -215,7 +215,7 @@ class RedhatPackagerMixin(RPMPackagerMixin):
'.*already installed and latest version', '.*already installed and latest version',
'Updated:', 'Updated:',
'Installed:'] 'Installed:']
LOG.debug("Running package install command: %s" % cmd) LOG.debug("Running package install command: %s", cmd)
i, match = self.pexpect_run(cmd, output_expects, time_out) i, match = self.pexpect_run(cmd, output_expects, time_out)
if i == 0: if i == 0:
raise PkgPermissionError(_("Invalid permissions.")) raise PkgPermissionError(_("Invalid permissions."))
@ -242,7 +242,7 @@ class RedhatPackagerMixin(RPMPackagerMixin):
""" """
cmd = "sudo yum --color=never -y remove %s" % package_name cmd = "sudo yum --color=never -y remove %s" % package_name
LOG.debug("Running package remove command: %s" % cmd) LOG.debug("Running package remove command: %s", cmd)
output_expects = ['\[sudo\] password for .*:', output_expects = ['\[sudo\] password for .*:',
'No Packages marked for removal', 'No Packages marked for removal',
'Removed:'] 'Removed:']
@ -323,7 +323,7 @@ class DebianPackagerMixin(BasePackagerMixin):
"broken packages."), "broken packages."),
"Setting up (.*)", "Setting up (.*)",
"is already the newest version"] "is already the newest version"]
LOG.debug("Running package install command: %s" % cmd) LOG.debug("Running package install command: %s", cmd)
i, match = self.pexpect_run(cmd, output_expects, time_out) i, match = self.pexpect_run(cmd, output_expects, time_out)
if i == 0: if i == 0:
raise PkgPermissionError(_("Invalid permissions.")) raise PkgPermissionError(_("Invalid permissions."))
@ -356,7 +356,7 @@ class DebianPackagerMixin(BasePackagerMixin):
"'sudo dpkg --configure -a'"), "'sudo dpkg --configure -a'"),
"Unable to lock the administration directory", "Unable to lock the administration directory",
"Removing %s*" % package_name] "Removing %s*" % package_name]
LOG.debug("Running remove package command %s" % cmd) LOG.debug("Running remove package command %s", cmd)
i, match = self.pexpect_run(cmd, output_expects, time_out) i, match = self.pexpect_run(cmd, output_expects, time_out)
if i == 0: if i == 0:
raise PkgPermissionError(_("Invalid permissions.")) raise PkgPermissionError(_("Invalid permissions."))
@ -415,7 +415,7 @@ class DebianPackagerMixin(BasePackagerMixin):
installed_version = self.pkg_version(package_name) installed_version = self.pkg_version(package_name)
if ((package_version and installed_version == package_version) or if ((package_version and installed_version == package_version) or
(installed_version and not package_version)): (installed_version and not package_version)):
LOG.debug("Package %s already installed." % package_name) LOG.debug("Package %s already installed.", package_name)
else: else:
return False return False
return True return True

View File

@ -22,5 +22,5 @@ LOG = logging.getLogger(__name__)
def get_backup_strategy(backup_driver, ns=__name__): def get_backup_strategy(backup_driver, ns=__name__):
LOG.debug("Getting backup strategy: %s." % backup_driver) LOG.debug("Getting backup strategy: %s.", backup_driver)
return Strategy.get_strategy(backup_driver, ns) return Strategy.get_strategy(backup_driver, ns)

View File

@ -66,12 +66,12 @@ class NodetoolSnapshot(base.BackupRunner):
def _remove_snapshot(self, snapshot_name): def _remove_snapshot(self, snapshot_name):
LOG.debug('Clearing snapshot(s) for all keyspaces with snapshot name ' LOG.debug('Clearing snapshot(s) for all keyspaces with snapshot name '
'"%s".' % snapshot_name) '"%s".', snapshot_name)
utils.execute('nodetool', 'clearsnapshot', '-t %s' % snapshot_name) utils.execute('nodetool', 'clearsnapshot', '-t %s' % snapshot_name)
def _snapshot_all_keyspaces(self, snapshot_name): def _snapshot_all_keyspaces(self, snapshot_name):
LOG.debug('Creating snapshot(s) for all keyspaces with snapshot name ' LOG.debug('Creating snapshot(s) for all keyspaces with snapshot name '
'"%s".' % snapshot_name) '"%s".', snapshot_name)
utils.execute('nodetool', 'snapshot', '-t %s' % snapshot_name) utils.execute('nodetool', 'snapshot', '-t %s' % snapshot_name)
@property @property
@ -98,16 +98,16 @@ class NodetoolSnapshot(base.BackupRunner):
at least the system keyspace. Fail if there is nothing to backup. at least the system keyspace. Fail if there is nothing to backup.
""" """
LOG.debug('Searching for all snapshot(s) with name "%s".' LOG.debug('Searching for all snapshot(s) with name "%s".',
% snapshot_name) snapshot_name)
snapshot_files = operating_system.list_files_in_directory( snapshot_files = operating_system.list_files_in_directory(
data_dir, recursive=True, include_dirs=False, data_dir, recursive=True, include_dirs=False,
pattern='.*/snapshots/%s/.*\.%s' % (snapshot_name, pattern='.*/snapshots/%s/.*\.%s' % (snapshot_name,
self._SNAPSHOT_EXTENSION), self._SNAPSHOT_EXTENSION),
as_root=True) as_root=True)
num_snapshot_files = len(snapshot_files) num_snapshot_files = len(snapshot_files)
LOG.debug('Found %d snapshot (*.%s) files.' LOG.debug('Found %(num)d snapshot (*.%(ext)s) files.',
% (num_snapshot_files, self._SNAPSHOT_EXTENSION)) {'num': num_snapshot_files, 'ext': self._SNAPSHOT_EXTENSION})
if num_snapshot_files > 0: if num_snapshot_files > 0:
return ('sudo tar ' return ('sudo tar '
'--transform="s#snapshots/%s/##" -cpPf - -C "%s" "%s"' '--transform="s#snapshots/%s/##" -cpPf - -C "%s" "%s"'

View File

@ -59,7 +59,7 @@ class DB2Backup(base.BackupRunner):
except exception.ProcessExecutionError: except exception.ProcessExecutionError:
LOG.exception(_("An error occurred while trying to " LOG.exception(_("An error occurred while trying to "
"estimate backup size")) "estimate backup size"))
LOG.debug("Estimated size for databases: " + str(size)) LOG.debug("Estimated size for databases: %d", size)
return size return size
def estimate_log_size(self): def estimate_log_size(self):
@ -132,7 +132,7 @@ class DB2OnlineBackup(DB2Backup):
except exception.ProcessExecutionError: except exception.ProcessExecutionError:
LOG.exception(_("An error occurred while trying to estimate log " LOG.exception(_("An error occurred while trying to estimate log "
"size")) "size"))
LOG.debug("Estimated log size for all databases: " + str(log_size)) LOG.debug("Estimated log size for all databases: %d", log_size)
return log_size return log_size
def run_backup(self): def run_backup(self):

View File

@ -187,9 +187,11 @@ class PgBaseBackup(base.BackupRunner, PgBaseBackupUtil):
return False return False
metadata = self.base_backup_metadata( metadata = self.base_backup_metadata(
os.path.join(WAL_ARCHIVE_DIR, self.mrb)) os.path.join(WAL_ARCHIVE_DIR, self.mrb))
LOG.debug("Label to pg_basebackup: %s label found: %s" % LOG.debug("Label to pg_basebackup: %(base_filename)s "
(self.base_filename, metadata['label'])) "label found: %(label)s",
LOG.info(_("Metadata for backup: %s.") % str(metadata)) {'base_filename': self.base_filename,
'label': metadata['label']})
LOG.info(_("Metadata for backup: %s."), str(metadata))
return metadata['label'] == self.base_filename return metadata['label'] == self.base_filename
try: try:

View File

@ -91,7 +91,7 @@ class InnoBackupEx(base.BackupRunner):
match = lsn.search(output) match = lsn.search(output)
if match: if match:
meta = {'lsn': match.group(1)} meta = {'lsn': match.group(1)}
LOG.info(_("Metadata for backup: %s.") % str(meta)) LOG.info(_("Metadata for backup: %s."), str(meta))
return meta return meta
@property @property

View File

@ -41,8 +41,9 @@ def get_instance(manager):
replication_strategy, __replication_namespace) replication_strategy, __replication_namespace)
__replication_instance = replication_strategy_cls() __replication_instance = replication_strategy_cls()
__replication_manager = manager __replication_manager = manager
LOG.debug('Got replication instance from: %s.%s' % ( LOG.debug('Got replication instance from: %(namespace)s.%(strategy)s',
__replication_namespace, __replication_strategy)) {'namespace': __replication_namespace,
'strategy': __replication_strategy})
return __replication_instance return __replication_instance

View File

@ -49,7 +49,7 @@ class MariaDBGTIDReplication(mysql_base.MysqlReplicationBase):
def connect_to_master(self, service, snapshot): def connect_to_master(self, service, snapshot):
logging_config = snapshot['log_position'] logging_config = snapshot['log_position']
LOG.debug("connect_to_master %s" % logging_config['replication_user']) LOG.debug("connect_to_master %s", logging_config['replication_user'])
change_master_cmd = ( change_master_cmd = (
"CHANGE MASTER TO MASTER_HOST='%(host)s', " "CHANGE MASTER TO MASTER_HOST='%(host)s', "
"MASTER_PORT=%(port)s, " "MASTER_PORT=%(port)s, "

View File

@ -78,7 +78,7 @@ class PostgresqlReplicationStreaming(base.Replication):
snapshot_id = snapshot_info['id'] snapshot_id = snapshot_info['id']
replica_number = snapshot_info.get('replica_number', 1) replica_number = snapshot_info.get('replica_number', 1)
LOG.debug("Acquiring backup for replica number %d." % replica_number) LOG.debug("Acquiring backup for replica number %d.", replica_number)
# Only create a backup if it's the first replica # Only create a backup if it's the first replica
if replica_number == 1: if replica_number == 1:
AGENT.execute_backup( AGENT.execute_backup(
@ -153,7 +153,7 @@ class PostgresqlReplicationStreaming(base.Replication):
the replication user in pg_hba and ensure that WAL logging is the replication user in pg_hba and ensure that WAL logging is
at the appropriate level (use the same settings as backups) at the appropriate level (use the same settings as backups)
""" """
LOG.debug("Enabling as master, with cfg: %s " % master_config) LOG.debug("Enabling as master, with cfg: %s ", master_config)
self._get_or_create_replication_user(service) self._get_or_create_replication_user(service)
hba_entry = "host replication replicator 0.0.0.0/0 md5 \n" hba_entry = "host replication replicator 0.0.0.0/0 md5 \n"
@ -177,7 +177,7 @@ class PostgresqlReplicationStreaming(base.Replication):
"""Adds appropriate config options to postgresql.conf, and writes out """Adds appropriate config options to postgresql.conf, and writes out
the recovery.conf file used to set up replication the recovery.conf file used to set up replication
""" """
LOG.debug("Got slave_config: %s" % str(slave_config)) LOG.debug("Got slave_config: %s", str(slave_config))
self._write_standby_recovery_file(service, snapshot, sslmode='prefer') self._write_standby_recovery_file(service, snapshot, sslmode='prefer')
self.enable_hot_standby(service) self.enable_hot_standby(service)
# Ensure the WAL arch is empty before restoring # Ensure the WAL arch is empty before restoring
@ -225,9 +225,8 @@ class PostgresqlReplicationStreaming(base.Replication):
'--source-server=' + conninfo]) '--source-server=' + conninfo])
out, err = utils.execute("sudo", "su", "-", service.pgsql_owner, out, err = utils.execute("sudo", "su", "-", service.pgsql_owner,
"-c", "%s" % cmd_full, check_exit_code=0) "-c", "%s" % cmd_full, check_exit_code=0)
LOG.debug("Got stdout %s and stderr %s from pg_rewind" % LOG.debug("Got stdout %(out)s and stderr %(err)s from pg_rewind",
(str(out), str(err))) {'out': str(out), 'err': str(err)})
operating_system.move(tmprec, rec, as_root=True) operating_system.move(tmprec, rec, as_root=True)
def demote_master(self, service): def demote_master(self, service):
@ -253,7 +252,7 @@ class PostgresqlReplicationStreaming(base.Replication):
def _write_standby_recovery_file(self, service, snapshot, def _write_standby_recovery_file(self, service, snapshot,
sslmode='prefer'): sslmode='prefer'):
LOG.info(_("Snapshot data received: %s") % str(snapshot)) LOG.info(_("Snapshot data received: %s"), str(snapshot))
logging_config = snapshot['log_position'] logging_config = snapshot['log_position']
conninfo_params = \ conninfo_params = \

View File

@ -99,7 +99,7 @@ class MysqlReplicationBase(base.Replication):
snapshot_id = snapshot_info['id'] snapshot_id = snapshot_info['id']
replica_number = snapshot_info.get('replica_number', 1) replica_number = snapshot_info.get('replica_number', 1)
LOG.debug("Acquiring backup for replica number %d." % replica_number) LOG.debug("Acquiring backup for replica number %d.", replica_number)
# Only create a backup if it's the first replica # Only create a backup if it's the first replica
if replica_number == 1: if replica_number == 1:
AGENT.execute_backup( AGENT.execute_backup(
@ -108,8 +108,10 @@ class MysqlReplicationBase(base.Replication):
incremental_runner=self.repl_incr_backup_runner) incremental_runner=self.repl_incr_backup_runner)
else: else:
LOG.debug("Using existing backup created for previous replica.") LOG.debug("Using existing backup created for previous replica.")
LOG.debug("Replication snapshot %s used for replica number %d." LOG.debug("Replication snapshot %(snapshot_id)s used for replica "
% (snapshot_id, replica_number)) "number %(replica_number)d.",
{'snapshot_id': snapshot_id,
'replica_number': replica_number})
replication_user = self._create_replication_user() replication_user = self._create_replication_user()
service.grant_replication_privilege(replication_user) service.grant_replication_privilege(replication_user)

View File

@ -62,9 +62,9 @@ class MysqlBinlogReplication(mysql_base.MysqlReplicationBase):
def _read_log_position(self): def _read_log_position(self):
INFO_FILE = ('%s/xtrabackup_binlog_info' % MySqlApp.get_data_dir()) INFO_FILE = ('%s/xtrabackup_binlog_info' % MySqlApp.get_data_dir())
LOG.info(_("Setting read permissions on %s") % INFO_FILE) LOG.info(_("Setting read permissions on %s"), INFO_FILE)
operating_system.chmod(INFO_FILE, FileMode.ADD_READ_ALL, as_root=True) operating_system.chmod(INFO_FILE, FileMode.ADD_READ_ALL, as_root=True)
LOG.info(_("Reading log position from %s") % INFO_FILE) LOG.info(_("Reading log position from %s"), INFO_FILE)
try: try:
with open(INFO_FILE, 'rb') as f: with open(INFO_FILE, 'rb') as f:
row = next(csv.reader(f, delimiter='\t', row = next(csv.reader(f, delimiter='\t',

View File

@ -47,7 +47,7 @@ class MysqlGTIDReplication(mysql_base.MysqlReplicationBase):
service.execute_on_client(set_gtid_cmd) service.execute_on_client(set_gtid_cmd)
logging_config = snapshot['log_position'] logging_config = snapshot['log_position']
LOG.debug("connect_to_master %s" % logging_config['replication_user']) LOG.debug("connect_to_master %s", logging_config['replication_user'])
change_master_cmd = ( change_master_cmd = (
"CHANGE MASTER TO MASTER_HOST='%(host)s', " "CHANGE MASTER TO MASTER_HOST='%(host)s', "
"MASTER_PORT=%(port)s, " "MASTER_PORT=%(port)s, "
@ -66,9 +66,9 @@ class MysqlGTIDReplication(mysql_base.MysqlReplicationBase):
def _read_last_master_gtid(self): def _read_last_master_gtid(self):
INFO_FILE = ('%s/xtrabackup_binlog_info' % MySqlApp.get_data_dir()) INFO_FILE = ('%s/xtrabackup_binlog_info' % MySqlApp.get_data_dir())
LOG.info(_("Setting read permissions on %s") % INFO_FILE) LOG.info(_("Setting read permissions on %s"), INFO_FILE)
operating_system.chmod(INFO_FILE, FileMode.ADD_READ_ALL, as_root=True) operating_system.chmod(INFO_FILE, FileMode.ADD_READ_ALL, as_root=True)
LOG.info(_("Reading last master GTID from %s") % INFO_FILE) LOG.info(_("Reading last master GTID from %s"), INFO_FILE)
try: try:
with open(INFO_FILE, 'rb') as f: with open(INFO_FILE, 'rb') as f:
row = f.read().split('\t') row = f.read().split('\t')

View File

@@ -20,5 +20,5 @@ LOG = logging.getLogger(__name__)
def get_restore_strategy(restore_driver, ns=__name__):
    """Resolve the restore strategy registered under *restore_driver*.

    :param restore_driver: name of the restore strategy to load.
    :param ns: namespace (module path) searched for the strategy;
        defaults to this module.
    :returns: whatever ``Strategy.get_strategy`` resolves for the name
        (presumably the strategy class — confirm against Strategy).
    """
    # Pass the value as a lazy logger argument (not %-interpolated here)
    # so formatting is skipped entirely when DEBUG logging is disabled.
    LOG.debug("Getting restore strategy: %s.", restore_driver)
    return Strategy.get_strategy(restore_driver, ns)

View File

@@ -89,7 +89,7 @@ class RestoreRunner(Strategy):
content_length += len(chunk) content_length += len(chunk)
process.stdin.close() process.stdin.close()
utils.raise_if_process_errored(process, RestoreError) utils.raise_if_process_errored(process, RestoreError)
LOG.debug("Restored %s bytes from stream." % content_length) LOG.debug("Restored %s bytes from stream.", content_length)
return content_length return content_length

View File

@@ -68,7 +68,7 @@ class PgDump(base.RestoreRunner):
content_length += len(chunk) content_length += len(chunk)
process.stdin.close() process.stdin.close()
self._handle_errors(process) self._handle_errors(process)
LOG.info(_("Restored %s bytes from stream.") % content_length) LOG.info(_("Restored %s bytes from stream."), content_length)
return content_length return content_length

View File

@@ -274,7 +274,7 @@ class InnoBackupExIncremental(InnoBackupEx):
incremental_dir = None incremental_dir = None
if 'parent_location' in metadata: if 'parent_location' in metadata:
LOG.info(_("Restoring parent: %(parent_location)s" LOG.info(_("Restoring parent: %(parent_location)s"
" checksum: %(parent_checksum)s.") % metadata) " checksum: %(parent_checksum)s."), metadata)
parent_location = metadata['parent_location'] parent_location = metadata['parent_location']
parent_checksum = metadata['parent_checksum'] parent_checksum = metadata['parent_checksum']
# Restore parents recursively so backup are applied sequentially # Restore parents recursively so backup are applied sequentially

View File

@@ -72,7 +72,7 @@ class VolumeDevice(object):
""" """
try: try:
num_tries = CONF.num_tries num_tries = CONF.num_tries
LOG.debug("Checking if %s exists." % self.device_path) LOG.debug("Checking if %s exists.", self.device_path)
utils.execute("blockdev", "--getsize64", self.device_path, utils.execute("blockdev", "--getsize64", self.device_path,
run_as_root=True, root_helper="sudo", run_as_root=True, root_helper="sudo",
@@ -83,7 +83,7 @@ class VolumeDevice(object):
def _check_format(self): def _check_format(self):
"""Checks that a volume is formatted.""" """Checks that a volume is formatted."""
LOG.debug("Checking whether '%s' is formatted." % self.device_path) LOG.debug("Checking whether '%s' is formatted.", self.device_path)
try: try:
stdout, stderr = utils.execute( stdout, stderr = utils.execute(
"dumpe2fs", self.device_path, "dumpe2fs", self.device_path,
@@ -107,7 +107,7 @@ class VolumeDevice(object):
format_options = shlex.split(CONF.format_options) format_options = shlex.split(CONF.format_options)
format_options.append(self.device_path) format_options.append(self.device_path)
volume_format_timeout = CONF.volume_format_timeout volume_format_timeout = CONF.volume_format_timeout
LOG.debug("Formatting '%s'." % self.device_path) LOG.debug("Formatting '%s'.", self.device_path)
try: try:
utils.execute_with_timeout( utils.execute_with_timeout(
"mkfs", "--type", volume_fstype, *format_options, "mkfs", "--type", volume_fstype, *format_options,
@@ -125,7 +125,8 @@ class VolumeDevice(object):
def mount(self, mount_point, write_to_fstab=True): def mount(self, mount_point, write_to_fstab=True):
"""Mounts, and writes to fstab.""" """Mounts, and writes to fstab."""
LOG.debug("Will mount %s at %s." % (self.device_path, mount_point)) LOG.debug("Will mount %(path)s at %(mount_point)s.",
{'path': self.device_path, 'mount_point': mount_point})
mount_point = VolumeMountPoint(self.device_path, mount_point) mount_point = VolumeMountPoint(self.device_path, mount_point)
mount_point.mount() mount_point.mount()
@@ -152,7 +153,7 @@ class VolumeDevice(object):
# Thus it may be necessary to wait for the mount and then unmount # Thus it may be necessary to wait for the mount and then unmount
# the fs again (since the volume was just attached). # the fs again (since the volume was just attached).
if self._wait_for_mount(mount_point, timeout=2): if self._wait_for_mount(mount_point, timeout=2):
LOG.debug("Unmounting '%s' before resizing." % mount_point) LOG.debug("Unmounting '%s' before resizing.", mount_point)
self.unmount(mount_point) self.unmount(mount_point)
try: try:
utils.execute("e2fsck", "-f", "-p", self.device_path, utils.execute("e2fsck", "-f", "-p", self.device_path,
@@ -180,7 +181,7 @@ class VolumeDevice(object):
mount_points = self.mount_points(device_path) mount_points = self.mount_points(device_path)
for mnt in mount_points: for mnt in mount_points:
LOG.info(_("Device '%(device)s' is mounted on " LOG.info(_("Device '%(device)s' is mounted on "
"'%(mount_point)s'. Unmounting now.") % "'%(mount_point)s'. Unmounting now."),
{'device': device_path, 'mount_point': mnt}) {'device': device_path, 'mount_point': mnt})
self.unmount(mnt) self.unmount(mnt)
@@ -234,7 +235,7 @@ class VolumeMountPoint(object):
fstab_line = ("%s\t%s\t%s\t%s\t0\t0" % fstab_line = ("%s\t%s\t%s\t%s\t0\t0" %
(self.device_path, self.mount_point, self.volume_fstype, (self.device_path, self.mount_point, self.volume_fstype,
self.mount_options)) self.mount_options))
LOG.debug("Writing new line to fstab:%s" % fstab_line) LOG.debug("Writing new line to fstab:%s", fstab_line)
with open('/etc/fstab', "r") as fstab: with open('/etc/fstab', "r") as fstab:
fstab_content = fstab.read() fstab_content = fstab.read()
with NamedTemporaryFile(mode='w', delete=False) as tempfstab: with NamedTemporaryFile(mode='w', delete=False) as tempfstab: