Fix Freezer Agent Logging

Standardize on oslo.log.
Log exception stack traces where appropriate.
Add module-level oslo.log loggers to all files.
Remove '[*]' from log messages.
Log command line arguments at Freezer Agent start
so that errors can be traced to specific invocations.

Leave the scheduler module mostly unchanged because it
handles Python logging configuration itself.

Change-Id: I23de0558409e63978303963d592a4e5ee4dee2b5
changes/80/331880/19
Deklan Dieterly
parent c682addec0
commit 00db2b0d95
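
The pattern applied across the modules in the hunks below amounts to a module-level
oslo.log logger, LOG.exception() where a stack trace is useful, and logging sys.argv
at agent start. A minimal sketch of that pattern follows; the run_job stub and the
freezer_agent_start wrapper are illustrative stand-ins, not the actual Freezer
dispatch code, and real log configuration happens via freezer_config.config() and
setup_logging() as shown in the main() hunk further down.

import sys

from oslo_log import log

LOG = log.getLogger(__name__)


def run_job(args):
    # Stand-in for the real job dispatch; illustrative only.
    return 0


def freezer_agent_start(args):
    # Record the exact invocation so errors can be traced to it.
    LOG.info("Begin freezer agent process with args: {0}".format(sys.argv))
    try:
        return run_job(args)
    except Exception as err:
        # LOG.exception logs the message together with the current stack trace.
        LOG.exception(err)
        LOG.critical("End freezer agent process unsuccessfully")
        raise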

@ -18,8 +18,12 @@ limitations under the License.
import json
import requests
from oslo_log import log
from freezer.apiclient import exceptions
LOG = log.getLogger(__name__)
class ActionManager(object):

@ -17,8 +17,12 @@ limitations under the License.
import json
import requests
from oslo_log import log
from freezer.apiclient import exceptions
LOG = log.getLogger(__name__)
class BackupsManager(object):
@ -52,7 +56,7 @@ class BackupsManager(object):
def list_all(self, limit=10, offset=0, search=None):
"""
Retrieves a list of backup infos
Retrieves a list of backups
:param limit: number of result to return (optional, default 10)
:param offset: order of first document (optional, default 0)

@ -23,6 +23,7 @@ from keystoneauth1.identity import v2
from keystoneauth1.identity import v3
from keystoneauth1 import session as ksa_session
from oslo_config import cfg
from oslo_log import log
from freezer.apiclient import actions
from freezer.apiclient import backups
@ -31,8 +32,7 @@ from freezer.apiclient import registration
from freezer.apiclient import sessions
from freezer.utils import utils
CONF = cfg.CONF
LOG = log.getLogger(__name__)
FREEZER_SERVICE_TYPE = 'backup'

@ -16,6 +16,9 @@ limitations under the License.
"""
import json
from oslo_log import log
LOG = log.getLogger(__name__)
class ApiClientException(Exception):
@ -35,7 +38,7 @@ class ApiClientException(Exception):
"""
try:
body = json.loads(r.text)
message = "[*] Error {0}: {1}".format(
message = "Error {0}: {1}".format(
r.status_code,
body['description'])
except Exception:
@ -52,7 +55,7 @@ class ApiClientException(Exception):
:return: string with error message or None if it fails
"""
try:
message = "[*] Error {0}: {1}".format(
message = "Error {0}: {1}".format(
r.status_code,
r.text)
except Exception:

@ -17,8 +17,12 @@ limitations under the License.
import json
import requests
from oslo_log import log
from freezer.apiclient import exceptions
LOG = log.getLogger(__name__)
class JobManager(object):

@ -17,8 +17,12 @@ limitations under the License.
import json
import requests
from oslo_log import log
from freezer.apiclient import exceptions
LOG = log.getLogger(__name__)
class RegistrationManager(object):

@ -17,8 +17,12 @@ limitations under the License.
import json
import requests
from oslo_log import log
from freezer.apiclient import exceptions
LOG = log.getLogger(__name__)
class SessionManager(object):

@ -552,7 +552,7 @@ def get_backup_args():
'trickle', path=":".join(os.environ.get('PATH')))
if trickle_executable:
LOG.info("[*] Info: Starting trickle ...")
LOG.info("Info: Starting trickle ...")
trickle_command = '{0} -d {1} -u {2} '.\
format(trickle_executable,
getattr(backup_args, 'download_limit') or -1,
@ -570,7 +570,7 @@ def get_backup_args():
else:
os.environ["tricklecount"] = str(1)
else:
LOG.warn("[*] Trickle not found. Switching to normal mode without "
LOG.warn("Trickle not found. Switching to normal mode without "
"limiting bandwidth")
if backup_args.config:
# remove index tmp_file from backup arguments dict

@ -24,15 +24,13 @@ import six
from six.moves import queue
import time
from oslo_config import cfg
from oslo_log import log
from freezer.engine.exceptions import EngineException
from freezer.utils import streaming
from freezer.utils import utils
CONF = cfg.CONF
logging = log.getLogger(__name__)
LOG = log.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
@ -116,7 +114,7 @@ class BackupEngine(object):
if not except_queue.empty():
while not except_queue.empty():
e = except_queue.get_nowait()
logging.critical('Engine error: {0}'.format(e))
LOG.exception('Engine error: {0}'.format(e))
return True
else:
return False
@ -170,16 +168,16 @@ class BackupEngine(object):
"""
:type backup: freezer.storage.Backup
"""
logging.info("Creation restore path: {0}".format(restore_path))
LOG.info("Creation restore path: {0}".format(restore_path))
utils.create_dir_tree(restore_path)
if not overwrite and not utils.is_empty_dir(restore_path):
raise Exception(
"Restore dir is not empty. "
"Please use --overwrite or provide different path.")
logging.info("Creation restore path completed")
LOG.info("Creation restore path completed")
for level in range(0, backup.level + 1):
b = backup.full_backup.increments[level]
logging.info("Restore backup {0}".format(b))
LOG.info("Restore backup {0}".format(b))
# Use SimpleQueue because Queue does not work on Mac OS X.
read_except_queue = SimpleQueue()
@ -214,7 +212,7 @@ class BackupEngine(object):
if not except_queue.empty():
while not except_queue.empty():
e = except_queue.get()
logging.critical('Engine error: {0}'.format(e))
LOG.exception('Engine error: {0}'.format(e))
return True
else:
return False
@ -228,8 +226,8 @@ class BackupEngine(object):
if tar_stream.exitcode or got_exception:
raise EngineException("Engine error. Failed to restore.")
logging.info(
'[*] Restore execution successfully executed \
LOG.info(
'Restore execution successfully executed \
for backup name {0}'.format(backup))
@abc.abstractmethod

@ -17,6 +17,10 @@ Freezer Tar related functions
"""
from freezer.utils import utils
from oslo_log import log
LOG = log.getLogger(__name__)
class TarCommandBuilder(object):
"""
@ -159,6 +163,6 @@ def get_tar_flag_from_algo(compression):
}
compression_exec = utils.get_executable_path(compression)
if not compression_exec:
raise Exception("[*] Critical Error: [*] {0} executable not found ".
raise Exception("Critical Error: {0} executable not found ".
format(compression))
return algo.get(compression)

@ -15,14 +15,17 @@ limitations under the License.
Freezer general utils functions
"""
import logging
import os
import subprocess
from oslo_log import log
from freezer.engine import engine
from freezer.engine.tar import tar_builders
from freezer.utils import winutils
LOG = log.getLogger(__name__)
class TarBackupEngine(engine.BackupEngine):
@ -53,7 +56,7 @@ class TarBackupEngine(engine.BackupEngine):
self.storage.upload_freezer_meta_data(backup, metadata)
def backup_data(self, backup_path, manifest_path):
logging.info("Tar engine backup stream enter")
LOG.info("Tar engine backup stream enter")
tar_command = tar_builders.TarCommandBuilder(
backup_path, self.compression_algo, self.is_windows)
if self.encrypt_pass_file:
@ -65,7 +68,7 @@ class TarBackupEngine(engine.BackupEngine):
command = tar_command.build()
logging.info("Execution command: \n{}".format(command))
LOG.info("Execution command: \n{}".format(command))
tar_process = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
@ -77,7 +80,7 @@ class TarBackupEngine(engine.BackupEngine):
self.check_process_output(tar_process, 'Backup')
logging.info("Tar engine streaming end")
LOG.info("Tar engine streaming end")
def restore_level(self, restore_path, read_pipe, backup, except_queue):
"""
@ -119,12 +122,13 @@ class TarBackupEngine(engine.BackupEngine):
while True:
tar_process.stdin.write(read_pipe.recv_bytes())
except EOFError:
logging.info('[*] Pipe closed as EOF reached. '
'Data transmitted successfully')
LOG.info('Pipe closed as EOF reached. '
'Data transmitted successfully')
finally:
self.check_process_output(tar_process, 'Restore')
except Exception as e:
LOG.exception(e)
except_queue.put(e)
@staticmethod
@ -144,10 +148,10 @@ class TarBackupEngine(engine.BackupEngine):
tar_err = process.communicate()[1]
if tar_err:
logging.error('{0} error: {1}'.format(function, tar_err))
LOG.error('{0} error: {1}'.format(function, tar_err))
if process.returncode:
logging.error('{0} return code is not 0'
.format(process.returncode))
LOG.error('{0} return code is not 0'
.format(process.returncode))
raise Exception('{0} process failed with return code: {1}'
.format(function, process.returncode))

@ -34,7 +34,7 @@ from oslo_log import log
from oslo_utils import importutils
CONF = cfg.CONF
logging = log.getLogger(__name__)
LOG = log.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
@ -51,11 +51,11 @@ class Job(object):
def execute(self):
start_time = utils.DateTime.now()
logging.info('[*] Job execution Started at: {0}'.format(start_time))
LOG.info('Job execution Started at: {0}'.format(start_time))
retval = self.execute_method()
end_time = utils.DateTime.now()
logging.info('[*] Job execution Finished, at: {0}'.format(end_time))
logging.info('[*] Job time Elapsed: {0}'.format(end_time - start_time))
LOG.info('Job execution Finished, at: {0}'.format(end_time))
LOG.info('Job time Elapsed: {0}'.format(end_time - start_time))
return retval
@abc.abstractmethod
@ -73,9 +73,9 @@ class BackupJob(Job):
try:
(out, err) = utils.create_subprocess('sync')
if err:
logging.error('Error while sync exec: {0}'.format(err))
LOG.error('Error while sync exec: {0}'.format(err))
except Exception as error:
logging.error('Error while sync exec: {0}'.format(error))
LOG.error('Error while sync exec: {0}'.format(error))
if not self.conf.mode:
raise ValueError("Empty mode")
mod_name = 'freezer.mode.{0}.{1}'.format(
@ -154,8 +154,8 @@ class BackupJob(Job):
self.conf.dereference_symlink == 'hard')
consistency_checksum = CheckSum(
filepath, ignorelinks=ignorelinks).compute()
logging.info('[*] Computed checksum for consistency {0}'.
format(consistency_checksum))
LOG.info('Computed checksum for consistency {0}'.
format(consistency_checksum))
self.conf.consistency_checksum = consistency_checksum
hostname_backup_name = self.conf.hostname_backup_name
@ -181,13 +181,13 @@ class BackupJob(Job):
self.storage)
if backup_media == 'nova':
logging.info('[*] Executing nova backup')
LOG.info('Executing nova backup')
backup_os.backup_nova(self.conf.nova_inst_id)
elif backup_media == 'cindernative':
logging.info('[*] Executing cinder backup')
LOG.info('Executing cinder backup')
backup_os.backup_cinder(self.conf.cindernative_vol_id)
elif backup_media == 'cinder':
logging.info('[*] Executing cinder snapshot')
LOG.info('Executing cinder snapshot')
backup_os.backup_cinder_by_glance(self.conf.cinder_vol_id)
else:
raise Exception('unknown parameter backup_media %s' % backup_media)
@ -198,7 +198,7 @@ class RestoreJob(Job):
def execute_method(self):
conf = self.conf
logging.info('[*] Executing FS restore...')
LOG.info('Executing FS restore...')
restore_timestamp = None
restore_abs_path = conf.restore_abs_path
@ -215,7 +215,7 @@ class RestoreJob(Job):
restore_checksum = CheckSum(restore_abs_path,
ignorelinks=True)
if restore_checksum.compare(backup_checksum):
logging.info('[*] Consistency check success.')
LOG.info('Consistency check success.')
else:
raise ConsistencyCheckException(
"Backup Consistency Check failed: backup checksum "
@ -261,11 +261,11 @@ class AdminJob(Job):
class ExecJob(Job):
def execute_method(self):
logging.info('[*] exec job....')
LOG.info('exec job....')
if self.conf.command:
logging.info('[*] Executing exec job....')
LOG.info('Executing exec job....')
exec_cmd.execute(self.conf.command)
else:
logging.warning(
'[*] No command info options were set. Exiting.')
LOG.warning(
'No command info options were set. Exiting.')
return {}

@ -46,6 +46,7 @@ def freezer_main(backup_args):
"""
if not backup_args.quiet:
LOG.info("Begin freezer agent process with args: {0}".format(sys.argv))
LOG.info('log file at {0}'.format(CONF.get('log_file')))
if backup_args.max_priority:
@ -86,7 +87,7 @@ def freezer_main(backup_args):
if hasattr(backup_args, 'trickle_command'):
if "tricklecount" in os.environ:
if int(os.environ.get("tricklecount")) > 1:
LOG.critical("[*] Trickle seems to be not working, Switching "
LOG.critical("Trickle seems to be not working, Switching "
"to normal mode ")
return run_job(backup_args, storage)
@ -107,12 +108,16 @@ def freezer_main(backup_args):
utils.delete_file(backup_args.tmp_file)
if process.returncode:
LOG.warn("[*] Trickle Error: {0}".format(error))
LOG.info("[*] Switching to work without trickle ...")
LOG.warn("Trickle Error: {0}".format(error))
LOG.info("Switching to work without trickle ...")
return run_job(backup_args, storage)
else:
return run_job(backup_args, storage)
run_job(backup_args, storage)
if not backup_args.quiet:
LOG.info("End freezer agent process successfully")
def run_job(conf, storage):
@ -134,7 +139,7 @@ def run_job(conf, storage):
def fail(exit_code, e, quiet, do_log=True):
""" Catch the exceptions and write it to log """
msg = '[*] Critical Error: {0}\n'.format(e)
msg = 'Critical Error: {0}\n'.format(e)
if not quiet:
sys.stderr.write(msg)
if do_log:
@ -189,6 +194,7 @@ def main():
"""freezer-agent binary main execution"""
backup_args = None
try:
freezer_config.config()
freezer_config.setup_logging()
backup_args = freezer_config.get_backup_args()
@ -200,8 +206,11 @@ def main():
CONF.print_help()
sys.exit(1)
freezer_main(backup_args)
except Exception as err:
quiet = backup_args.quiet if backup_args else False
LOG.exception(err)
LOG.critical("End freezer agent process unsuccessfully")
return fail(1, err, quiet)
if __name__ == '__main__':

@ -12,8 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log
from freezer.mode import mode
LOG = log.getLogger(__name__)
class FsMode(mode.Mode):

@ -15,6 +15,10 @@
import abc
import six
from oslo_log import log
LOG = log.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class Mode(object):

@ -12,10 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from oslo_log import log
from freezer.mode import mode
LOG = log.getLogger(__name__)
class MongoDbMode(mode.Mode):
"""
@ -42,8 +44,8 @@ class MongoDbMode(mode.Mode):
except ImportError:
raise ImportError('please install pymongo module')
logging.info('[*] MongoDB backup is being executed...')
logging.info('[*] Checking is the localhost is Master/Primary...')
LOG.info('MongoDB backup is being executed...')
LOG.info('Checking is the localhost is Master/Primary...')
# todo unhardcode this
mongodb_port = '27017'
local_hostname = conf.hostname
@ -51,5 +53,5 @@ class MongoDbMode(mode.Mode):
mongo_client = pymongo.MongoClient(db_host_port)
master_dict = dict(mongo_client.admin.command("isMaster"))
if master_dict['me'] != master_dict['primary']:
raise Exception('[*] localhost {0} is not Master/Primary,\
raise Exception('localhost {0} is not Master/Primary,\
exiting...'.format(local_hostname))

@ -12,9 +12,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log
from freezer.mode import mode
from freezer.utils import config
LOG = log.getLogger(__name__)
class MysqlMode(mode.Mode):
"""
@ -68,4 +72,4 @@ class MysqlMode(mode.Mode):
passwd=parsed_config.get("password", False))
self.cursor = None
except Exception as error:
raise Exception('[*] MySQL: {0}'.format(error))
raise Exception('MySQL: {0}'.format(error))

@ -12,13 +12,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from oslo_log import log
from freezer.mode import mode
from freezer.utils import config
from freezer.utils import utils
from freezer.utils import winutils
LOG = log.getLogger(__name__)
class SqlserverMode(mode.Mode):
"""
@ -46,13 +49,13 @@ class SqlserverMode(mode.Mode):
""" Stop a SQL Server instance to
perform the backup of the db files """
logging.info('[*] Stopping SQL Server for backup')
LOG.info('Stopping SQL Server for backup')
with winutils.DisableFileSystemRedirection():
cmd = 'net stop "SQL Server ({0})"'\
.format(self.sql_server_instance)
(out, err) = utils.create_subprocess(cmd)
if err != '':
raise Exception('[*] Error while stopping SQL Server,'
raise Exception('Error while stopping SQL Server,'
', error {0}'.format(err))
def start_sql_server(self):
@ -63,9 +66,9 @@ class SqlserverMode(mode.Mode):
self.sql_server_instance)
(out, err) = utils.create_subprocess(cmd)
if err != '':
raise Exception('[*] Error while starting SQL Server'
raise Exception('Error while starting SQL Server'
', error {0}'.format(err))
logging.info('[*] SQL Server back to normal')
LOG.info('SQL Server back to normal')
def prepare(self):
self.stop_sql_server()

@ -17,13 +17,11 @@ Freezer Backup modes related functions
"""
import time
from oslo_config import cfg
from oslo_log import log
from freezer.utils import utils
CONF = cfg.CONF
logging = log.getLogger(__name__)
LOG = log.getLogger(__name__)
class BackupOs(object):
@ -66,15 +64,15 @@ class BackupOs(object):
try:
image = glance.images.get(image_id)
except Exception as e:
logging.error(e)
LOG.error(e)
stream = client_manager.download_image(image)
package = "{0}/{1}".format(instance_id, utils.DateTime.now().timestamp)
logging.info("[*] Uploading image to swift")
LOG.info("Uploading image to swift")
headers = {"x-object-meta-name": instance._info['name'],
"x-object-meta-flavor-id": instance._info['flavor']['id']}
self.storage.add_stream(stream, package, headers)
logging.info("[*] Deleting temporary image {0}".format(image))
LOG.info("Deleting temporary image {0}".format(image))
glance.images.delete(image.id)
def backup_cinder_by_glance(self, volume_id):
@ -89,24 +87,24 @@ class BackupOs(object):
cinder = client_manager.get_cinder()
volume = cinder.volumes.get(volume_id)
logging.debug("Creation temporary snapshot")
LOG.debug("Creation temporary snapshot")
snapshot = client_manager.provide_snapshot(
volume, "backup_snapshot_for_volume_%s" % volume_id)
logging.debug("Creation temporary volume")
LOG.debug("Creation temporary volume")
copied_volume = client_manager.do_copy_volume(snapshot)
logging.debug("Creation temporary glance image")
LOG.debug("Creation temporary glance image")
image = client_manager.make_glance_image("name", copied_volume)
logging.debug("Download temporary glance image {0}".format(image.id))
LOG.debug("Download temporary glance image {0}".format(image.id))
stream = client_manager.download_image(image)
package = "{0}/{1}".format(volume_id, utils.DateTime.now().timestamp)
logging.debug("Uploading image to swift")
LOG.debug("Uploading image to swift")
headers = {}
self.storage.add_stream(stream, package, headers=headers)
logging.debug("Deleting temporary snapshot")
LOG.debug("Deleting temporary snapshot")
client_manager.clean_snapshot(snapshot)
logging.debug("Deleting temporary volume")
LOG.debug("Deleting temporary volume")
cinder.volumes.delete(copied_volume)
logging.debug("Deleting temporary image")
LOG.debug("Deleting temporary image")
client_manager.get_glance().images.delete(image.id)
def backup_cinder(self, volume_id, name=None, description=None):

@ -22,13 +22,11 @@ from glanceclient.client import Client as glance_client
from keystoneauth1 import loading
from keystoneauth1 import session
from novaclient.client import Client as nova_client
from oslo_config import cfg
from oslo_log import log
from freezer.utils import utils
CONF = cfg.CONF
logging = log.getLogger(__name__)
LOG = log.getLogger(__name__)
class OSClientManager(object):
@ -194,11 +192,11 @@ class OSClientManager(object):
name=snapshot_name,
force=True)
logging.debug("Snapshot for volume with id {0}".format(volume.id))
LOG.debug("Snapshot for volume with id {0}".format(volume.id))
while snapshot.status != "available":
try:
logging.debug("Snapshot status: " + snapshot.status)
LOG.debug("Snapshot status: " + snapshot.status)
snapshot = self.get_cinder().volume_snapshots.get(snapshot.id)
if snapshot.status == "error":
raise Exception("snapshot has error state")
@ -206,7 +204,7 @@ class OSClientManager(object):
except Exception as e:
if str(e) == "snapshot has error state":
raise e
logging.exception(e)
LOG.exception(e)
return snapshot
def do_copy_volume(self, snapshot):
@ -221,12 +219,12 @@ class OSClientManager(object):
while volume.status != "available":
try:
logging.info("[*] Volume copy status: " + volume.status)
LOG.info("Volume copy status: " + volume.status)
volume = self.get_cinder().volumes.get(volume.id)
time.sleep(5)
except Exception as e:
logging.exception(e)
logging.warn("[*] Exception getting volume status")
LOG.exception(e)
LOG.warn("Exception getting volume status")
return volume
def make_glance_image(self, image_volume_name, copy_volume):
@ -246,15 +244,15 @@ class OSClientManager(object):
while image.status != "active":
try:
time.sleep(5)
logging.info("Image status: " + image.status)
LOG.info("Image status: " + image.status)
image = self.get_glance().images.get(image.id)
if image.status in ("killed", "deleted"):
raise Exception("Image have killed state")
except Exception as e:
if image.status in ("killed", "deleted"):
raise e
logging.exception(e)
logging.warn("Exception getting image status")
LOG.exception(e)
LOG.warn("Exception getting image status")
return image
def clean_snapshot(self, snapshot):
@ -262,7 +260,7 @@ class OSClientManager(object):
Deletes snapshot
:param snapshot: snapshot name
"""
logging.info("[*] Deleting existed snapshot: " + snapshot.id)
LOG.info("Deleting existed snapshot: " + snapshot.id)
self.get_cinder().volume_snapshots.delete(snapshot)
def download_image(self, image):
@ -271,9 +269,9 @@ class OSClientManager(object):
:param image: Image object for downloading
:return: stream of image data
"""
logging.debug("Download image enter")
LOG.debug("Download image enter")
stream = self.get_glance().images.data(image.id)
logging.debug("Stream with size {0}".format(image.size))
LOG.debug("Stream with size {0}".format(image.size))
return utils.ReSizeStream(stream, image.size, 1000000)
@ -365,10 +363,10 @@ class OpenstackOpts(object):
'Generated from auth_url: {1}'
.format(version, auth_url))
logging.info('Authenticating with Keystone version: '
'{0}, auth_url: {1}, username: {2}, project: {3}'.
format(self.auth_version, self.auth_url,
self.username, self.project_name))
LOG.info('Authenticating with Keystone version: '
'{0}, auth_url: {1}, username: {2}, project: {3}'.
format(self.auth_version, self.auth_url,
self.username, self.project_name))
def get_opts_dicts(self):
"""

@ -16,13 +16,11 @@ limitations under the License.
Freezer restore modes related functions
"""
from oslo_config import cfg
from oslo_log import log
from freezer.utils import utils
CONF = cfg.CONF
logging = log.getLogger(__name__)
LOG = log.getLogger(__name__)
class RestoreOs(object):
@ -46,7 +44,7 @@ class RestoreOs(object):
if not backups:
msg = "Cannot find backups for path: %s" % path
logging.error(msg)
LOG.error(msg)
raise BaseException(msg)
return backups[-1]
@ -63,7 +61,7 @@ class RestoreOs(object):
stream = swift.get_object(self.container, "%s/%s" % (path, backup),
resp_chunk_size=10000000)
length = int(stream[0]["x-object-meta-length"])
logging.info("[*] Creation glance image")
LOG.info("Creation glance image")
image = glance.images.create(
data=utils.ReSizeStream(stream[1], length, 1),
container_format="bare", disk_format="raw")
@ -83,8 +81,8 @@ class RestoreOs(object):
x.created_at.split('.')[0]) >= restore_from_timestamp])
if not backups_filter:
logging.warning("no available backups for cinder volume,"
"restore newest backup")
LOG.warning("no available backups for cinder volume,"
"restore newest backup")
backup = max(backups, key=lambda x: x.created_at)
cinder.restores.restore(backup_id=backup.id)
else:
@ -107,10 +105,10 @@ class RestoreOs(object):
size = length / gb
if length % gb > 0:
size += 1
logging.info("[*] Creation volume from image")
LOG.info("Creation volume from image")
self.client_manager.get_cinder().volumes.create(size,
imageRef=image.id)
logging.info("[*] Deleting temporary image")
LOG.info("Deleting temporary image")
self.client_manager.get_glance().images.delete(image)
def restore_nova(self, instance_id, restore_from_timestamp):
@ -123,5 +121,5 @@ class RestoreOs(object):
(info, image) = self._create_image(instance_id, restore_from_timestamp)
nova = self.client_manager.get_nova()
flavor = nova.flavors.get(info['x-object-meta-flavor-id'])
logging.info("[*] Creation an instance")
LOG.info("Creation an instance")
nova.servers.create(info['x-object-meta-name'], image, flavor)

@ -95,7 +95,7 @@ class NoDaemon(object):
@staticmethod
def handle_program_exit(signum, frame):
logging.info('[*] Got signal {0}. Exiting ...'.format(signum))
logging.info('Got signal {0}. Exiting ...'.format(signum))
NoDaemon.exit_flag = True
NoDaemon.instance.daemonizable.stop()
@ -107,16 +107,16 @@ class NoDaemon(object):
setup_logging(log_file)
while not NoDaemon.exit_flag:
try:
logging.info('[*] Starting in no-daemon mode')
logging.info('Starting in no-daemon mode')
self.daemonizable.start()
NoDaemon.exit_flag = True
except Exception as e:
if dump_stack_trace:
logging.error(traceback.format_exc(e))
logging.error('[*] Restarting procedure in no-daemon mode '
logging.error('Restarting procedure in no-daemon mode '
'after Fatal Error: {0}'.format(e))
sleep(10)
logging.info('[*] Done exiting')
logging.info('Done exiting')
def stop(self):
pass
@ -178,17 +178,17 @@ class Daemon(object):
setup_logging(log_file)
while not Daemon.exit_flag:
try:
logging.info('[*] freezer daemon starting, pid: {0}'.
logging.info('freezer daemon starting, pid: {0}'.
format(self.pid))
self.daemonizable.start()
Daemon.exit_flag = True
except Exception as e:
if dump_stack_trace:
logging.error(traceback.format_exc(e))
logging.error('[*] Restarting daemonized procedure '
logging.error('Restarting daemonized procedure '
'after Fatal Error: {0}'.format(e))
sleep(10)
logging.info('[*] freezer daemon done, pid: {0}'.format(self.pid))
logging.info('freezer daemon done, pid: {0}'.format(self.pid))
def stop(self):
pid = self.pid

@ -114,7 +114,7 @@ class FreezerScheduler(object):
try:
return self.client.jobs.update(job_id, job_doc)
except Exception as e:
LOG.error("[*] Job update error: {0}".format(e))
LOG.error("Job update error: {0}".format(e))
def update_job_status(self, job_id, status):
doc = {'job_schedule': {'status': status}}
@ -134,7 +134,7 @@ class FreezerScheduler(object):
try:
work_job_doc_list = self.get_jobs()
except Exception as e:
LOG.error("[*] Unable to get jobs: {0}".format(e))
LOG.error("Unable to get jobs: {0}".format(e))
return
work_job_id_list = []

@ -29,7 +29,7 @@ from six.moves import configparser
CONF = cfg.CONF
logging = log.getLogger(__name__)
LOG = log.getLogger(__name__)
class StopState(object):
@ -133,13 +133,13 @@ class Job(object):
def create(scheduler, executable, job_doc):
job = Job(scheduler, executable, job_doc)
if job.job_doc_status in ['running', 'scheduled']:
logging.warning('Resetting {0} status from job {1}'
.format(job.job_doc_status, job.id))
LOG.warning('Resetting {0} status from job {1}'
.format(job.job_doc_status, job.id))
if job.job_doc_status == 'stop' and not job.event:
logging.info('Job {0} was stopped.'.format(job.id))
LOG.info('Job {0} was stopped.'.format(job.id))
job.event = Job.STOP_EVENT
elif not job.event:
logging.info('Autostart Job {0}'.format(job.id))
LOG.info('Autostart Job {0}'.format(job.id))
job.event = Job.START_EVENT
return job
@ -153,7 +153,7 @@ class Job(object):
def remove(self):
with self.scheduler.lock:
# delegate to state object
logging.info('REMOVE job {0}'.format(self.id))
LOG.info('REMOVE job {0}'.format(self.id))
self.state.remove(self)
@property
@ -262,13 +262,13 @@ class Job(object):
next_event = job_doc['job_schedule'].get('event', '')
while next_event:
if next_event == Job.STOP_EVENT:
logging.info('JOB {0} event: STOP'.format(self.id))
LOG.info('JOB {0} event: STOP'.format(self.id))
next_event = self.state.stop(self, job_doc)
elif next_event == Job.START_EVENT:
logging.info('JOB {0} event: START'.format(self.id))
LOG.info('JOB {0} event: START'.format(self.id))
next_event = self.state.start(self, job_doc)
elif next_event == Job.ABORT_EVENT:
logging.info('JOB {0} event: ABORT'.format(self.id))
LOG.info('JOB {0} event: ABORT'.format(self.id))
next_event = self.state.abort(self, job_doc)
def upload_metadata(self, metadata_string):
@ -277,10 +277,10 @@ class Job(object):
if metadata:
metadata['job_id'] = self.id
self.scheduler.upload_metadata(metadata)
logging.info("[*] Job {0}, freezer action metadata uploaded"
.format(self.id))
LOG.info("Job {0}, freezer action metadata uploaded"
.format(self.id))
except Exception as e:
logging.error('[*] metrics upload error: {0}'.format(e))
LOG.error('metrics upload error: {0}'.format(e))
def execute_job_action(self, job_action):
max_retries = job_action.get('max_retries', 1)
@ -305,7 +305,7 @@ class Job(object):
utils.delete_file(config_file_name)
if error:
logging.error("[*] Freezer client error: {0}".format(error))
LOG.error("Freezer client error: {0}".format(error))
elif output:
self.upload_metadata(output)
@ -313,21 +313,21 @@ class Job(object):
# ERROR
tries -= 1
if tries:
logging.warning('[*] Job {0} failed {1} action,'
' retrying in {2} seconds'
.format(self.id, action_name,
max_retries_interval))
LOG.warning('Job {0} failed {1} action,'
' retrying in {2} seconds'
.format(self.id, action_name,
max_retries_interval))
# sleeping with the bloody lock, but we don't want other
# actions to mess with our stuff like fs snapshots, do we ?
time.sleep(max_retries_interval)
else:
# SUCCESS
logging.info('[*] Job {0} action {1}'
' returned success exit code'.
format(self.id, action_name))
LOG.info('Job {0} action {1}'
' returned success exit code'.
format(self.id, action_name))
return Job.SUCCESS_RESULT
logging.error('[*] Job {0} action {1} failed after {2} tries'
.format(self.id, action_name, max_retries))
LOG.error('Job {0} action {1} failed after {2} tries'
.format(self.id, action_name, max_retries))
return Job.FAIL_RESULT
@ -344,7 +344,7 @@ class Job(object):
result = Job.SUCCESS_RESULT
with self.scheduler.execution_lock:
with self.scheduler.lock:
logging.info('job {0} running'.format(self.id))
LOG.info('job {0} running'.format(self.id))
self.state = RunningState
self.job_doc_status = Job.RUNNING_STATUS
self.scheduler.update_job_status(self.id, self.job_doc_status)
@ -353,9 +353,9 @@ class Job(object):
# if the job contains exec action and the scheduler passes the
# parameter --disable-exec job execution should fail
if self.contains_exec() and CONF.disable_exec:
logging.info("Job {0} failed because it contains exec action "
"and exec actions are disabled by scheduler"
.format(self.id))
LOG.info("Job {0} failed because it contains exec action "
"and exec actions are disabled by scheduler"
.format(self.id))
self.result = Job.FAIL_RESULT
self.finish()
return
@ -369,8 +369,8 @@ class Job(object):
else:
freezer_action = job_action.get('freezer_action', {})
action_name = freezer_action.get('action', '')
logging.warning("[*]skipping {0} action".
format(action_name))
LOG.warning("skipping {0} action".
format(action_name))
self.result = result
self.finish()
@ -410,12 +410,12 @@ class Job(object):
self.session_tag = resp['session_tag']
return
except Exception as e:
logging.error('[*]Error while starting session {0}. {1}'.
format(self.session_id, e))
logging.warning('[*]Retrying to start session {0}'.
format(self.session_id))
LOG.error('Error while starting session {0}. {1}'.
format(self.session_id, e))
LOG.warning('Retrying to start session {0}'.
format(self.session_id))
retry -= 1
logging.error('[*]Unable to start session {0}'.format(self.session_id))
LOG.error('Unable to start session {0}'.format(self.session_id))
def end_session(self, result):
if not self.session_id:
@ -430,22 +430,22 @@ class Job(object):
if resp['result'] == 'success':
return
except Exception as e:
logging.error('[*]Error while ending session {0}. {1}'.
format(self.session_id, e))
logging.warning('[*]Retrying to end session {0}'.
format(self.session_id))
LOG.error('Error while ending session {0}. {1}'.
format(self.session_id, e))
LOG.warning('Retrying to end session {0}'.
format(self.session_id))
retry -= 1
logging.error('[*]Unable to end session {0}'.format(self.session_id))
LOG.error('Unable to end session {0}'.format(self.session_id))
def schedule(self):
try:
kwargs = self.get_schedule_args()
self.scheduler.add_job(self.execute, id=self.id, **kwargs)
except Exception as e:
logging.error("[*] Unable to schedule job {0}: {1}".
format(self.id, e))
LOG.error("Unable to schedule job {0}: {1}".
format(self.id, e))
logging.info('scheduler job with parameters {0}'.format(kwargs))
LOG.info('scheduler job with parameters {0}'.format(kwargs))
if self.scheduled:
self.job_doc_status = Job.SCHEDULED_STATUS

@ -16,15 +16,18 @@ limitations under the License.
Freezer LVM related functions
"""
import logging
import os
import re
import subprocess
import uuid
from oslo_log import log
from freezer.common import config as freezer_config
from freezer.utils import utils
LOG = log.getLogger(__name__)
def lvm_snap_remove(backup_opt_dict):
"""
@ -39,12 +42,12 @@ def lvm_snap_remove(backup_opt_dict):
try:
_umount(backup_opt_dict.lvm_dirmount)
except Exception as e:
logging.warning("Snapshot unmount errror: {0}".format(e))
LOG.warning("Snapshot unmount errror: {0}".format(e))
lv = os.path.join('/dev',
backup_opt_dict.lvm_volgroup,
backup_opt_dict.lvm_snapname)
_lvremove(lv)
logging.info('[*] Snapshot volume {0} removed'.format(lv))
LOG.info('Snapshot volume {0} removed'.format(lv))
def get_vol_fs_type(vol_name):
@ -54,8 +57,8 @@ def get_vol_fs_type(vol_name):
file system type
"""
if os.path.exists(vol_name) is False:
err = '[*] Provided volume name not found: {0} '.format(vol_name)
logging.exception(err)
err = 'Provided volume name not found: {0} '.format(vol_name)
LOG.exception(err)
raise Exception(err)
file_cmd = '{0} -0 -bLs --no-pad --no-buffer --preserve-date \
@ -67,12 +70,12 @@ def get_vol_fs_type(vol_name):
(file_out, file_err) = file_process.communicate()
file_match = re.search(r'(\S+?) filesystem data', file_out, re.I)
if file_match is None:
err = '[*] File system type not guessable: {0}'.format(file_err)
logging.exception(err)
err = 'File system type not guessable: {0}'.format(file_err)
LOG.exception(err)
raise Exception(err)
else:
filesys_type = file_match.group(1)
logging.info('[*] File system {0} found for volume {1}'.format(
LOG.info('File system {0} found for volume {1}'.format(
filesys_type, vol_name))
return filesys_type.lower().strip()
@ -115,7 +118,7 @@ def lvm_snap(backup_opt_dict):
lvm_info['snap_path'])
if not validate_lvm_params(backup_opt_dict):
logging.info('[*] No LVM requested/configured')
LOG.info('No LVM requested/configured')
return False
utils.create_dir(backup_opt_dict.lvm_dirmount)
@ -145,9 +148,9 @@ def lvm_snap(backup_opt_dict):
if lvm_process.returncode:
raise Exception('lvm snapshot creation error: {0}'.format(lvm_err))
logging.debug('[*] {0}'.format(lvm_out))
logging.warning('[*] Logical volume "{0}" created'.
format(backup_opt_dict.lvm_snapname))
LOG.debug('{0}'.format(lvm_out))
LOG.warning('Logical volume "{0}" created'.