Fix Freezer Agent Logging

Standardize on oslo.log.
Log exception stack traces where appropriate.
Add logging to all files.
Remove '[*]' from log messages.
Log command-line arguments at Freezer Agent start
so that errors can be traced to specific invocations.

Leave the scheduler module mostly unchanged because it
performs its own Python logging configuration.

Change-Id: I23de0558409e63978303963d592a4e5ee4dee2b5
This commit is contained in:
Deklan Dieterly 2016-06-20 16:49:28 -06:00
parent c682addec0
commit 00db2b0d95
36 changed files with 314 additions and 248 deletions

View File

@ -18,8 +18,12 @@ limitations under the License.
import json
import requests
from oslo_log import log
from freezer.apiclient import exceptions
LOG = log.getLogger(__name__)
class ActionManager(object):

View File

@ -17,8 +17,12 @@ limitations under the License.
import json
import requests
from oslo_log import log
from freezer.apiclient import exceptions
LOG = log.getLogger(__name__)
class BackupsManager(object):
@ -52,7 +56,7 @@ class BackupsManager(object):
def list_all(self, limit=10, offset=0, search=None):
"""
Retrieves a list of backup infos
Retrieves a list of backups
:param limit: number of result to return (optional, default 10)
:param offset: order of first document (optional, default 0)

View File

@ -23,6 +23,7 @@ from keystoneauth1.identity import v2
from keystoneauth1.identity import v3
from keystoneauth1 import session as ksa_session
from oslo_config import cfg
from oslo_log import log
from freezer.apiclient import actions
from freezer.apiclient import backups
@ -31,8 +32,7 @@ from freezer.apiclient import registration
from freezer.apiclient import sessions
from freezer.utils import utils
CONF = cfg.CONF
LOG = log.getLogger(__name__)
FREEZER_SERVICE_TYPE = 'backup'

View File

@ -16,6 +16,9 @@ limitations under the License.
"""
import json
from oslo_log import log
LOG = log.getLogger(__name__)
class ApiClientException(Exception):
@ -35,7 +38,7 @@ class ApiClientException(Exception):
"""
try:
body = json.loads(r.text)
message = "[*] Error {0}: {1}".format(
message = "Error {0}: {1}".format(
r.status_code,
body['description'])
except Exception:
@ -52,7 +55,7 @@ class ApiClientException(Exception):
:return: string with error message or None if it fails
"""
try:
message = "[*] Error {0}: {1}".format(
message = "Error {0}: {1}".format(
r.status_code,
r.text)
except Exception:

View File

@ -17,8 +17,12 @@ limitations under the License.
import json
import requests
from oslo_log import log
from freezer.apiclient import exceptions
LOG = log.getLogger(__name__)
class JobManager(object):

View File

@ -17,8 +17,12 @@ limitations under the License.
import json
import requests
from oslo_log import log
from freezer.apiclient import exceptions
LOG = log.getLogger(__name__)
class RegistrationManager(object):

View File

@ -17,8 +17,12 @@ limitations under the License.
import json
import requests
from oslo_log import log
from freezer.apiclient import exceptions
LOG = log.getLogger(__name__)
class SessionManager(object):

View File

@ -552,7 +552,7 @@ def get_backup_args():
'trickle', path=":".join(os.environ.get('PATH')))
if trickle_executable:
LOG.info("[*] Info: Starting trickle ...")
LOG.info("Info: Starting trickle ...")
trickle_command = '{0} -d {1} -u {2} '.\
format(trickle_executable,
getattr(backup_args, 'download_limit') or -1,
@ -570,7 +570,7 @@ def get_backup_args():
else:
os.environ["tricklecount"] = str(1)
else:
LOG.warn("[*] Trickle not found. Switching to normal mode without "
LOG.warn("Trickle not found. Switching to normal mode without "
"limiting bandwidth")
if backup_args.config:
# remove index tmp_file from backup arguments dict

View File

@ -24,15 +24,13 @@ import six
from six.moves import queue
import time
from oslo_config import cfg
from oslo_log import log
from freezer.engine.exceptions import EngineException
from freezer.utils import streaming
from freezer.utils import utils
CONF = cfg.CONF
logging = log.getLogger(__name__)
LOG = log.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
@ -116,7 +114,7 @@ class BackupEngine(object):
if not except_queue.empty():
while not except_queue.empty():
e = except_queue.get_nowait()
logging.critical('Engine error: {0}'.format(e))
LOG.exception('Engine error: {0}'.format(e))
return True
else:
return False
@ -170,16 +168,16 @@ class BackupEngine(object):
"""
:type backup: freezer.storage.Backup
"""
logging.info("Creation restore path: {0}".format(restore_path))
LOG.info("Creation restore path: {0}".format(restore_path))
utils.create_dir_tree(restore_path)
if not overwrite and not utils.is_empty_dir(restore_path):
raise Exception(
"Restore dir is not empty. "
"Please use --overwrite or provide different path.")
logging.info("Creation restore path completed")
LOG.info("Creation restore path completed")
for level in range(0, backup.level + 1):
b = backup.full_backup.increments[level]
logging.info("Restore backup {0}".format(b))
LOG.info("Restore backup {0}".format(b))
# Use SimpleQueue because Queue does not work on Mac OS X.
read_except_queue = SimpleQueue()
@ -214,7 +212,7 @@ class BackupEngine(object):
if not except_queue.empty():
while not except_queue.empty():
e = except_queue.get()
logging.critical('Engine error: {0}'.format(e))
LOG.exception('Engine error: {0}'.format(e))
return True
else:
return False
@ -228,8 +226,8 @@ class BackupEngine(object):
if tar_stream.exitcode or got_exception:
raise EngineException("Engine error. Failed to restore.")
logging.info(
'[*] Restore execution successfully executed \
LOG.info(
'Restore execution successfully executed \
for backup name {0}'.format(backup))
@abc.abstractmethod

View File

@ -17,6 +17,10 @@ Freezer Tar related functions
"""
from freezer.utils import utils
from oslo_log import log
LOG = log.getLogger(__name__)
class TarCommandBuilder(object):
"""
@ -159,6 +163,6 @@ def get_tar_flag_from_algo(compression):
}
compression_exec = utils.get_executable_path(compression)
if not compression_exec:
raise Exception("[*] Critical Error: [*] {0} executable not found ".
raise Exception("Critical Error: {0} executable not found ".
format(compression))
return algo.get(compression)

View File

@ -15,14 +15,17 @@ limitations under the License.
Freezer general utils functions
"""
import logging
import os
import subprocess
from oslo_log import log
from freezer.engine import engine
from freezer.engine.tar import tar_builders
from freezer.utils import winutils
LOG = log.getLogger(__name__)
class TarBackupEngine(engine.BackupEngine):
@ -53,7 +56,7 @@ class TarBackupEngine(engine.BackupEngine):
self.storage.upload_freezer_meta_data(backup, metadata)
def backup_data(self, backup_path, manifest_path):
logging.info("Tar engine backup stream enter")
LOG.info("Tar engine backup stream enter")
tar_command = tar_builders.TarCommandBuilder(
backup_path, self.compression_algo, self.is_windows)
if self.encrypt_pass_file:
@ -65,7 +68,7 @@ class TarBackupEngine(engine.BackupEngine):
command = tar_command.build()
logging.info("Execution command: \n{}".format(command))
LOG.info("Execution command: \n{}".format(command))
tar_process = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
@ -77,7 +80,7 @@ class TarBackupEngine(engine.BackupEngine):
self.check_process_output(tar_process, 'Backup')
logging.info("Tar engine streaming end")
LOG.info("Tar engine streaming end")
def restore_level(self, restore_path, read_pipe, backup, except_queue):
"""
@ -119,12 +122,13 @@ class TarBackupEngine(engine.BackupEngine):
while True:
tar_process.stdin.write(read_pipe.recv_bytes())
except EOFError:
logging.info('[*] Pipe closed as EOF reached. '
LOG.info('Pipe closed as EOF reached. '
'Data transmitted successfully')
finally:
self.check_process_output(tar_process, 'Restore')
except Exception as e:
LOG.exception(e)
except_queue.put(e)
@staticmethod
@ -144,10 +148,10 @@ class TarBackupEngine(engine.BackupEngine):
tar_err = process.communicate()[1]
if tar_err:
logging.error('{0} error: {1}'.format(function, tar_err))
LOG.error('{0} error: {1}'.format(function, tar_err))
if process.returncode:
logging.error('{0} return code is not 0'
LOG.error('{0} return code is not 0'
.format(process.returncode))
raise Exception('{0} process failed with return code: {1}'
.format(function, process.returncode))

View File

@ -34,7 +34,7 @@ from oslo_log import log
from oslo_utils import importutils
CONF = cfg.CONF
logging = log.getLogger(__name__)
LOG = log.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
@ -51,11 +51,11 @@ class Job(object):
def execute(self):
start_time = utils.DateTime.now()
logging.info('[*] Job execution Started at: {0}'.format(start_time))
LOG.info('Job execution Started at: {0}'.format(start_time))
retval = self.execute_method()
end_time = utils.DateTime.now()
logging.info('[*] Job execution Finished, at: {0}'.format(end_time))
logging.info('[*] Job time Elapsed: {0}'.format(end_time - start_time))
LOG.info('Job execution Finished, at: {0}'.format(end_time))
LOG.info('Job time Elapsed: {0}'.format(end_time - start_time))
return retval
@abc.abstractmethod
@ -73,9 +73,9 @@ class BackupJob(Job):
try:
(out, err) = utils.create_subprocess('sync')
if err:
logging.error('Error while sync exec: {0}'.format(err))
LOG.error('Error while sync exec: {0}'.format(err))
except Exception as error:
logging.error('Error while sync exec: {0}'.format(error))
LOG.error('Error while sync exec: {0}'.format(error))
if not self.conf.mode:
raise ValueError("Empty mode")
mod_name = 'freezer.mode.{0}.{1}'.format(
@ -154,7 +154,7 @@ class BackupJob(Job):
self.conf.dereference_symlink == 'hard')
consistency_checksum = CheckSum(
filepath, ignorelinks=ignorelinks).compute()
logging.info('[*] Computed checksum for consistency {0}'.
LOG.info('Computed checksum for consistency {0}'.
format(consistency_checksum))
self.conf.consistency_checksum = consistency_checksum
@ -181,13 +181,13 @@ class BackupJob(Job):
self.storage)
if backup_media == 'nova':
logging.info('[*] Executing nova backup')
LOG.info('Executing nova backup')
backup_os.backup_nova(self.conf.nova_inst_id)
elif backup_media == 'cindernative':
logging.info('[*] Executing cinder backup')
LOG.info('Executing cinder backup')
backup_os.backup_cinder(self.conf.cindernative_vol_id)
elif backup_media == 'cinder':
logging.info('[*] Executing cinder snapshot')
LOG.info('Executing cinder snapshot')
backup_os.backup_cinder_by_glance(self.conf.cinder_vol_id)
else:
raise Exception('unknown parameter backup_media %s' % backup_media)
@ -198,7 +198,7 @@ class RestoreJob(Job):
def execute_method(self):
conf = self.conf
logging.info('[*] Executing FS restore...')
LOG.info('Executing FS restore...')
restore_timestamp = None
restore_abs_path = conf.restore_abs_path
@ -215,7 +215,7 @@ class RestoreJob(Job):
restore_checksum = CheckSum(restore_abs_path,
ignorelinks=True)
if restore_checksum.compare(backup_checksum):
logging.info('[*] Consistency check success.')
LOG.info('Consistency check success.')
else:
raise ConsistencyCheckException(
"Backup Consistency Check failed: backup checksum "
@ -261,11 +261,11 @@ class AdminJob(Job):
class ExecJob(Job):
def execute_method(self):
logging.info('[*] exec job....')
LOG.info('exec job....')
if self.conf.command:
logging.info('[*] Executing exec job....')
LOG.info('Executing exec job....')
exec_cmd.execute(self.conf.command)
else:
logging.warning(
'[*] No command info options were set. Exiting.')
LOG.warning(
'No command info options were set. Exiting.')
return {}

View File

@ -46,6 +46,7 @@ def freezer_main(backup_args):
"""
if not backup_args.quiet:
LOG.info("Begin freezer agent process with args: {0}".format(sys.argv))
LOG.info('log file at {0}'.format(CONF.get('log_file')))
if backup_args.max_priority:
@ -86,7 +87,7 @@ def freezer_main(backup_args):
if hasattr(backup_args, 'trickle_command'):
if "tricklecount" in os.environ:
if int(os.environ.get("tricklecount")) > 1:
LOG.critical("[*] Trickle seems to be not working, Switching "
LOG.critical("Trickle seems to be not working, Switching "
"to normal mode ")
return run_job(backup_args, storage)
@ -107,12 +108,16 @@ def freezer_main(backup_args):
utils.delete_file(backup_args.tmp_file)
if process.returncode:
LOG.warn("[*] Trickle Error: {0}".format(error))
LOG.info("[*] Switching to work without trickle ...")
LOG.warn("Trickle Error: {0}".format(error))
LOG.info("Switching to work without trickle ...")
return run_job(backup_args, storage)
else:
return run_job(backup_args, storage)
run_job(backup_args, storage)
if not backup_args.quiet:
LOG.info("End freezer agent process successfully")
def run_job(conf, storage):
@ -134,7 +139,7 @@ def run_job(conf, storage):
def fail(exit_code, e, quiet, do_log=True):
""" Catch the exceptions and write it to log """
msg = '[*] Critical Error: {0}\n'.format(e)
msg = 'Critical Error: {0}\n'.format(e)
if not quiet:
sys.stderr.write(msg)
if do_log:
@ -189,6 +194,7 @@ def main():
"""freezer-agent binary main execution"""
backup_args = None
try:
freezer_config.config()
freezer_config.setup_logging()
backup_args = freezer_config.get_backup_args()
@ -200,8 +206,11 @@ def main():
CONF.print_help()
sys.exit(1)
freezer_main(backup_args)
except Exception as err:
quiet = backup_args.quiet if backup_args else False
LOG.exception(err)
LOG.critical("End freezer agent process unsuccessfully")
return fail(1, err, quiet)
if __name__ == '__main__':

View File

@ -12,8 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log
from freezer.mode import mode
LOG = log.getLogger(__name__)
class FsMode(mode.Mode):

View File

@ -15,6 +15,10 @@
import abc
import six
from oslo_log import log
LOG = log.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class Mode(object):

View File

@ -12,10 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from oslo_log import log
from freezer.mode import mode
LOG = log.getLogger(__name__)
class MongoDbMode(mode.Mode):
"""
@ -42,8 +44,8 @@ class MongoDbMode(mode.Mode):
except ImportError:
raise ImportError('please install pymongo module')
logging.info('[*] MongoDB backup is being executed...')
logging.info('[*] Checking is the localhost is Master/Primary...')
LOG.info('MongoDB backup is being executed...')
LOG.info('Checking is the localhost is Master/Primary...')
# todo unhardcode this
mongodb_port = '27017'
local_hostname = conf.hostname
@ -51,5 +53,5 @@ class MongoDbMode(mode.Mode):
mongo_client = pymongo.MongoClient(db_host_port)
master_dict = dict(mongo_client.admin.command("isMaster"))
if master_dict['me'] != master_dict['primary']:
raise Exception('[*] localhost {0} is not Master/Primary,\
raise Exception('localhost {0} is not Master/Primary,\
exiting...'.format(local_hostname))

View File

@ -12,9 +12,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log
from freezer.mode import mode
from freezer.utils import config
LOG = log.getLogger(__name__)
class MysqlMode(mode.Mode):
"""
@ -68,4 +72,4 @@ class MysqlMode(mode.Mode):
passwd=parsed_config.get("password", False))
self.cursor = None
except Exception as error:
raise Exception('[*] MySQL: {0}'.format(error))
raise Exception('MySQL: {0}'.format(error))

View File

@ -12,13 +12,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from oslo_log import log
from freezer.mode import mode
from freezer.utils import config
from freezer.utils import utils
from freezer.utils import winutils
LOG = log.getLogger(__name__)
class SqlserverMode(mode.Mode):
"""
@ -46,13 +49,13 @@ class SqlserverMode(mode.Mode):
""" Stop a SQL Server instance to
perform the backup of the db files """
logging.info('[*] Stopping SQL Server for backup')
LOG.info('Stopping SQL Server for backup')
with winutils.DisableFileSystemRedirection():
cmd = 'net stop "SQL Server ({0})"'\
.format(self.sql_server_instance)
(out, err) = utils.create_subprocess(cmd)
if err != '':
raise Exception('[*] Error while stopping SQL Server,'
raise Exception('Error while stopping SQL Server,'
', error {0}'.format(err))
def start_sql_server(self):
@ -63,9 +66,9 @@ class SqlserverMode(mode.Mode):
self.sql_server_instance)
(out, err) = utils.create_subprocess(cmd)
if err != '':
raise Exception('[*] Error while starting SQL Server'
raise Exception('Error while starting SQL Server'
', error {0}'.format(err))
logging.info('[*] SQL Server back to normal')
LOG.info('SQL Server back to normal')
def prepare(self):
self.stop_sql_server()

View File

@ -17,13 +17,11 @@ Freezer Backup modes related functions
"""
import time
from oslo_config import cfg
from oslo_log import log
from freezer.utils import utils
CONF = cfg.CONF
logging = log.getLogger(__name__)
LOG = log.getLogger(__name__)
class BackupOs(object):
@ -66,15 +64,15 @@ class BackupOs(object):
try:
image = glance.images.get(image_id)
except Exception as e:
logging.error(e)
LOG.error(e)
stream = client_manager.download_image(image)
package = "{0}/{1}".format(instance_id, utils.DateTime.now().timestamp)
logging.info("[*] Uploading image to swift")
LOG.info("Uploading image to swift")
headers = {"x-object-meta-name": instance._info['name'],
"x-object-meta-flavor-id": instance._info['flavor']['id']}
self.storage.add_stream(stream, package, headers)
logging.info("[*] Deleting temporary image {0}".format(image))
LOG.info("Deleting temporary image {0}".format(image))
glance.images.delete(image.id)
def backup_cinder_by_glance(self, volume_id):
@ -89,24 +87,24 @@ class BackupOs(object):
cinder = client_manager.get_cinder()
volume = cinder.volumes.get(volume_id)
logging.debug("Creation temporary snapshot")
LOG.debug("Creation temporary snapshot")
snapshot = client_manager.provide_snapshot(
volume, "backup_snapshot_for_volume_%s" % volume_id)
logging.debug("Creation temporary volume")
LOG.debug("Creation temporary volume")
copied_volume = client_manager.do_copy_volume(snapshot)
logging.debug("Creation temporary glance image")
LOG.debug("Creation temporary glance image")
image = client_manager.make_glance_image("name", copied_volume)
logging.debug("Download temporary glance image {0}".format(image.id))
LOG.debug("Download temporary glance image {0}".format(image.id))
stream = client_manager.download_image(image)
package = "{0}/{1}".format(volume_id, utils.DateTime.now().timestamp)
logging.debug("Uploading image to swift")
LOG.debug("Uploading image to swift")
headers = {}
self.storage.add_stream(stream, package, headers=headers)
logging.debug("Deleting temporary snapshot")
LOG.debug("Deleting temporary snapshot")
client_manager.clean_snapshot(snapshot)
logging.debug("Deleting temporary volume")
LOG.debug("Deleting temporary volume")
cinder.volumes.delete(copied_volume)
logging.debug("Deleting temporary image")
LOG.debug("Deleting temporary image")
client_manager.get_glance().images.delete(image.id)
def backup_cinder(self, volume_id, name=None, description=None):

View File

@ -22,13 +22,11 @@ from glanceclient.client import Client as glance_client
from keystoneauth1 import loading
from keystoneauth1 import session
from novaclient.client import Client as nova_client
from oslo_config import cfg
from oslo_log import log
from freezer.utils import utils
CONF = cfg.CONF
logging = log.getLogger(__name__)
LOG = log.getLogger(__name__)
class OSClientManager(object):
@ -194,11 +192,11 @@ class OSClientManager(object):
name=snapshot_name,
force=True)
logging.debug("Snapshot for volume with id {0}".format(volume.id))
LOG.debug("Snapshot for volume with id {0}".format(volume.id))
while snapshot.status != "available":
try:
logging.debug("Snapshot status: " + snapshot.status)
LOG.debug("Snapshot status: " + snapshot.status)
snapshot = self.get_cinder().volume_snapshots.get(snapshot.id)
if snapshot.status == "error":
raise Exception("snapshot has error state")
@ -206,7 +204,7 @@ class OSClientManager(object):
except Exception as e:
if str(e) == "snapshot has error state":
raise e
logging.exception(e)
LOG.exception(e)
return snapshot
def do_copy_volume(self, snapshot):
@ -221,12 +219,12 @@ class OSClientManager(object):
while volume.status != "available":
try:
logging.info("[*] Volume copy status: " + volume.status)
LOG.info("Volume copy status: " + volume.status)
volume = self.get_cinder().volumes.get(volume.id)
time.sleep(5)
except Exception as e:
logging.exception(e)
logging.warn("[*] Exception getting volume status")
LOG.exception(e)
LOG.warn("Exception getting volume status")
return volume
def make_glance_image(self, image_volume_name, copy_volume):
@ -246,15 +244,15 @@ class OSClientManager(object):
while image.status != "active":
try:
time.sleep(5)
logging.info("Image status: " + image.status)
LOG.info("Image status: " + image.status)
image = self.get_glance().images.get(image.id)
if image.status in ("killed", "deleted"):
raise Exception("Image have killed state")
except Exception as e:
if image.status in ("killed", "deleted"):
raise e
logging.exception(e)
logging.warn("Exception getting image status")
LOG.exception(e)
LOG.warn("Exception getting image status")
return image
def clean_snapshot(self, snapshot):
@ -262,7 +260,7 @@ class OSClientManager(object):
Deletes snapshot
:param snapshot: snapshot name
"""
logging.info("[*] Deleting existed snapshot: " + snapshot.id)
LOG.info("Deleting existed snapshot: " + snapshot.id)
self.get_cinder().volume_snapshots.delete(snapshot)
def download_image(self, image):
@ -271,9 +269,9 @@ class OSClientManager(object):
:param image: Image object for downloading
:return: stream of image data
"""
logging.debug("Download image enter")
LOG.debug("Download image enter")
stream = self.get_glance().images.data(image.id)
logging.debug("Stream with size {0}".format(image.size))
LOG.debug("Stream with size {0}".format(image.size))
return utils.ReSizeStream(stream, image.size, 1000000)
@ -365,7 +363,7 @@ class OpenstackOpts(object):
'Generated from auth_url: {1}'
.format(version, auth_url))
logging.info('Authenticating with Keystone version: '
LOG.info('Authenticating with Keystone version: '
'{0}, auth_url: {1}, username: {2}, project: {3}'.
format(self.auth_version, self.auth_url,
self.username, self.project_name))

View File

@ -16,13 +16,11 @@ limitations under the License.
Freezer restore modes related functions
"""
from oslo_config import cfg
from oslo_log import log
from freezer.utils import utils
CONF = cfg.CONF
logging = log.getLogger(__name__)
LOG = log.getLogger(__name__)
class RestoreOs(object):
@ -46,7 +44,7 @@ class RestoreOs(object):
if not backups:
msg = "Cannot find backups for path: %s" % path
logging.error(msg)
LOG.error(msg)
raise BaseException(msg)
return backups[-1]
@ -63,7 +61,7 @@ class RestoreOs(object):
stream = swift.get_object(self.container, "%s/%s" % (path, backup),
resp_chunk_size=10000000)
length = int(stream[0]["x-object-meta-length"])
logging.info("[*] Creation glance image")
LOG.info("Creation glance image")
image = glance.images.create(
data=utils.ReSizeStream(stream[1], length, 1),
container_format="bare", disk_format="raw")
@ -83,7 +81,7 @@ class RestoreOs(object):
x.created_at.split('.')[0]) >= restore_from_timestamp])
if not backups_filter:
logging.warning("no available backups for cinder volume,"
LOG.warning("no available backups for cinder volume,"
"restore newest backup")
backup = max(backups, key=lambda x: x.created_at)
cinder.restores.restore(backup_id=backup.id)
@ -107,10 +105,10 @@ class RestoreOs(object):
size = length / gb
if length % gb > 0:
size += 1
logging.info("[*] Creation volume from image")
LOG.info("Creation volume from image")
self.client_manager.get_cinder().volumes.create(size,
imageRef=image.id)
logging.info("[*] Deleting temporary image")
LOG.info("Deleting temporary image")
self.client_manager.get_glance().images.delete(image)
def restore_nova(self, instance_id, restore_from_timestamp):
@ -123,5 +121,5 @@ class RestoreOs(object):
(info, image) = self._create_image(instance_id, restore_from_timestamp)
nova = self.client_manager.get_nova()
flavor = nova.flavors.get(info['x-object-meta-flavor-id'])
logging.info("[*] Creation an instance")
LOG.info("Creation an instance")
nova.servers.create(info['x-object-meta-name'], image, flavor)

View File

@ -95,7 +95,7 @@ class NoDaemon(object):
@staticmethod
def handle_program_exit(signum, frame):
logging.info('[*] Got signal {0}. Exiting ...'.format(signum))
logging.info('Got signal {0}. Exiting ...'.format(signum))
NoDaemon.exit_flag = True
NoDaemon.instance.daemonizable.stop()
@ -107,16 +107,16 @@ class NoDaemon(object):
setup_logging(log_file)
while not NoDaemon.exit_flag:
try:
logging.info('[*] Starting in no-daemon mode')
logging.info('Starting in no-daemon mode')
self.daemonizable.start()
NoDaemon.exit_flag = True
except Exception as e:
if dump_stack_trace:
logging.error(traceback.format_exc(e))
logging.error('[*] Restarting procedure in no-daemon mode '
logging.error('Restarting procedure in no-daemon mode '
'after Fatal Error: {0}'.format(e))
sleep(10)
logging.info('[*] Done exiting')
logging.info('Done exiting')
def stop(self):
pass
@ -178,17 +178,17 @@ class Daemon(object):
setup_logging(log_file)
while not Daemon.exit_flag:
try:
logging.info('[*] freezer daemon starting, pid: {0}'.
logging.info('freezer daemon starting, pid: {0}'.
format(self.pid))
self.daemonizable.start()
Daemon.exit_flag = True
except Exception as e:
if dump_stack_trace:
logging.error(traceback.format_exc(e))
logging.error('[*] Restarting daemonized procedure '
logging.error('Restarting daemonized procedure '
'after Fatal Error: {0}'.format(e))
sleep(10)
logging.info('[*] freezer daemon done, pid: {0}'.format(self.pid))
logging.info('freezer daemon done, pid: {0}'.format(self.pid))
def stop(self):
pid = self.pid

View File

@ -114,7 +114,7 @@ class FreezerScheduler(object):
try:
return self.client.jobs.update(job_id, job_doc)
except Exception as e:
LOG.error("[*] Job update error: {0}".format(e))
LOG.error("Job update error: {0}".format(e))
def update_job_status(self, job_id, status):
doc = {'job_schedule': {'status': status}}
@ -134,7 +134,7 @@ class FreezerScheduler(object):
try:
work_job_doc_list = self.get_jobs()
except Exception as e:
LOG.error("[*] Unable to get jobs: {0}".format(e))
LOG.error("Unable to get jobs: {0}".format(e))
return
work_job_id_list = []

View File

@ -29,7 +29,7 @@ from six.moves import configparser
CONF = cfg.CONF
logging = log.getLogger(__name__)
LOG = log.getLogger(__name__)
class StopState(object):
@ -133,13 +133,13 @@ class Job(object):
def create(scheduler, executable, job_doc):
job = Job(scheduler, executable, job_doc)
if job.job_doc_status in ['running', 'scheduled']:
logging.warning('Resetting {0} status from job {1}'
LOG.warning('Resetting {0} status from job {1}'
.format(job.job_doc_status, job.id))
if job.job_doc_status == 'stop' and not job.event:
logging.info('Job {0} was stopped.'.format(job.id))
LOG.info('Job {0} was stopped.'.format(job.id))
job.event = Job.STOP_EVENT
elif not job.event:
logging.info('Autostart Job {0}'.format(job.id))
LOG.info('Autostart Job {0}'.format(job.id))
job.event = Job.START_EVENT
return job
@ -153,7 +153,7 @@ class Job(object):
def remove(self):
with self.scheduler.lock:
# delegate to state object
logging.info('REMOVE job {0}'.format(self.id))
LOG.info('REMOVE job {0}'.format(self.id))
self.state.remove(self)
@property
@ -262,13 +262,13 @@ class Job(object):
next_event = job_doc['job_schedule'].get('event', '')
while next_event:
if next_event == Job.STOP_EVENT:
logging.info('JOB {0} event: STOP'.format(self.id))
LOG.info('JOB {0} event: STOP'.format(self.id))
next_event = self.state.stop(self, job_doc)
elif next_event == Job.START_EVENT:
logging.info('JOB {0} event: START'.format(self.id))
LOG.info('JOB {0} event: START'.format(self.id))
next_event = self.state.start(self, job_doc)
elif next_event == Job.ABORT_EVENT:
logging.info('JOB {0} event: ABORT'.format(self.id))
LOG.info('JOB {0} event: ABORT'.format(self.id))
next_event = self.state.abort(self, job_doc)
def upload_metadata(self, metadata_string):
@ -277,10 +277,10 @@ class Job(object):
if metadata:
metadata['job_id'] = self.id
self.scheduler.upload_metadata(metadata)
logging.info("[*] Job {0}, freezer action metadata uploaded"
LOG.info("Job {0}, freezer action metadata uploaded"
.format(self.id))
except Exception as e:
logging.error('[*] metrics upload error: {0}'.format(e))
LOG.error('metrics upload error: {0}'.format(e))
def execute_job_action(self, job_action):
max_retries = job_action.get('max_retries', 1)
@ -305,7 +305,7 @@ class Job(object):
utils.delete_file(config_file_name)
if error:
logging.error("[*] Freezer client error: {0}".format(error))
LOG.error("Freezer client error: {0}".format(error))
elif output:
self.upload_metadata(output)
@ -313,7 +313,7 @@ class Job(object):
# ERROR
tries -= 1
if tries:
logging.warning('[*] Job {0} failed {1} action,'
LOG.warning('Job {0} failed {1} action,'
' retrying in {2} seconds'
.format(self.id, action_name,
max_retries_interval))
@ -322,11 +322,11 @@ class Job(object):
time.sleep(max_retries_interval)
else:
# SUCCESS
logging.info('[*] Job {0} action {1}'
LOG.info('Job {0} action {1}'
' returned success exit code'.
format(self.id, action_name))
return Job.SUCCESS_RESULT
logging.error('[*] Job {0} action {1} failed after {2} tries'
LOG.error('Job {0} action {1} failed after {2} tries'
.format(self.id, action_name, max_retries))
return Job.FAIL_RESULT
@ -344,7 +344,7 @@ class Job(object):
result = Job.SUCCESS_RESULT
with self.scheduler.execution_lock:
with self.scheduler.lock:
logging.info('job {0} running'.format(self.id))
LOG.info('job {0} running'.format(self.id))
self.state = RunningState
self.job_doc_status = Job.RUNNING_STATUS
self.scheduler.update_job_status(self.id, self.job_doc_status)
@ -353,7 +353,7 @@ class Job(object):
# if the job contains exec action and the scheduler passes the
# parameter --disable-exec job execution should fail
if self.contains_exec() and CONF.disable_exec:
logging.info("Job {0} failed because it contains exec action "
LOG.info("Job {0} failed because it contains exec action "
"and exec actions are disabled by scheduler"
.format(self.id))
self.result = Job.FAIL_RESULT
@ -369,7 +369,7 @@ class Job(object):
else:
freezer_action = job_action.get('freezer_action', {})
action_name = freezer_action.get('action', '')
logging.warning("[*]skipping {0} action".
LOG.warning("skipping {0} action".
format(action_name))
self.result = result
self.finish()
@ -410,12 +410,12 @@ class Job(object):
self.session_tag = resp['session_tag']
return
except Exception as e:
logging.error('[*]Error while starting session {0}. {1}'.
LOG.error('Error while starting session {0}. {1}'.
format(self.session_id, e))
logging.warning('[*]Retrying to start session {0}'.
LOG.warning('Retrying to start session {0}'.
format(self.session_id))
retry -= 1
logging.error('[*]Unable to start session {0}'.format(self.session_id))
LOG.error('Unable to start session {0}'.format(self.session_id))
def end_session(self, result):
if not self.session_id:
@ -430,22 +430,22 @@ class Job(object):
if resp['result'] == 'success':
return
except Exception as e:
logging.error('[*]Error while ending session {0}. {1}'.
LOG.error('Error while ending session {0}. {1}'.
format(self.session_id, e))
logging.warning('[*]Retrying to end session {0}'.
LOG.warning('Retrying to end session {0}'.
format(self.session_id))
retry -= 1
logging.error('[*]Unable to end session {0}'.format(self.session_id))
LOG.error('Unable to end session {0}'.format(self.session_id))
def schedule(self):
try:
kwargs = self.get_schedule_args()
self.scheduler.add_job(self.execute, id=self.id, **kwargs)
except Exception as e:
logging.error("[*] Unable to schedule job {0}: {1}".
LOG.error("Unable to schedule job {0}: {1}".
format(self.id, e))
logging.info('scheduler job with parameters {0}'.format(kwargs))
LOG.info('scheduler job with parameters {0}'.format(kwargs))
if self.scheduled:
self.job_doc_status = Job.SCHEDULED_STATUS

View File

@ -16,15 +16,18 @@ limitations under the License.
Freezer LVM related functions
"""
import logging
import os
import re
import subprocess
import uuid
from oslo_log import log
from freezer.common import config as freezer_config
from freezer.utils import utils
LOG = log.getLogger(__name__)
def lvm_snap_remove(backup_opt_dict):
"""
@ -39,12 +42,12 @@ def lvm_snap_remove(backup_opt_dict):
try:
_umount(backup_opt_dict.lvm_dirmount)
except Exception as e:
logging.warning("Snapshot unmount errror: {0}".format(e))
LOG.warning("Snapshot unmount error: {0}".format(e))
lv = os.path.join('/dev',
backup_opt_dict.lvm_volgroup,
backup_opt_dict.lvm_snapname)
_lvremove(lv)
logging.info('[*] Snapshot volume {0} removed'.format(lv))
LOG.info('Snapshot volume {0} removed'.format(lv))
def get_vol_fs_type(vol_name):
@ -54,8 +57,8 @@ def get_vol_fs_type(vol_name):
file system type
"""
if os.path.exists(vol_name) is False:
err = '[*] Provided volume name not found: {0} '.format(vol_name)
logging.exception(err)
err = 'Provided volume name not found: {0} '.format(vol_name)
LOG.exception(err)
raise Exception(err)
file_cmd = '{0} -0 -bLs --no-pad --no-buffer --preserve-date \
@ -67,12 +70,12 @@ def get_vol_fs_type(vol_name):
(file_out, file_err) = file_process.communicate()
file_match = re.search(r'(\S+?) filesystem data', file_out, re.I)
if file_match is None:
err = '[*] File system type not guessable: {0}'.format(file_err)
logging.exception(err)
err = 'File system type not guessable: {0}'.format(file_err)
LOG.exception(err)
raise Exception(err)
else:
filesys_type = file_match.group(1)
logging.info('[*] File system {0} found for volume {1}'.format(
LOG.info('File system {0} found for volume {1}'.format(
filesys_type, vol_name))
return filesys_type.lower().strip()
@ -115,7 +118,7 @@ def lvm_snap(backup_opt_dict):
lvm_info['snap_path'])
if not validate_lvm_params(backup_opt_dict):
logging.info('[*] No LVM requested/configured')
LOG.info('No LVM requested/configured')
return False
utils.create_dir(backup_opt_dict.lvm_dirmount)
@ -145,8 +148,8 @@ def lvm_snap(backup_opt_dict):
if lvm_process.returncode:
raise Exception('lvm snapshot creation error: {0}'.format(lvm_err))
logging.debug('[*] {0}'.format(lvm_out))
logging.warning('[*] Logical volume "{0}" created'.
LOG.debug('{0}'.format(lvm_out))
LOG.warning('Logical volume "{0}" created'.
format(backup_opt_dict.lvm_snapname))
# Guess the file system of the provided source volume and st mount
@ -170,16 +173,16 @@ def lvm_snap(backup_opt_dict):
executable=utils.find_executable('bash'))
mount_err = mount_process.communicate()[1]
if 'already mounted' in mount_err:
logging.warning('[*] Volume {0} already mounted on {1}\
LOG.warning('Volume {0} already mounted on {1}\
'.format(abs_snap_name, backup_opt_dict.lvm_dirmount))
return True
if mount_err:
logging.error("[*] Snapshot mount error. Removing snapshot")
LOG.error("Snapshot mount error. Removing snapshot")
lvm_snap_remove(backup_opt_dict)
raise Exception('lvm snapshot mounting error: {0}'.format(mount_err))
else:
logging.warning(
'[*] Volume {0} succesfully mounted on {1}'.format(
LOG.warning(
'Volume {0} successfully mounted on {1}'.format(
abs_snap_name, backup_opt_dict.lvm_dirmount))
return True
@ -264,11 +267,11 @@ def validate_lvm_params(backup_opt_dict):
True snapshot is requested and parameters are valid
"""
if backup_opt_dict.lvm_snapperm not in ('ro', 'rw'):
raise ValueError('[*] Error: Invalid value for option lvm-snap-perm: '
raise ValueError('Error: Invalid value for option lvm-snap-perm: '
'{}'.format(backup_opt_dict.lvm_snapperm))
if not backup_opt_dict.path_to_backup:
raise ValueError('[*] Error: no path-to-backup and '
raise ValueError('Error: no path-to-backup and '
'no lvm-auto-snap provided')
if not backup_opt_dict.lvm_srcvol and not backup_opt_dict.lvm_volgroup:
@ -276,23 +279,23 @@ def validate_lvm_params(backup_opt_dict):
return False
if not backup_opt_dict.lvm_srcvol:
raise ValueError('[*] Error: no lvm-srcvol and '
raise ValueError('Error: no lvm-srcvol and '
'no lvm-auto-snap provided')
if not backup_opt_dict.lvm_volgroup:
raise ValueError('[*] Error: no lvm-volgroup and '
raise ValueError('Error: no lvm-volgroup and '
'no lvm-auto-snap provided')
logging.info('[*] Source LVM Volume: {0}'.format(
LOG.info('Source LVM Volume: {0}'.format(
backup_opt_dict.lvm_srcvol))
logging.info('[*] LVM Volume Group: {0}'.format(
LOG.info('LVM Volume Group: {0}'.format(
backup_opt_dict.lvm_volgroup))
logging.info('[*] Snapshot name: {0}'.format(
LOG.info('Snapshot name: {0}'.format(
backup_opt_dict.lvm_snapname))
logging.info('[*] Snapshot size: {0}'.format(
LOG.info('Snapshot size: {0}'.format(
backup_opt_dict.lvm_snapsize))
logging.info('[*] Directory where the lvm snaphost will be mounted on:'
LOG.info('Directory where the lvm snapshot will be mounted on:'
' {0}'.format(backup_opt_dict.lvm_dirmount.strip()))
logging.info('[*] Path to backup (including snapshot): {0}'
LOG.info('Path to backup (including snapshot): {0}'
.format(backup_opt_dict.path_to_backup))
return True
@ -312,7 +315,7 @@ def _umount(path):
raise Exception('Impossible to umount {0}. {1}'
.format(path, mount_err))
os.rmdir(path)
logging.info('[*] Volume {0} unmounted'.format(path))
LOG.info('Volume {0} unmounted'.format(path))
def _lvremove(lv):
@ -326,7 +329,7 @@ def _lvremove(lv):
output, error = lvremove_proc.communicate()
if lvremove_proc.returncode:
if "contains a filesystem in use" in error:
logging.warning("Couldn't remove volume {0}. "
LOG.warning("Couldn't remove volume {0}. "
"It is still in use.".format(lv))
log_volume_holding_process(lv)
else:
@ -349,7 +352,7 @@ def log_volume_holding_process(lv):
# lsof is quite long, so no need to add a sleep here
utils.find_executable('lsof'), dev_id[0], dev_id[1])
process = subprocess.check_output([command], shell=True)
logging.warning("Process holding the volume is '{}'".format(process))
LOG.warning("Process holding the volume is '{}'".format(process))
except Exception as e:
logging.warning("Could not get informations on the process holding the"
LOG.warning("Could not get information on the process holding the"
" volume: {}".format(str(e)))

View File

@ -16,16 +16,13 @@ limitations under the License.
Freezer Backup modes related functions
"""
from oslo_config import cfg
from oslo_log import log
from freezer.snapshot import lvm
from freezer.snapshot import vss
from freezer.utils import winutils
CONF = cfg.CONF
logging = log.getLogger(__name__)
LOG = log.getLogger(__name__)
def snapshot_create(backup_opt_dict):

View File

@ -12,12 +12,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from oslo_log import log
from freezer.utils import utils
from freezer.utils import winutils
LOG = log.getLogger(__name__)
def vss_create_shadow_copy(windows_volume):
"""
@ -68,7 +71,7 @@ def vss_create_shadow_copy(windows_volume):
['powershell.exe', '-executionpolicy', 'unrestricted',
'-command', script, '-volume', windows_volume])
if err != '':
raise Exception('[*] Error creating a new shadow copy on {0}'
raise Exception('Error creating a new shadow copy on {0}'
', error {1}' .format(windows_volume, err))
for line in out.split('\n'):
@ -78,8 +81,7 @@ def vss_create_shadow_copy(windows_volume):
shadow_id = line.split('=')[1].strip().lower() + '}'
shadow_id = shadow_id[1:]
logging.info('[*] Created shadow copy {0}'.
format(shadow_id))
LOG.info('Created shadow copy {0}'.format(shadow_id))
return shadow_path, shadow_id
@ -96,13 +98,12 @@ def vss_delete_shadow_copy(shadow_id, windows_volume):
'/shadow={0}'.format(shadow_id), '/quiet']
(out, err) = utils.create_subprocess(cmd)
if err != '':
raise Exception('[*] Error deleting shadow copy with id {0}'
raise Exception('Error deleting shadow copy with id {0}'
', error {1}' .format(shadow_id, err))
vss_delete_symlink(windows_volume)
logging.info('[*] Deleting shadow copy {0}'.
format(shadow_id))
LOG.info('Deleting shadow copy {0}'.format(shadow_id))
return True
@ -114,6 +115,6 @@ def vss_delete_symlink(windows_volume):
if os.path.exists(path):
os.rmdir(path)
except Exception:
logging.error('Failed to delete shadow copy symlink {0}'.
LOG.error('Failed to delete shadow copy symlink {0}'.
format(os.path.join(windows_volume,
'freezer_shadowcopy')))

View File

@ -14,13 +14,16 @@
import abc
import logging
import os
import re
import six
from oslo_log import log
from freezer.utils import utils
LOG = log.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class Storage(object):
@ -122,7 +125,7 @@ class Storage(object):
if recent_to_date:
backups = [b for b in backups
if b.timestamp <= recent_to_date]
err_msg = '[*] No matching backup name "{0}" found'\
err_msg = 'No matching backup name "{0}" found'\
.format(hostname_backup_name)
if not backups:
raise IndexError(err_msg)
@ -338,9 +341,8 @@ class Backup(object):
backup.tar_meta = name in tar_names
backups.append(backup)
except Exception as e:
logging.exception(e)
logging.error("cannot parse backup name: {0}"
.format(name))
LOG.exception(e)
LOG.error("cannot parse backup name: {0}".format(name))
backups.sort(
key=lambda x: (x.hostname_backup_name, x.timestamp, x.level))
zero_backups = []
@ -357,7 +359,7 @@ class Backup(object):
last_backup.add_increment(backup.backup(storage,
last_backup))
else:
logging.error("Incremental backup without parent: {0}"
LOG.error("Incremental backup without parent: {0}"
.format(backup))
return zero_backups

View File

@ -15,9 +15,13 @@
import abc
import six
from oslo_log import log
from freezer.storage import base
from freezer.utils import utils
LOG = log.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class FsLikeStorage(base.Storage):

View File

@ -19,9 +19,13 @@ import io
import os
import shutil
from oslo_log import log
from freezer.storage import fslike
from freezer.utils import utils
LOG = log.getLogger(__name__)
class LocalStorage(fslike.FsLikeStorage):
def get_file(self, from_path, to_path):

View File

@ -15,15 +15,13 @@
# PyCharm will not recognize queue. Puts red squiggle line under it. That's OK.
from six.moves import queue
from oslo_config import cfg
from oslo_log import log
from freezer.storage import base
from freezer.storage.exceptions import StorageException
from freezer.utils import streaming
CONF = cfg.CONF
logging = log.getLogger(__name__)
LOG = log.getLogger(__name__)
class MultipleStorage(base.Storage):
@ -59,7 +57,7 @@ class MultipleStorage(base.Storage):
if not except_queue.empty:
while not except_queue.empty():
e = except_queue.get_nowait()
logging.critical('Storage error: {0}'.format(e))
LOG.critical('Storage error: {0}'.format(e))
return True
else:
return False
@ -133,7 +131,7 @@ class StorageManager(object):
else:
output_queue.put(message)
except Exception as e:
logging.exception(e)
LOG.exception(e)
StorageManager.one_fails_all_fail(
self.input_queue, self.output_queues)
self.broken_output_queues.add(output_queue)

View File

@ -20,10 +20,14 @@ import stat
import paramiko
from oslo_log import log
from freezer.storage import fslike
from freezer.utils import utils
LOG = log.getLogger(__name__)
class SshStorage(fslike.FsLikeStorage):
"""

View File

@ -16,15 +16,13 @@ limitations under the License.
"""
import json
from oslo_config import cfg
from oslo_log import log
import requests.exceptions
import time
from freezer.storage import base
CONF = cfg.CONF
logging = log.getLogger(__name__)
LOG = log.getLogger(__name__)
class SwiftStorage(base.Storage):
@ -71,23 +69,23 @@ class SwiftStorage(base.Storage):
success = False
while not success:
try:
logging.info(
'[*] Uploading file chunk index: {0}'.format(path))
LOG.info(
'Uploading file chunk index: {0}'.format(path))
self.swift().put_object(
self.segments, path, content,
content_type='application/octet-stream',
content_length=len(content))
logging.info('[*] Data successfully uploaded!')
LOG.info('Data successfully uploaded!')
success = True
except Exception as error:
logging.info(
'[*] Retrying to upload file chunk index: {0}'.format(
LOG.info(
'Retrying to upload file chunk index: {0}'.format(
path))
time.sleep(60)
self.client_manager.create_swift()
count += 1
if count == 10:
logging.critical('[*] Error: add_object: {0}'
LOG.critical('Error: add_object: {0}'
.format(error))
raise Exception("cannot add object to storage")
@ -101,10 +99,10 @@ class SwiftStorage(base.Storage):
self.client_manager.create_swift()
headers = {'x-object-manifest':
u'{0}/{1}'.format(self.segments, backup)}
logging.info('[*] Uploading Swift Manifest: {0}'.format(backup))
LOG.info('Uploading Swift Manifest: {0}'.format(backup))
self.swift().put_object(container=self.container, obj=str(backup),
contents=u'', headers=headers)
logging.info('[*] Manifest successfully uploaded!')
LOG.info('Manifest successfully uploaded!')
def upload_meta_file(self, backup, meta_file):
# Upload swift manifest for segments
@ -113,7 +111,7 @@ class SwiftStorage(base.Storage):
self.client_manager.create_swift()
# Upload tar incremental meta data file and remove it
logging.info('[*] Uploading tar meta data file: {0}'.format(
LOG.info('Uploading tar meta data file: {0}'.format(
backup.tar()))
with open(meta_file, 'r') as meta_fd:
self.swift().put_object(
@ -203,7 +201,7 @@ class SwiftStorage(base.Storage):
return [b for b in base.Backup.parse_backups(names, self)
if b.hostname_backup_name == hostname_backup_name]
except Exception as error:
raise Exception('[*] Error: get_object_list: {0}'.format(error))
raise Exception('Error: get_object_list: {0}'.format(error))
def backup_blocks(self, backup):
"""
@ -217,7 +215,7 @@ class SwiftStorage(base.Storage):
self.container, str(backup),
resp_chunk_size=self.max_segment_size)[1]
except requests.exceptions.SSLError as e:
logging.warning(e)
LOG.warning(e)
chunks = self.client_manager.create_swift().get_object(
self.container, str(backup),
resp_chunk_size=self.max_segment_size)[1]

View File

@ -13,15 +13,18 @@
# limitations under the License.
import logging
import os
import re
from six.moves import configparser
from six.moves import cStringIO
from oslo_log import log
from freezer.utils import utils
LOG = log.getLogger(__name__)
class Config(object):
@ -29,7 +32,7 @@ class Config(object):
def parse(config_path):
if config_path:
if not os.path.exists(config_path):
logging.error("[*] Critical Error: Configuration file {0} not"
LOG.error("Critical Error: Configuration file {0} not"
" found".format(config_path))
raise Exception("Configuration file {0} not found !".format(
config_path))
@ -93,12 +96,12 @@ def ini_parse(lines):
try:
# TODO(ANONYMOUS): Remove the parsing of ini-like file via regex
conf = find_all(INI, lines)
logging.warning("Using non-INI files for database configuration "
LOG.warning("Using non-INI files for database configuration "
"file is deprecated. Falling back to Regex.")
logging.warning("INI parser error was: {}".format(str(e)))
LOG.warning("INI parser error was: {}".format(str(e)))
return conf
except Exception:
logging.warning("Couldn't parse non-INI config file using Regex")
LOG.warning("Couldn't parse non-INI config file using Regex")
raise

View File

@ -18,6 +18,10 @@ Freezer general utils functions
from six.moves import queue
import threading
from oslo_log import log
LOG = log.getLogger(__name__)
class Wait(Exception):
pass
@ -104,6 +108,7 @@ class QueuedThread(threading.Thread):
try:
super(QueuedThread, self).run()
except Exception as e:
LOG.exception(e)
self._exception_queue.put_nowait(e)
self.rich_queue.force_stop()
# Thread will exit at this point.

View File

@ -19,7 +19,6 @@ Freezer general utils functions
import datetime
import errno
import fnmatch as fn
import logging
import os
import subprocess
import sys
@ -31,7 +30,6 @@ from oslo_config import cfg
from oslo_log import log
from six.moves import configparser
CONF = cfg.CONF
LOG = log.getLogger(__name__)
@ -59,15 +57,15 @@ def create_dir(directory, do_log=True):
try:
if not os.path.isdir(expanded_dir_name):
if do_log:
logging.warning('[*] Directory {0} does not exists,\
LOG.warning('Directory {0} does not exists,\
creating...'.format(expanded_dir_name))
os.makedirs(expanded_dir_name)
else:
if do_log:
logging.warning('[*] Directory {0} found!'.format(
LOG.warning('Directory {0} found!'.format(
expanded_dir_name))
except Exception as error:
err = '[*] Error while creating directory {0}: {1}\
err = 'Error while creating directory {0}: {1}\
'.format(expanded_dir_name, error)
raise Exception(err)
@ -126,7 +124,7 @@ def get_mount_from_path(path):
"""
if not os.path.exists(path):
logging.critical('[*] Error: provided path does not exist: {0}'
LOG.critical('Error: provided path does not exist: {0}'
.format(path))
raise IOError
@ -230,7 +228,7 @@ class ReSizeStream(object):
return self
def next(self):
logging.info("Transmitted {0} of {1}".format(self.transmitted,
LOG.info("Transmitted {0} of {1}".format(self.transmitted,
self.length))
chunk_size = self.chunk_size
if len(self.reminder) > chunk_size:
@ -338,7 +336,7 @@ def alter_proxy(proxy):
os.environ.pop("HTTPS_PROXY", None)
if proxy_value.startswith('http://') or \
proxy_value.startswith('https://'):
logging.info('[*] Using proxy {0}'.format(proxy_value))
LOG.info('Using proxy {0}'.format(proxy_value))
os.environ['HTTP_PROXY'] = str(proxy_value)
os.environ['HTTPS_PROXY'] = str(proxy_value)
else:
@ -356,7 +354,7 @@ def shield(func):
try:
return func(*args, **kwargs)
except Exception as error:
logging.error(error)
LOG.error(error)
return wrapper
@ -366,7 +364,7 @@ def delete_file(path_to_file):
try:
os.remove(path_to_file)
except Exception:
logging.warning("Error deleting file {0}".format(path_to_file))
LOG.warning("Error deleting file {0}".format(path_to_file))
def walk_path(path, exclude, ignorelinks, callback, *kargs, **kwargs):
@ -498,7 +496,7 @@ def set_max_process_priority():
# children processes inherit niceness from father
try:
LOG.warning(
'[*] Setting freezer execution with high CPU and I/O priority')
'Setting freezer execution with high CPU and I/O priority')
PID = os.getpid()
# Set cpu priority
os.nice(-19)
@ -509,4 +507,4 @@ def set_max_process_priority():
u'-p', u'{0}'.format(PID)
])
except Exception as priority_error:
LOG.warning('[*] Priority: {0}'.format(priority_error))
LOG.warning('Priority: {0}'.format(priority_error))