Fix Freezer Agent Logging
Standardize on oslo.log. Log exception stack traces where appropriate. Add logging to all files. Remove '[*]' from log messages. Log command-line arguments at Freezer Agent start so that errors can be traced to specific invocations. Leave the scheduler module mostly unchanged because it performs its own Python logging configuration. Change-Id: I23de0558409e63978303963d592a4e5ee4dee2b5
This commit is contained in:
parent
c682addec0
commit
00db2b0d95
|
@ -18,8 +18,12 @@ limitations under the License.
|
||||||
import json
|
import json
|
||||||
import requests
|
import requests
|
||||||
|
|
||||||
|
from oslo_log import log
|
||||||
|
|
||||||
from freezer.apiclient import exceptions
|
from freezer.apiclient import exceptions
|
||||||
|
|
||||||
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class ActionManager(object):
|
class ActionManager(object):
|
||||||
|
|
||||||
|
|
|
@ -17,8 +17,12 @@ limitations under the License.
|
||||||
import json
|
import json
|
||||||
import requests
|
import requests
|
||||||
|
|
||||||
|
from oslo_log import log
|
||||||
|
|
||||||
from freezer.apiclient import exceptions
|
from freezer.apiclient import exceptions
|
||||||
|
|
||||||
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class BackupsManager(object):
|
class BackupsManager(object):
|
||||||
|
|
||||||
|
@ -52,7 +56,7 @@ class BackupsManager(object):
|
||||||
|
|
||||||
def list_all(self, limit=10, offset=0, search=None):
|
def list_all(self, limit=10, offset=0, search=None):
|
||||||
"""
|
"""
|
||||||
Retrieves a list of backup infos
|
Retrieves a list of backups
|
||||||
|
|
||||||
:param limit: number of result to return (optional, default 10)
|
:param limit: number of result to return (optional, default 10)
|
||||||
:param offset: order of first document (optional, default 0)
|
:param offset: order of first document (optional, default 0)
|
||||||
|
|
|
@ -23,6 +23,7 @@ from keystoneauth1.identity import v2
|
||||||
from keystoneauth1.identity import v3
|
from keystoneauth1.identity import v3
|
||||||
from keystoneauth1 import session as ksa_session
|
from keystoneauth1 import session as ksa_session
|
||||||
from oslo_config import cfg
|
from oslo_config import cfg
|
||||||
|
from oslo_log import log
|
||||||
|
|
||||||
from freezer.apiclient import actions
|
from freezer.apiclient import actions
|
||||||
from freezer.apiclient import backups
|
from freezer.apiclient import backups
|
||||||
|
@ -31,8 +32,7 @@ from freezer.apiclient import registration
|
||||||
from freezer.apiclient import sessions
|
from freezer.apiclient import sessions
|
||||||
from freezer.utils import utils
|
from freezer.utils import utils
|
||||||
|
|
||||||
CONF = cfg.CONF
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
FREEZER_SERVICE_TYPE = 'backup'
|
FREEZER_SERVICE_TYPE = 'backup'
|
||||||
|
|
||||||
|
|
|
@ -16,6 +16,9 @@ limitations under the License.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
import json
|
import json
|
||||||
|
from oslo_log import log
|
||||||
|
|
||||||
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class ApiClientException(Exception):
|
class ApiClientException(Exception):
|
||||||
|
@ -35,7 +38,7 @@ class ApiClientException(Exception):
|
||||||
"""
|
"""
|
||||||
try:
|
try:
|
||||||
body = json.loads(r.text)
|
body = json.loads(r.text)
|
||||||
message = "[*] Error {0}: {1}".format(
|
message = "Error {0}: {1}".format(
|
||||||
r.status_code,
|
r.status_code,
|
||||||
body['description'])
|
body['description'])
|
||||||
except Exception:
|
except Exception:
|
||||||
|
@ -52,7 +55,7 @@ class ApiClientException(Exception):
|
||||||
:return: string with error message or None if it fails
|
:return: string with error message or None if it fails
|
||||||
"""
|
"""
|
||||||
try:
|
try:
|
||||||
message = "[*] Error {0}: {1}".format(
|
message = "Error {0}: {1}".format(
|
||||||
r.status_code,
|
r.status_code,
|
||||||
r.text)
|
r.text)
|
||||||
except Exception:
|
except Exception:
|
||||||
|
|
|
@ -17,8 +17,12 @@ limitations under the License.
|
||||||
import json
|
import json
|
||||||
import requests
|
import requests
|
||||||
|
|
||||||
|
from oslo_log import log
|
||||||
|
|
||||||
from freezer.apiclient import exceptions
|
from freezer.apiclient import exceptions
|
||||||
|
|
||||||
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class JobManager(object):
|
class JobManager(object):
|
||||||
|
|
||||||
|
|
|
@ -17,8 +17,12 @@ limitations under the License.
|
||||||
import json
|
import json
|
||||||
import requests
|
import requests
|
||||||
|
|
||||||
|
from oslo_log import log
|
||||||
|
|
||||||
from freezer.apiclient import exceptions
|
from freezer.apiclient import exceptions
|
||||||
|
|
||||||
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class RegistrationManager(object):
|
class RegistrationManager(object):
|
||||||
|
|
||||||
|
|
|
@ -17,8 +17,12 @@ limitations under the License.
|
||||||
import json
|
import json
|
||||||
import requests
|
import requests
|
||||||
|
|
||||||
|
from oslo_log import log
|
||||||
|
|
||||||
from freezer.apiclient import exceptions
|
from freezer.apiclient import exceptions
|
||||||
|
|
||||||
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class SessionManager(object):
|
class SessionManager(object):
|
||||||
|
|
||||||
|
|
|
@ -552,7 +552,7 @@ def get_backup_args():
|
||||||
'trickle', path=":".join(os.environ.get('PATH')))
|
'trickle', path=":".join(os.environ.get('PATH')))
|
||||||
|
|
||||||
if trickle_executable:
|
if trickle_executable:
|
||||||
LOG.info("[*] Info: Starting trickle ...")
|
LOG.info("Info: Starting trickle ...")
|
||||||
trickle_command = '{0} -d {1} -u {2} '.\
|
trickle_command = '{0} -d {1} -u {2} '.\
|
||||||
format(trickle_executable,
|
format(trickle_executable,
|
||||||
getattr(backup_args, 'download_limit') or -1,
|
getattr(backup_args, 'download_limit') or -1,
|
||||||
|
@ -570,7 +570,7 @@ def get_backup_args():
|
||||||
else:
|
else:
|
||||||
os.environ["tricklecount"] = str(1)
|
os.environ["tricklecount"] = str(1)
|
||||||
else:
|
else:
|
||||||
LOG.warn("[*] Trickle not found. Switching to normal mode without "
|
LOG.warn("Trickle not found. Switching to normal mode without "
|
||||||
"limiting bandwidth")
|
"limiting bandwidth")
|
||||||
if backup_args.config:
|
if backup_args.config:
|
||||||
# remove index tmp_file from backup arguments dict
|
# remove index tmp_file from backup arguments dict
|
||||||
|
|
|
@ -24,15 +24,13 @@ import six
|
||||||
from six.moves import queue
|
from six.moves import queue
|
||||||
import time
|
import time
|
||||||
|
|
||||||
from oslo_config import cfg
|
|
||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
|
|
||||||
from freezer.engine.exceptions import EngineException
|
from freezer.engine.exceptions import EngineException
|
||||||
from freezer.utils import streaming
|
from freezer.utils import streaming
|
||||||
from freezer.utils import utils
|
from freezer.utils import utils
|
||||||
|
|
||||||
CONF = cfg.CONF
|
LOG = log.getLogger(__name__)
|
||||||
logging = log.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
@six.add_metaclass(abc.ABCMeta)
|
@six.add_metaclass(abc.ABCMeta)
|
||||||
|
@ -116,7 +114,7 @@ class BackupEngine(object):
|
||||||
if not except_queue.empty():
|
if not except_queue.empty():
|
||||||
while not except_queue.empty():
|
while not except_queue.empty():
|
||||||
e = except_queue.get_nowait()
|
e = except_queue.get_nowait()
|
||||||
logging.critical('Engine error: {0}'.format(e))
|
LOG.exception('Engine error: {0}'.format(e))
|
||||||
return True
|
return True
|
||||||
else:
|
else:
|
||||||
return False
|
return False
|
||||||
|
@ -170,16 +168,16 @@ class BackupEngine(object):
|
||||||
"""
|
"""
|
||||||
:type backup: freezer.storage.Backup
|
:type backup: freezer.storage.Backup
|
||||||
"""
|
"""
|
||||||
logging.info("Creation restore path: {0}".format(restore_path))
|
LOG.info("Creation restore path: {0}".format(restore_path))
|
||||||
utils.create_dir_tree(restore_path)
|
utils.create_dir_tree(restore_path)
|
||||||
if not overwrite and not utils.is_empty_dir(restore_path):
|
if not overwrite and not utils.is_empty_dir(restore_path):
|
||||||
raise Exception(
|
raise Exception(
|
||||||
"Restore dir is not empty. "
|
"Restore dir is not empty. "
|
||||||
"Please use --overwrite or provide different path.")
|
"Please use --overwrite or provide different path.")
|
||||||
logging.info("Creation restore path completed")
|
LOG.info("Creation restore path completed")
|
||||||
for level in range(0, backup.level + 1):
|
for level in range(0, backup.level + 1):
|
||||||
b = backup.full_backup.increments[level]
|
b = backup.full_backup.increments[level]
|
||||||
logging.info("Restore backup {0}".format(b))
|
LOG.info("Restore backup {0}".format(b))
|
||||||
|
|
||||||
# Use SimpleQueue because Queue does not work on Mac OS X.
|
# Use SimpleQueue because Queue does not work on Mac OS X.
|
||||||
read_except_queue = SimpleQueue()
|
read_except_queue = SimpleQueue()
|
||||||
|
@ -214,7 +212,7 @@ class BackupEngine(object):
|
||||||
if not except_queue.empty():
|
if not except_queue.empty():
|
||||||
while not except_queue.empty():
|
while not except_queue.empty():
|
||||||
e = except_queue.get()
|
e = except_queue.get()
|
||||||
logging.critical('Engine error: {0}'.format(e))
|
LOG.exception('Engine error: {0}'.format(e))
|
||||||
return True
|
return True
|
||||||
else:
|
else:
|
||||||
return False
|
return False
|
||||||
|
@ -228,8 +226,8 @@ class BackupEngine(object):
|
||||||
if tar_stream.exitcode or got_exception:
|
if tar_stream.exitcode or got_exception:
|
||||||
raise EngineException("Engine error. Failed to restore.")
|
raise EngineException("Engine error. Failed to restore.")
|
||||||
|
|
||||||
logging.info(
|
LOG.info(
|
||||||
'[*] Restore execution successfully executed \
|
'Restore execution successfully executed \
|
||||||
for backup name {0}'.format(backup))
|
for backup name {0}'.format(backup))
|
||||||
|
|
||||||
@abc.abstractmethod
|
@abc.abstractmethod
|
||||||
|
|
|
@ -17,6 +17,10 @@ Freezer Tar related functions
|
||||||
"""
|
"""
|
||||||
from freezer.utils import utils
|
from freezer.utils import utils
|
||||||
|
|
||||||
|
from oslo_log import log
|
||||||
|
|
||||||
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class TarCommandBuilder(object):
|
class TarCommandBuilder(object):
|
||||||
"""
|
"""
|
||||||
|
@ -159,6 +163,6 @@ def get_tar_flag_from_algo(compression):
|
||||||
}
|
}
|
||||||
compression_exec = utils.get_executable_path(compression)
|
compression_exec = utils.get_executable_path(compression)
|
||||||
if not compression_exec:
|
if not compression_exec:
|
||||||
raise Exception("[*] Critical Error: [*] {0} executable not found ".
|
raise Exception("Critical Error: {0} executable not found ".
|
||||||
format(compression))
|
format(compression))
|
||||||
return algo.get(compression)
|
return algo.get(compression)
|
||||||
|
|
|
@ -15,14 +15,17 @@ limitations under the License.
|
||||||
|
|
||||||
Freezer general utils functions
|
Freezer general utils functions
|
||||||
"""
|
"""
|
||||||
import logging
|
|
||||||
import os
|
import os
|
||||||
import subprocess
|
import subprocess
|
||||||
|
|
||||||
|
from oslo_log import log
|
||||||
|
|
||||||
from freezer.engine import engine
|
from freezer.engine import engine
|
||||||
from freezer.engine.tar import tar_builders
|
from freezer.engine.tar import tar_builders
|
||||||
from freezer.utils import winutils
|
from freezer.utils import winutils
|
||||||
|
|
||||||
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class TarBackupEngine(engine.BackupEngine):
|
class TarBackupEngine(engine.BackupEngine):
|
||||||
|
|
||||||
|
@ -53,7 +56,7 @@ class TarBackupEngine(engine.BackupEngine):
|
||||||
self.storage.upload_freezer_meta_data(backup, metadata)
|
self.storage.upload_freezer_meta_data(backup, metadata)
|
||||||
|
|
||||||
def backup_data(self, backup_path, manifest_path):
|
def backup_data(self, backup_path, manifest_path):
|
||||||
logging.info("Tar engine backup stream enter")
|
LOG.info("Tar engine backup stream enter")
|
||||||
tar_command = tar_builders.TarCommandBuilder(
|
tar_command = tar_builders.TarCommandBuilder(
|
||||||
backup_path, self.compression_algo, self.is_windows)
|
backup_path, self.compression_algo, self.is_windows)
|
||||||
if self.encrypt_pass_file:
|
if self.encrypt_pass_file:
|
||||||
|
@ -65,7 +68,7 @@ class TarBackupEngine(engine.BackupEngine):
|
||||||
|
|
||||||
command = tar_command.build()
|
command = tar_command.build()
|
||||||
|
|
||||||
logging.info("Execution command: \n{}".format(command))
|
LOG.info("Execution command: \n{}".format(command))
|
||||||
|
|
||||||
tar_process = subprocess.Popen(command, stdout=subprocess.PIPE,
|
tar_process = subprocess.Popen(command, stdout=subprocess.PIPE,
|
||||||
stderr=subprocess.PIPE, shell=True)
|
stderr=subprocess.PIPE, shell=True)
|
||||||
|
@ -77,7 +80,7 @@ class TarBackupEngine(engine.BackupEngine):
|
||||||
|
|
||||||
self.check_process_output(tar_process, 'Backup')
|
self.check_process_output(tar_process, 'Backup')
|
||||||
|
|
||||||
logging.info("Tar engine streaming end")
|
LOG.info("Tar engine streaming end")
|
||||||
|
|
||||||
def restore_level(self, restore_path, read_pipe, backup, except_queue):
|
def restore_level(self, restore_path, read_pipe, backup, except_queue):
|
||||||
"""
|
"""
|
||||||
|
@ -119,12 +122,13 @@ class TarBackupEngine(engine.BackupEngine):
|
||||||
while True:
|
while True:
|
||||||
tar_process.stdin.write(read_pipe.recv_bytes())
|
tar_process.stdin.write(read_pipe.recv_bytes())
|
||||||
except EOFError:
|
except EOFError:
|
||||||
logging.info('[*] Pipe closed as EOF reached. '
|
LOG.info('Pipe closed as EOF reached. '
|
||||||
'Data transmitted successfully')
|
'Data transmitted successfully')
|
||||||
finally:
|
finally:
|
||||||
self.check_process_output(tar_process, 'Restore')
|
self.check_process_output(tar_process, 'Restore')
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
|
LOG.exception(e)
|
||||||
except_queue.put(e)
|
except_queue.put(e)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
|
@ -144,10 +148,10 @@ class TarBackupEngine(engine.BackupEngine):
|
||||||
tar_err = process.communicate()[1]
|
tar_err = process.communicate()[1]
|
||||||
|
|
||||||
if tar_err:
|
if tar_err:
|
||||||
logging.error('{0} error: {1}'.format(function, tar_err))
|
LOG.error('{0} error: {1}'.format(function, tar_err))
|
||||||
|
|
||||||
if process.returncode:
|
if process.returncode:
|
||||||
logging.error('{0} return code is not 0'
|
LOG.error('{0} return code is not 0'
|
||||||
.format(process.returncode))
|
.format(process.returncode))
|
||||||
raise Exception('{0} process failed with return code: {1}'
|
raise Exception('{0} process failed with return code: {1}'
|
||||||
.format(function, process.returncode))
|
.format(function, process.returncode))
|
||||||
|
|
|
@ -34,7 +34,7 @@ from oslo_log import log
|
||||||
from oslo_utils import importutils
|
from oslo_utils import importutils
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
logging = log.getLogger(__name__)
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
@six.add_metaclass(abc.ABCMeta)
|
@six.add_metaclass(abc.ABCMeta)
|
||||||
|
@ -51,11 +51,11 @@ class Job(object):
|
||||||
|
|
||||||
def execute(self):
|
def execute(self):
|
||||||
start_time = utils.DateTime.now()
|
start_time = utils.DateTime.now()
|
||||||
logging.info('[*] Job execution Started at: {0}'.format(start_time))
|
LOG.info('Job execution Started at: {0}'.format(start_time))
|
||||||
retval = self.execute_method()
|
retval = self.execute_method()
|
||||||
end_time = utils.DateTime.now()
|
end_time = utils.DateTime.now()
|
||||||
logging.info('[*] Job execution Finished, at: {0}'.format(end_time))
|
LOG.info('Job execution Finished, at: {0}'.format(end_time))
|
||||||
logging.info('[*] Job time Elapsed: {0}'.format(end_time - start_time))
|
LOG.info('Job time Elapsed: {0}'.format(end_time - start_time))
|
||||||
return retval
|
return retval
|
||||||
|
|
||||||
@abc.abstractmethod
|
@abc.abstractmethod
|
||||||
|
@ -73,9 +73,9 @@ class BackupJob(Job):
|
||||||
try:
|
try:
|
||||||
(out, err) = utils.create_subprocess('sync')
|
(out, err) = utils.create_subprocess('sync')
|
||||||
if err:
|
if err:
|
||||||
logging.error('Error while sync exec: {0}'.format(err))
|
LOG.error('Error while sync exec: {0}'.format(err))
|
||||||
except Exception as error:
|
except Exception as error:
|
||||||
logging.error('Error while sync exec: {0}'.format(error))
|
LOG.error('Error while sync exec: {0}'.format(error))
|
||||||
if not self.conf.mode:
|
if not self.conf.mode:
|
||||||
raise ValueError("Empty mode")
|
raise ValueError("Empty mode")
|
||||||
mod_name = 'freezer.mode.{0}.{1}'.format(
|
mod_name = 'freezer.mode.{0}.{1}'.format(
|
||||||
|
@ -154,8 +154,8 @@ class BackupJob(Job):
|
||||||
self.conf.dereference_symlink == 'hard')
|
self.conf.dereference_symlink == 'hard')
|
||||||
consistency_checksum = CheckSum(
|
consistency_checksum = CheckSum(
|
||||||
filepath, ignorelinks=ignorelinks).compute()
|
filepath, ignorelinks=ignorelinks).compute()
|
||||||
logging.info('[*] Computed checksum for consistency {0}'.
|
LOG.info('Computed checksum for consistency {0}'.
|
||||||
format(consistency_checksum))
|
format(consistency_checksum))
|
||||||
self.conf.consistency_checksum = consistency_checksum
|
self.conf.consistency_checksum = consistency_checksum
|
||||||
|
|
||||||
hostname_backup_name = self.conf.hostname_backup_name
|
hostname_backup_name = self.conf.hostname_backup_name
|
||||||
|
@ -181,13 +181,13 @@ class BackupJob(Job):
|
||||||
self.storage)
|
self.storage)
|
||||||
|
|
||||||
if backup_media == 'nova':
|
if backup_media == 'nova':
|
||||||
logging.info('[*] Executing nova backup')
|
LOG.info('Executing nova backup')
|
||||||
backup_os.backup_nova(self.conf.nova_inst_id)
|
backup_os.backup_nova(self.conf.nova_inst_id)
|
||||||
elif backup_media == 'cindernative':
|
elif backup_media == 'cindernative':
|
||||||
logging.info('[*] Executing cinder backup')
|
LOG.info('Executing cinder backup')
|
||||||
backup_os.backup_cinder(self.conf.cindernative_vol_id)
|
backup_os.backup_cinder(self.conf.cindernative_vol_id)
|
||||||
elif backup_media == 'cinder':
|
elif backup_media == 'cinder':
|
||||||
logging.info('[*] Executing cinder snapshot')
|
LOG.info('Executing cinder snapshot')
|
||||||
backup_os.backup_cinder_by_glance(self.conf.cinder_vol_id)
|
backup_os.backup_cinder_by_glance(self.conf.cinder_vol_id)
|
||||||
else:
|
else:
|
||||||
raise Exception('unknown parameter backup_media %s' % backup_media)
|
raise Exception('unknown parameter backup_media %s' % backup_media)
|
||||||
|
@ -198,7 +198,7 @@ class RestoreJob(Job):
|
||||||
|
|
||||||
def execute_method(self):
|
def execute_method(self):
|
||||||
conf = self.conf
|
conf = self.conf
|
||||||
logging.info('[*] Executing FS restore...')
|
LOG.info('Executing FS restore...')
|
||||||
restore_timestamp = None
|
restore_timestamp = None
|
||||||
|
|
||||||
restore_abs_path = conf.restore_abs_path
|
restore_abs_path = conf.restore_abs_path
|
||||||
|
@ -215,7 +215,7 @@ class RestoreJob(Job):
|
||||||
restore_checksum = CheckSum(restore_abs_path,
|
restore_checksum = CheckSum(restore_abs_path,
|
||||||
ignorelinks=True)
|
ignorelinks=True)
|
||||||
if restore_checksum.compare(backup_checksum):
|
if restore_checksum.compare(backup_checksum):
|
||||||
logging.info('[*] Consistency check success.')
|
LOG.info('Consistency check success.')
|
||||||
else:
|
else:
|
||||||
raise ConsistencyCheckException(
|
raise ConsistencyCheckException(
|
||||||
"Backup Consistency Check failed: backup checksum "
|
"Backup Consistency Check failed: backup checksum "
|
||||||
|
@ -261,11 +261,11 @@ class AdminJob(Job):
|
||||||
class ExecJob(Job):
|
class ExecJob(Job):
|
||||||
|
|
||||||
def execute_method(self):
|
def execute_method(self):
|
||||||
logging.info('[*] exec job....')
|
LOG.info('exec job....')
|
||||||
if self.conf.command:
|
if self.conf.command:
|
||||||
logging.info('[*] Executing exec job....')
|
LOG.info('Executing exec job....')
|
||||||
exec_cmd.execute(self.conf.command)
|
exec_cmd.execute(self.conf.command)
|
||||||
else:
|
else:
|
||||||
logging.warning(
|
LOG.warning(
|
||||||
'[*] No command info options were set. Exiting.')
|
'No command info options were set. Exiting.')
|
||||||
return {}
|
return {}
|
||||||
|
|
|
@ -46,6 +46,7 @@ def freezer_main(backup_args):
|
||||||
"""
|
"""
|
||||||
|
|
||||||
if not backup_args.quiet:
|
if not backup_args.quiet:
|
||||||
|
LOG.info("Begin freezer agent process with args: {0}".format(sys.argv))
|
||||||
LOG.info('log file at {0}'.format(CONF.get('log_file')))
|
LOG.info('log file at {0}'.format(CONF.get('log_file')))
|
||||||
|
|
||||||
if backup_args.max_priority:
|
if backup_args.max_priority:
|
||||||
|
@ -86,7 +87,7 @@ def freezer_main(backup_args):
|
||||||
if hasattr(backup_args, 'trickle_command'):
|
if hasattr(backup_args, 'trickle_command'):
|
||||||
if "tricklecount" in os.environ:
|
if "tricklecount" in os.environ:
|
||||||
if int(os.environ.get("tricklecount")) > 1:
|
if int(os.environ.get("tricklecount")) > 1:
|
||||||
LOG.critical("[*] Trickle seems to be not working, Switching "
|
LOG.critical("Trickle seems to be not working, Switching "
|
||||||
"to normal mode ")
|
"to normal mode ")
|
||||||
return run_job(backup_args, storage)
|
return run_job(backup_args, storage)
|
||||||
|
|
||||||
|
@ -107,12 +108,16 @@ def freezer_main(backup_args):
|
||||||
utils.delete_file(backup_args.tmp_file)
|
utils.delete_file(backup_args.tmp_file)
|
||||||
|
|
||||||
if process.returncode:
|
if process.returncode:
|
||||||
LOG.warn("[*] Trickle Error: {0}".format(error))
|
LOG.warn("Trickle Error: {0}".format(error))
|
||||||
LOG.info("[*] Switching to work without trickle ...")
|
LOG.info("Switching to work without trickle ...")
|
||||||
return run_job(backup_args, storage)
|
return run_job(backup_args, storage)
|
||||||
|
|
||||||
else:
|
else:
|
||||||
return run_job(backup_args, storage)
|
|
||||||
|
run_job(backup_args, storage)
|
||||||
|
|
||||||
|
if not backup_args.quiet:
|
||||||
|
LOG.info("End freezer agent process successfully")
|
||||||
|
|
||||||
|
|
||||||
def run_job(conf, storage):
|
def run_job(conf, storage):
|
||||||
|
@ -134,7 +139,7 @@ def run_job(conf, storage):
|
||||||
|
|
||||||
def fail(exit_code, e, quiet, do_log=True):
|
def fail(exit_code, e, quiet, do_log=True):
|
||||||
""" Catch the exceptions and write it to log """
|
""" Catch the exceptions and write it to log """
|
||||||
msg = '[*] Critical Error: {0}\n'.format(e)
|
msg = 'Critical Error: {0}\n'.format(e)
|
||||||
if not quiet:
|
if not quiet:
|
||||||
sys.stderr.write(msg)
|
sys.stderr.write(msg)
|
||||||
if do_log:
|
if do_log:
|
||||||
|
@ -189,6 +194,7 @@ def main():
|
||||||
"""freezer-agent binary main execution"""
|
"""freezer-agent binary main execution"""
|
||||||
backup_args = None
|
backup_args = None
|
||||||
try:
|
try:
|
||||||
|
|
||||||
freezer_config.config()
|
freezer_config.config()
|
||||||
freezer_config.setup_logging()
|
freezer_config.setup_logging()
|
||||||
backup_args = freezer_config.get_backup_args()
|
backup_args = freezer_config.get_backup_args()
|
||||||
|
@ -200,8 +206,11 @@ def main():
|
||||||
CONF.print_help()
|
CONF.print_help()
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
freezer_main(backup_args)
|
freezer_main(backup_args)
|
||||||
|
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
quiet = backup_args.quiet if backup_args else False
|
quiet = backup_args.quiet if backup_args else False
|
||||||
|
LOG.exception(err)
|
||||||
|
LOG.critical("End freezer agent process unsuccessfully")
|
||||||
return fail(1, err, quiet)
|
return fail(1, err, quiet)
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
|
|
|
@ -12,8 +12,12 @@
|
||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
|
from oslo_log import log
|
||||||
|
|
||||||
from freezer.mode import mode
|
from freezer.mode import mode
|
||||||
|
|
||||||
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class FsMode(mode.Mode):
|
class FsMode(mode.Mode):
|
||||||
|
|
||||||
|
|
|
@ -15,6 +15,10 @@
|
||||||
import abc
|
import abc
|
||||||
import six
|
import six
|
||||||
|
|
||||||
|
from oslo_log import log
|
||||||
|
|
||||||
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
@six.add_metaclass(abc.ABCMeta)
|
@six.add_metaclass(abc.ABCMeta)
|
||||||
class Mode(object):
|
class Mode(object):
|
||||||
|
|
|
@ -12,10 +12,12 @@
|
||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
import logging
|
from oslo_log import log
|
||||||
|
|
||||||
from freezer.mode import mode
|
from freezer.mode import mode
|
||||||
|
|
||||||
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class MongoDbMode(mode.Mode):
|
class MongoDbMode(mode.Mode):
|
||||||
"""
|
"""
|
||||||
|
@ -42,8 +44,8 @@ class MongoDbMode(mode.Mode):
|
||||||
except ImportError:
|
except ImportError:
|
||||||
raise ImportError('please install pymongo module')
|
raise ImportError('please install pymongo module')
|
||||||
|
|
||||||
logging.info('[*] MongoDB backup is being executed...')
|
LOG.info('MongoDB backup is being executed...')
|
||||||
logging.info('[*] Checking is the localhost is Master/Primary...')
|
LOG.info('Checking is the localhost is Master/Primary...')
|
||||||
# todo unhardcode this
|
# todo unhardcode this
|
||||||
mongodb_port = '27017'
|
mongodb_port = '27017'
|
||||||
local_hostname = conf.hostname
|
local_hostname = conf.hostname
|
||||||
|
@ -51,5 +53,5 @@ class MongoDbMode(mode.Mode):
|
||||||
mongo_client = pymongo.MongoClient(db_host_port)
|
mongo_client = pymongo.MongoClient(db_host_port)
|
||||||
master_dict = dict(mongo_client.admin.command("isMaster"))
|
master_dict = dict(mongo_client.admin.command("isMaster"))
|
||||||
if master_dict['me'] != master_dict['primary']:
|
if master_dict['me'] != master_dict['primary']:
|
||||||
raise Exception('[*] localhost {0} is not Master/Primary,\
|
raise Exception('localhost {0} is not Master/Primary,\
|
||||||
exiting...'.format(local_hostname))
|
exiting...'.format(local_hostname))
|
||||||
|
|
|
@ -12,9 +12,13 @@
|
||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
|
from oslo_log import log
|
||||||
|
|
||||||
from freezer.mode import mode
|
from freezer.mode import mode
|
||||||
from freezer.utils import config
|
from freezer.utils import config
|
||||||
|
|
||||||
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class MysqlMode(mode.Mode):
|
class MysqlMode(mode.Mode):
|
||||||
"""
|
"""
|
||||||
|
@ -68,4 +72,4 @@ class MysqlMode(mode.Mode):
|
||||||
passwd=parsed_config.get("password", False))
|
passwd=parsed_config.get("password", False))
|
||||||
self.cursor = None
|
self.cursor = None
|
||||||
except Exception as error:
|
except Exception as error:
|
||||||
raise Exception('[*] MySQL: {0}'.format(error))
|
raise Exception('MySQL: {0}'.format(error))
|
||||||
|
|
|
@ -12,13 +12,16 @@
|
||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
import logging
|
|
||||||
|
from oslo_log import log
|
||||||
|
|
||||||
from freezer.mode import mode
|
from freezer.mode import mode
|
||||||
from freezer.utils import config
|
from freezer.utils import config
|
||||||
from freezer.utils import utils
|
from freezer.utils import utils
|
||||||
from freezer.utils import winutils
|
from freezer.utils import winutils
|
||||||
|
|
||||||
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class SqlserverMode(mode.Mode):
|
class SqlserverMode(mode.Mode):
|
||||||
"""
|
"""
|
||||||
|
@ -46,13 +49,13 @@ class SqlserverMode(mode.Mode):
|
||||||
""" Stop a SQL Server instance to
|
""" Stop a SQL Server instance to
|
||||||
perform the backup of the db files """
|
perform the backup of the db files """
|
||||||
|
|
||||||
logging.info('[*] Stopping SQL Server for backup')
|
LOG.info('Stopping SQL Server for backup')
|
||||||
with winutils.DisableFileSystemRedirection():
|
with winutils.DisableFileSystemRedirection():
|
||||||
cmd = 'net stop "SQL Server ({0})"'\
|
cmd = 'net stop "SQL Server ({0})"'\
|
||||||
.format(self.sql_server_instance)
|
.format(self.sql_server_instance)
|
||||||
(out, err) = utils.create_subprocess(cmd)
|
(out, err) = utils.create_subprocess(cmd)
|
||||||
if err != '':
|
if err != '':
|
||||||
raise Exception('[*] Error while stopping SQL Server,'
|
raise Exception('Error while stopping SQL Server,'
|
||||||
', error {0}'.format(err))
|
', error {0}'.format(err))
|
||||||
|
|
||||||
def start_sql_server(self):
|
def start_sql_server(self):
|
||||||
|
@ -63,9 +66,9 @@ class SqlserverMode(mode.Mode):
|
||||||
self.sql_server_instance)
|
self.sql_server_instance)
|
||||||
(out, err) = utils.create_subprocess(cmd)
|
(out, err) = utils.create_subprocess(cmd)
|
||||||
if err != '':
|
if err != '':
|
||||||
raise Exception('[*] Error while starting SQL Server'
|
raise Exception('Error while starting SQL Server'
|
||||||
', error {0}'.format(err))
|
', error {0}'.format(err))
|
||||||
logging.info('[*] SQL Server back to normal')
|
LOG.info('SQL Server back to normal')
|
||||||
|
|
||||||
def prepare(self):
|
def prepare(self):
|
||||||
self.stop_sql_server()
|
self.stop_sql_server()
|
||||||
|
|
|
@ -17,13 +17,11 @@ Freezer Backup modes related functions
|
||||||
"""
|
"""
|
||||||
import time
|
import time
|
||||||
|
|
||||||
from oslo_config import cfg
|
|
||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
|
|
||||||
from freezer.utils import utils
|
from freezer.utils import utils
|
||||||
|
|
||||||
CONF = cfg.CONF
|
LOG = log.getLogger(__name__)
|
||||||
logging = log.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
class BackupOs(object):
|
class BackupOs(object):
|
||||||
|
@ -66,15 +64,15 @@ class BackupOs(object):
|
||||||
try:
|
try:
|
||||||
image = glance.images.get(image_id)
|
image = glance.images.get(image_id)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logging.error(e)
|
LOG.error(e)
|
||||||
|
|
||||||
stream = client_manager.download_image(image)
|
stream = client_manager.download_image(image)
|
||||||
package = "{0}/{1}".format(instance_id, utils.DateTime.now().timestamp)
|
package = "{0}/{1}".format(instance_id, utils.DateTime.now().timestamp)
|
||||||
logging.info("[*] Uploading image to swift")
|
LOG.info("Uploading image to swift")
|
||||||
headers = {"x-object-meta-name": instance._info['name'],
|
headers = {"x-object-meta-name": instance._info['name'],
|
||||||
"x-object-meta-flavor-id": instance._info['flavor']['id']}
|
"x-object-meta-flavor-id": instance._info['flavor']['id']}
|
||||||
self.storage.add_stream(stream, package, headers)
|
self.storage.add_stream(stream, package, headers)
|
||||||
logging.info("[*] Deleting temporary image {0}".format(image))
|
LOG.info("Deleting temporary image {0}".format(image))
|
||||||
glance.images.delete(image.id)
|
glance.images.delete(image.id)
|
||||||
|
|
||||||
def backup_cinder_by_glance(self, volume_id):
|
def backup_cinder_by_glance(self, volume_id):
|
||||||
|
@ -89,24 +87,24 @@ class BackupOs(object):
|
||||||
cinder = client_manager.get_cinder()
|
cinder = client_manager.get_cinder()
|
||||||
|
|
||||||
volume = cinder.volumes.get(volume_id)
|
volume = cinder.volumes.get(volume_id)
|
||||||
logging.debug("Creation temporary snapshot")
|
LOG.debug("Creation temporary snapshot")
|
||||||
snapshot = client_manager.provide_snapshot(
|
snapshot = client_manager.provide_snapshot(
|
||||||
volume, "backup_snapshot_for_volume_%s" % volume_id)
|
volume, "backup_snapshot_for_volume_%s" % volume_id)
|
||||||
logging.debug("Creation temporary volume")
|
LOG.debug("Creation temporary volume")
|
||||||
copied_volume = client_manager.do_copy_volume(snapshot)
|
copied_volume = client_manager.do_copy_volume(snapshot)
|
||||||
logging.debug("Creation temporary glance image")
|
LOG.debug("Creation temporary glance image")
|
||||||
image = client_manager.make_glance_image("name", copied_volume)
|
image = client_manager.make_glance_image("name", copied_volume)
|
||||||
logging.debug("Download temporary glance image {0}".format(image.id))
|
LOG.debug("Download temporary glance image {0}".format(image.id))
|
||||||
stream = client_manager.download_image(image)
|
stream = client_manager.download_image(image)
|
||||||
package = "{0}/{1}".format(volume_id, utils.DateTime.now().timestamp)
|
package = "{0}/{1}".format(volume_id, utils.DateTime.now().timestamp)
|
||||||
logging.debug("Uploading image to swift")
|
LOG.debug("Uploading image to swift")
|
||||||
headers = {}
|
headers = {}
|
||||||
self.storage.add_stream(stream, package, headers=headers)
|
self.storage.add_stream(stream, package, headers=headers)
|
||||||
logging.debug("Deleting temporary snapshot")
|
LOG.debug("Deleting temporary snapshot")
|
||||||
client_manager.clean_snapshot(snapshot)
|
client_manager.clean_snapshot(snapshot)
|
||||||
logging.debug("Deleting temporary volume")
|
LOG.debug("Deleting temporary volume")
|
||||||
cinder.volumes.delete(copied_volume)
|
cinder.volumes.delete(copied_volume)
|
||||||
logging.debug("Deleting temporary image")
|
LOG.debug("Deleting temporary image")
|
||||||
client_manager.get_glance().images.delete(image.id)
|
client_manager.get_glance().images.delete(image.id)
|
||||||
|
|
||||||
def backup_cinder(self, volume_id, name=None, description=None):
|
def backup_cinder(self, volume_id, name=None, description=None):
|
||||||
|
|
|
@ -22,13 +22,11 @@ from glanceclient.client import Client as glance_client
|
||||||
from keystoneauth1 import loading
|
from keystoneauth1 import loading
|
||||||
from keystoneauth1 import session
|
from keystoneauth1 import session
|
||||||
from novaclient.client import Client as nova_client
|
from novaclient.client import Client as nova_client
|
||||||
from oslo_config import cfg
|
|
||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
|
|
||||||
from freezer.utils import utils
|
from freezer.utils import utils
|
||||||
|
|
||||||
CONF = cfg.CONF
|
LOG = log.getLogger(__name__)
|
||||||
logging = log.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
class OSClientManager(object):
|
class OSClientManager(object):
|
||||||
|
@ -194,11 +192,11 @@ class OSClientManager(object):
|
||||||
name=snapshot_name,
|
name=snapshot_name,
|
||||||
force=True)
|
force=True)
|
||||||
|
|
||||||
logging.debug("Snapshot for volume with id {0}".format(volume.id))
|
LOG.debug("Snapshot for volume with id {0}".format(volume.id))
|
||||||
|
|
||||||
while snapshot.status != "available":
|
while snapshot.status != "available":
|
||||||
try:
|
try:
|
||||||
logging.debug("Snapshot status: " + snapshot.status)
|
LOG.debug("Snapshot status: " + snapshot.status)
|
||||||
snapshot = self.get_cinder().volume_snapshots.get(snapshot.id)
|
snapshot = self.get_cinder().volume_snapshots.get(snapshot.id)
|
||||||
if snapshot.status == "error":
|
if snapshot.status == "error":
|
||||||
raise Exception("snapshot has error state")
|
raise Exception("snapshot has error state")
|
||||||
|
@ -206,7 +204,7 @@ class OSClientManager(object):
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
if str(e) == "snapshot has error state":
|
if str(e) == "snapshot has error state":
|
||||||
raise e
|
raise e
|
||||||
logging.exception(e)
|
LOG.exception(e)
|
||||||
return snapshot
|
return snapshot
|
||||||
|
|
||||||
def do_copy_volume(self, snapshot):
|
def do_copy_volume(self, snapshot):
|
||||||
|
@ -221,12 +219,12 @@ class OSClientManager(object):
|
||||||
|
|
||||||
while volume.status != "available":
|
while volume.status != "available":
|
||||||
try:
|
try:
|
||||||
logging.info("[*] Volume copy status: " + volume.status)
|
LOG.info("Volume copy status: " + volume.status)
|
||||||
volume = self.get_cinder().volumes.get(volume.id)
|
volume = self.get_cinder().volumes.get(volume.id)
|
||||||
time.sleep(5)
|
time.sleep(5)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logging.exception(e)
|
LOG.exception(e)
|
||||||
logging.warn("[*] Exception getting volume status")
|
LOG.warn("Exception getting volume status")
|
||||||
return volume
|
return volume
|
||||||
|
|
||||||
def make_glance_image(self, image_volume_name, copy_volume):
|
def make_glance_image(self, image_volume_name, copy_volume):
|
||||||
|
@ -246,15 +244,15 @@ class OSClientManager(object):
|
||||||
while image.status != "active":
|
while image.status != "active":
|
||||||
try:
|
try:
|
||||||
time.sleep(5)
|
time.sleep(5)
|
||||||
logging.info("Image status: " + image.status)
|
LOG.info("Image status: " + image.status)
|
||||||
image = self.get_glance().images.get(image.id)
|
image = self.get_glance().images.get(image.id)
|
||||||
if image.status in ("killed", "deleted"):
|
if image.status in ("killed", "deleted"):
|
||||||
raise Exception("Image have killed state")
|
raise Exception("Image have killed state")
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
if image.status in ("killed", "deleted"):
|
if image.status in ("killed", "deleted"):
|
||||||
raise e
|
raise e
|
||||||
logging.exception(e)
|
LOG.exception(e)
|
||||||
logging.warn("Exception getting image status")
|
LOG.warn("Exception getting image status")
|
||||||
return image
|
return image
|
||||||
|
|
||||||
def clean_snapshot(self, snapshot):
|
def clean_snapshot(self, snapshot):
|
||||||
|
@ -262,7 +260,7 @@ class OSClientManager(object):
|
||||||
Deletes snapshot
|
Deletes snapshot
|
||||||
:param snapshot: snapshot name
|
:param snapshot: snapshot name
|
||||||
"""
|
"""
|
||||||
logging.info("[*] Deleting existed snapshot: " + snapshot.id)
|
LOG.info("Deleting existed snapshot: " + snapshot.id)
|
||||||
self.get_cinder().volume_snapshots.delete(snapshot)
|
self.get_cinder().volume_snapshots.delete(snapshot)
|
||||||
|
|
||||||
def download_image(self, image):
|
def download_image(self, image):
|
||||||
|
@ -271,9 +269,9 @@ class OSClientManager(object):
|
||||||
:param image: Image object for downloading
|
:param image: Image object for downloading
|
||||||
:return: stream of image data
|
:return: stream of image data
|
||||||
"""
|
"""
|
||||||
logging.debug("Download image enter")
|
LOG.debug("Download image enter")
|
||||||
stream = self.get_glance().images.data(image.id)
|
stream = self.get_glance().images.data(image.id)
|
||||||
logging.debug("Stream with size {0}".format(image.size))
|
LOG.debug("Stream with size {0}".format(image.size))
|
||||||
return utils.ReSizeStream(stream, image.size, 1000000)
|
return utils.ReSizeStream(stream, image.size, 1000000)
|
||||||
|
|
||||||
|
|
||||||
|
@ -365,10 +363,10 @@ class OpenstackOpts(object):
|
||||||
'Generated from auth_url: {1}'
|
'Generated from auth_url: {1}'
|
||||||
.format(version, auth_url))
|
.format(version, auth_url))
|
||||||
|
|
||||||
logging.info('Authenticating with Keystone version: '
|
LOG.info('Authenticating with Keystone version: '
|
||||||
'{0}, auth_url: {1}, username: {2}, project: {3}'.
|
'{0}, auth_url: {1}, username: {2}, project: {3}'.
|
||||||
format(self.auth_version, self.auth_url,
|
format(self.auth_version, self.auth_url,
|
||||||
self.username, self.project_name))
|
self.username, self.project_name))
|
||||||
|
|
||||||
def get_opts_dicts(self):
|
def get_opts_dicts(self):
|
||||||
"""
|
"""
|
||||||
|
|
|
@ -16,13 +16,11 @@ limitations under the License.
|
||||||
Freezer restore modes related functions
|
Freezer restore modes related functions
|
||||||
"""
|
"""
|
||||||
|
|
||||||
from oslo_config import cfg
|
|
||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
|
|
||||||
from freezer.utils import utils
|
from freezer.utils import utils
|
||||||
|
|
||||||
CONF = cfg.CONF
|
LOG = log.getLogger(__name__)
|
||||||
logging = log.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
class RestoreOs(object):
|
class RestoreOs(object):
|
||||||
|
@ -46,7 +44,7 @@ class RestoreOs(object):
|
||||||
|
|
||||||
if not backups:
|
if not backups:
|
||||||
msg = "Cannot find backups for path: %s" % path
|
msg = "Cannot find backups for path: %s" % path
|
||||||
logging.error(msg)
|
LOG.error(msg)
|
||||||
raise BaseException(msg)
|
raise BaseException(msg)
|
||||||
return backups[-1]
|
return backups[-1]
|
||||||
|
|
||||||
|
@ -63,7 +61,7 @@ class RestoreOs(object):
|
||||||
stream = swift.get_object(self.container, "%s/%s" % (path, backup),
|
stream = swift.get_object(self.container, "%s/%s" % (path, backup),
|
||||||
resp_chunk_size=10000000)
|
resp_chunk_size=10000000)
|
||||||
length = int(stream[0]["x-object-meta-length"])
|
length = int(stream[0]["x-object-meta-length"])
|
||||||
logging.info("[*] Creation glance image")
|
LOG.info("Creation glance image")
|
||||||
image = glance.images.create(
|
image = glance.images.create(
|
||||||
data=utils.ReSizeStream(stream[1], length, 1),
|
data=utils.ReSizeStream(stream[1], length, 1),
|
||||||
container_format="bare", disk_format="raw")
|
container_format="bare", disk_format="raw")
|
||||||
|
@ -83,8 +81,8 @@ class RestoreOs(object):
|
||||||
x.created_at.split('.')[0]) >= restore_from_timestamp])
|
x.created_at.split('.')[0]) >= restore_from_timestamp])
|
||||||
|
|
||||||
if not backups_filter:
|
if not backups_filter:
|
||||||
logging.warning("no available backups for cinder volume,"
|
LOG.warning("no available backups for cinder volume,"
|
||||||
"restore newest backup")
|
"restore newest backup")
|
||||||
backup = max(backups, key=lambda x: x.created_at)
|
backup = max(backups, key=lambda x: x.created_at)
|
||||||
cinder.restores.restore(backup_id=backup.id)
|
cinder.restores.restore(backup_id=backup.id)
|
||||||
else:
|
else:
|
||||||
|
@ -107,10 +105,10 @@ class RestoreOs(object):
|
||||||
size = length / gb
|
size = length / gb
|
||||||
if length % gb > 0:
|
if length % gb > 0:
|
||||||
size += 1
|
size += 1
|
||||||
logging.info("[*] Creation volume from image")
|
LOG.info("Creation volume from image")
|
||||||
self.client_manager.get_cinder().volumes.create(size,
|
self.client_manager.get_cinder().volumes.create(size,
|
||||||
imageRef=image.id)
|
imageRef=image.id)
|
||||||
logging.info("[*] Deleting temporary image")
|
LOG.info("Deleting temporary image")
|
||||||
self.client_manager.get_glance().images.delete(image)
|
self.client_manager.get_glance().images.delete(image)
|
||||||
|
|
||||||
def restore_nova(self, instance_id, restore_from_timestamp):
|
def restore_nova(self, instance_id, restore_from_timestamp):
|
||||||
|
@ -123,5 +121,5 @@ class RestoreOs(object):
|
||||||
(info, image) = self._create_image(instance_id, restore_from_timestamp)
|
(info, image) = self._create_image(instance_id, restore_from_timestamp)
|
||||||
nova = self.client_manager.get_nova()
|
nova = self.client_manager.get_nova()
|
||||||
flavor = nova.flavors.get(info['x-object-meta-flavor-id'])
|
flavor = nova.flavors.get(info['x-object-meta-flavor-id'])
|
||||||
logging.info("[*] Creation an instance")
|
LOG.info("Creation an instance")
|
||||||
nova.servers.create(info['x-object-meta-name'], image, flavor)
|
nova.servers.create(info['x-object-meta-name'], image, flavor)
|
||||||
|
|
|
@ -95,7 +95,7 @@ class NoDaemon(object):
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def handle_program_exit(signum, frame):
|
def handle_program_exit(signum, frame):
|
||||||
logging.info('[*] Got signal {0}. Exiting ...'.format(signum))
|
logging.info('Got signal {0}. Exiting ...'.format(signum))
|
||||||
NoDaemon.exit_flag = True
|
NoDaemon.exit_flag = True
|
||||||
NoDaemon.instance.daemonizable.stop()
|
NoDaemon.instance.daemonizable.stop()
|
||||||
|
|
||||||
|
@ -107,16 +107,16 @@ class NoDaemon(object):
|
||||||
setup_logging(log_file)
|
setup_logging(log_file)
|
||||||
while not NoDaemon.exit_flag:
|
while not NoDaemon.exit_flag:
|
||||||
try:
|
try:
|
||||||
logging.info('[*] Starting in no-daemon mode')
|
logging.info('Starting in no-daemon mode')
|
||||||
self.daemonizable.start()
|
self.daemonizable.start()
|
||||||
NoDaemon.exit_flag = True
|
NoDaemon.exit_flag = True
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
if dump_stack_trace:
|
if dump_stack_trace:
|
||||||
logging.error(traceback.format_exc(e))
|
logging.error(traceback.format_exc(e))
|
||||||
logging.error('[*] Restarting procedure in no-daemon mode '
|
logging.error('Restarting procedure in no-daemon mode '
|
||||||
'after Fatal Error: {0}'.format(e))
|
'after Fatal Error: {0}'.format(e))
|
||||||
sleep(10)
|
sleep(10)
|
||||||
logging.info('[*] Done exiting')
|
logging.info('Done exiting')
|
||||||
|
|
||||||
def stop(self):
|
def stop(self):
|
||||||
pass
|
pass
|
||||||
|
@ -178,17 +178,17 @@ class Daemon(object):
|
||||||
setup_logging(log_file)
|
setup_logging(log_file)
|
||||||
while not Daemon.exit_flag:
|
while not Daemon.exit_flag:
|
||||||
try:
|
try:
|
||||||
logging.info('[*] freezer daemon starting, pid: {0}'.
|
logging.info('freezer daemon starting, pid: {0}'.
|
||||||
format(self.pid))
|
format(self.pid))
|
||||||
self.daemonizable.start()
|
self.daemonizable.start()
|
||||||
Daemon.exit_flag = True
|
Daemon.exit_flag = True
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
if dump_stack_trace:
|
if dump_stack_trace:
|
||||||
logging.error(traceback.format_exc(e))
|
logging.error(traceback.format_exc(e))
|
||||||
logging.error('[*] Restarting daemonized procedure '
|
logging.error('Restarting daemonized procedure '
|
||||||
'after Fatal Error: {0}'.format(e))
|
'after Fatal Error: {0}'.format(e))
|
||||||
sleep(10)
|
sleep(10)
|
||||||
logging.info('[*] freezer daemon done, pid: {0}'.format(self.pid))
|
logging.info('freezer daemon done, pid: {0}'.format(self.pid))
|
||||||
|
|
||||||
def stop(self):
|
def stop(self):
|
||||||
pid = self.pid
|
pid = self.pid
|
||||||
|
|
|
@ -114,7 +114,7 @@ class FreezerScheduler(object):
|
||||||
try:
|
try:
|
||||||
return self.client.jobs.update(job_id, job_doc)
|
return self.client.jobs.update(job_id, job_doc)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error("[*] Job update error: {0}".format(e))
|
LOG.error("Job update error: {0}".format(e))
|
||||||
|
|
||||||
def update_job_status(self, job_id, status):
|
def update_job_status(self, job_id, status):
|
||||||
doc = {'job_schedule': {'status': status}}
|
doc = {'job_schedule': {'status': status}}
|
||||||
|
@ -134,7 +134,7 @@ class FreezerScheduler(object):
|
||||||
try:
|
try:
|
||||||
work_job_doc_list = self.get_jobs()
|
work_job_doc_list = self.get_jobs()
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error("[*] Unable to get jobs: {0}".format(e))
|
LOG.error("Unable to get jobs: {0}".format(e))
|
||||||
return
|
return
|
||||||
|
|
||||||
work_job_id_list = []
|
work_job_id_list = []
|
||||||
|
|
|
@ -29,7 +29,7 @@ from six.moves import configparser
|
||||||
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
logging = log.getLogger(__name__)
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class StopState(object):
|
class StopState(object):
|
||||||
|
@ -133,13 +133,13 @@ class Job(object):
|
||||||
def create(scheduler, executable, job_doc):
|
def create(scheduler, executable, job_doc):
|
||||||
job = Job(scheduler, executable, job_doc)
|
job = Job(scheduler, executable, job_doc)
|
||||||
if job.job_doc_status in ['running', 'scheduled']:
|
if job.job_doc_status in ['running', 'scheduled']:
|
||||||
logging.warning('Resetting {0} status from job {1}'
|
LOG.warning('Resetting {0} status from job {1}'
|
||||||
.format(job.job_doc_status, job.id))
|
.format(job.job_doc_status, job.id))
|
||||||
if job.job_doc_status == 'stop' and not job.event:
|
if job.job_doc_status == 'stop' and not job.event:
|
||||||
logging.info('Job {0} was stopped.'.format(job.id))
|
LOG.info('Job {0} was stopped.'.format(job.id))
|
||||||
job.event = Job.STOP_EVENT
|
job.event = Job.STOP_EVENT
|
||||||
elif not job.event:
|
elif not job.event:
|
||||||
logging.info('Autostart Job {0}'.format(job.id))
|
LOG.info('Autostart Job {0}'.format(job.id))
|
||||||
job.event = Job.START_EVENT
|
job.event = Job.START_EVENT
|
||||||
return job
|
return job
|
||||||
|
|
||||||
|
@ -153,7 +153,7 @@ class Job(object):
|
||||||
def remove(self):
|
def remove(self):
|
||||||
with self.scheduler.lock:
|
with self.scheduler.lock:
|
||||||
# delegate to state object
|
# delegate to state object
|
||||||
logging.info('REMOVE job {0}'.format(self.id))
|
LOG.info('REMOVE job {0}'.format(self.id))
|
||||||
self.state.remove(self)
|
self.state.remove(self)
|
||||||
|
|
||||||
@property
|
@property
|
||||||
|
@ -262,13 +262,13 @@ class Job(object):
|
||||||
next_event = job_doc['job_schedule'].get('event', '')
|
next_event = job_doc['job_schedule'].get('event', '')
|
||||||
while next_event:
|
while next_event:
|
||||||
if next_event == Job.STOP_EVENT:
|
if next_event == Job.STOP_EVENT:
|
||||||
logging.info('JOB {0} event: STOP'.format(self.id))
|
LOG.info('JOB {0} event: STOP'.format(self.id))
|
||||||
next_event = self.state.stop(self, job_doc)
|
next_event = self.state.stop(self, job_doc)
|
||||||
elif next_event == Job.START_EVENT:
|
elif next_event == Job.START_EVENT:
|
||||||
logging.info('JOB {0} event: START'.format(self.id))
|
LOG.info('JOB {0} event: START'.format(self.id))
|
||||||
next_event = self.state.start(self, job_doc)
|
next_event = self.state.start(self, job_doc)
|
||||||
elif next_event == Job.ABORT_EVENT:
|
elif next_event == Job.ABORT_EVENT:
|
||||||
logging.info('JOB {0} event: ABORT'.format(self.id))
|
LOG.info('JOB {0} event: ABORT'.format(self.id))
|
||||||
next_event = self.state.abort(self, job_doc)
|
next_event = self.state.abort(self, job_doc)
|
||||||
|
|
||||||
def upload_metadata(self, metadata_string):
|
def upload_metadata(self, metadata_string):
|
||||||
|
@ -277,10 +277,10 @@ class Job(object):
|
||||||
if metadata:
|
if metadata:
|
||||||
metadata['job_id'] = self.id
|
metadata['job_id'] = self.id
|
||||||
self.scheduler.upload_metadata(metadata)
|
self.scheduler.upload_metadata(metadata)
|
||||||
logging.info("[*] Job {0}, freezer action metadata uploaded"
|
LOG.info("Job {0}, freezer action metadata uploaded"
|
||||||
.format(self.id))
|
.format(self.id))
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logging.error('[*] metrics upload error: {0}'.format(e))
|
LOG.error('metrics upload error: {0}'.format(e))
|
||||||
|
|
||||||
def execute_job_action(self, job_action):
|
def execute_job_action(self, job_action):
|
||||||
max_retries = job_action.get('max_retries', 1)
|
max_retries = job_action.get('max_retries', 1)
|
||||||
|
@ -305,7 +305,7 @@ class Job(object):
|
||||||
utils.delete_file(config_file_name)
|
utils.delete_file(config_file_name)
|
||||||
|
|
||||||
if error:
|
if error:
|
||||||
logging.error("[*] Freezer client error: {0}".format(error))
|
LOG.error("Freezer client error: {0}".format(error))
|
||||||
elif output:
|
elif output:
|
||||||
self.upload_metadata(output)
|
self.upload_metadata(output)
|
||||||
|
|
||||||
|
@ -313,21 +313,21 @@ class Job(object):
|
||||||
# ERROR
|
# ERROR
|
||||||
tries -= 1
|
tries -= 1
|
||||||
if tries:
|
if tries:
|
||||||
logging.warning('[*] Job {0} failed {1} action,'
|
LOG.warning('Job {0} failed {1} action,'
|
||||||
' retrying in {2} seconds'
|
' retrying in {2} seconds'
|
||||||
.format(self.id, action_name,
|
.format(self.id, action_name,
|
||||||
max_retries_interval))
|
max_retries_interval))
|
||||||
# sleeping with the bloody lock, but we don't want other
|
# sleeping with the bloody lock, but we don't want other
|
||||||
# actions to mess with our stuff like fs snapshots, do we ?
|
# actions to mess with our stuff like fs snapshots, do we ?
|
||||||
time.sleep(max_retries_interval)
|
time.sleep(max_retries_interval)
|
||||||
else:
|
else:
|
||||||
# SUCCESS
|
# SUCCESS
|
||||||
logging.info('[*] Job {0} action {1}'
|
LOG.info('Job {0} action {1}'
|
||||||
' returned success exit code'.
|
' returned success exit code'.
|
||||||
format(self.id, action_name))
|
format(self.id, action_name))
|
||||||
return Job.SUCCESS_RESULT
|
return Job.SUCCESS_RESULT
|
||||||
logging.error('[*] Job {0} action {1} failed after {2} tries'
|
LOG.error('Job {0} action {1} failed after {2} tries'
|
||||||
.format(self.id, action_name, max_retries))
|
.format(self.id, action_name, max_retries))
|
||||||
|
|
||||||
return Job.FAIL_RESULT
|
return Job.FAIL_RESULT
|
||||||
|
|
||||||
|
@ -344,7 +344,7 @@ class Job(object):
|
||||||
result = Job.SUCCESS_RESULT
|
result = Job.SUCCESS_RESULT
|
||||||
with self.scheduler.execution_lock:
|
with self.scheduler.execution_lock:
|
||||||
with self.scheduler.lock:
|
with self.scheduler.lock:
|
||||||
logging.info('job {0} running'.format(self.id))
|
LOG.info('job {0} running'.format(self.id))
|
||||||
self.state = RunningState
|
self.state = RunningState
|
||||||
self.job_doc_status = Job.RUNNING_STATUS
|
self.job_doc_status = Job.RUNNING_STATUS
|
||||||
self.scheduler.update_job_status(self.id, self.job_doc_status)
|
self.scheduler.update_job_status(self.id, self.job_doc_status)
|
||||||
|
@ -353,9 +353,9 @@ class Job(object):
|
||||||
# if the job contains exec action and the scheduler passes the
|
# if the job contains exec action and the scheduler passes the
|
||||||
# parameter --disable-exec job execution should fail
|
# parameter --disable-exec job execution should fail
|
||||||
if self.contains_exec() and CONF.disable_exec:
|
if self.contains_exec() and CONF.disable_exec:
|
||||||
logging.info("Job {0} failed because it contains exec action "
|
LOG.info("Job {0} failed because it contains exec action "
|
||||||
"and exec actions are disabled by scheduler"
|
"and exec actions are disabled by scheduler"
|
||||||
.format(self.id))
|
.format(self.id))
|
||||||
self.result = Job.FAIL_RESULT
|
self.result = Job.FAIL_RESULT
|
||||||
self.finish()
|
self.finish()
|
||||||
return
|
return
|
||||||
|
@ -369,8 +369,8 @@ class Job(object):
|
||||||
else:
|
else:
|
||||||
freezer_action = job_action.get('freezer_action', {})
|
freezer_action = job_action.get('freezer_action', {})
|
||||||
action_name = freezer_action.get('action', '')
|
action_name = freezer_action.get('action', '')
|
||||||
logging.warning("[*]skipping {0} action".
|
LOG.warning("skipping {0} action".
|
||||||
format(action_name))
|
format(action_name))
|
||||||
self.result = result
|
self.result = result
|
||||||
self.finish()
|
self.finish()
|
||||||
|
|
||||||
|
@ -410,12 +410,12 @@ class Job(object):
|
||||||
self.session_tag = resp['session_tag']
|
self.session_tag = resp['session_tag']
|
||||||
return
|
return
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logging.error('[*]Error while starting session {0}. {1}'.
|
LOG.error('Error while starting session {0}. {1}'.
|
||||||
format(self.session_id, e))
|
format(self.session_id, e))
|
||||||
logging.warning('[*]Retrying to start session {0}'.
|
LOG.warning('Retrying to start session {0}'.
|
||||||
format(self.session_id))
|
format(self.session_id))
|
||||||
retry -= 1
|
retry -= 1
|
||||||
logging.error('[*]Unable to start session {0}'.format(self.session_id))
|
LOG.error('Unable to start session {0}'.format(self.session_id))
|
||||||
|
|
||||||
def end_session(self, result):
|
def end_session(self, result):
|
||||||
if not self.session_id:
|
if not self.session_id:
|
||||||
|
@ -430,22 +430,22 @@ class Job(object):
|
||||||
if resp['result'] == 'success':
|
if resp['result'] == 'success':
|
||||||
return
|
return
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logging.error('[*]Error while ending session {0}. {1}'.
|
LOG.error('Error while ending session {0}. {1}'.
|
||||||
format(self.session_id, e))
|
format(self.session_id, e))
|
||||||
logging.warning('[*]Retrying to end session {0}'.
|
LOG.warning('Retrying to end session {0}'.
|
||||||
format(self.session_id))
|
format(self.session_id))
|
||||||
retry -= 1
|
retry -= 1
|
||||||
logging.error('[*]Unable to end session {0}'.format(self.session_id))
|
LOG.error('Unable to end session {0}'.format(self.session_id))
|
||||||
|
|
||||||
def schedule(self):
|
def schedule(self):
|
||||||
try:
|
try:
|
||||||
kwargs = self.get_schedule_args()
|
kwargs = self.get_schedule_args()
|
||||||
self.scheduler.add_job(self.execute, id=self.id, **kwargs)
|
self.scheduler.add_job(self.execute, id=self.id, **kwargs)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logging.error("[*] Unable to schedule job {0}: {1}".
|
LOG.error("Unable to schedule job {0}: {1}".
|
||||||
format(self.id, e))
|
format(self.id, e))
|
||||||
|
|
||||||
logging.info('scheduler job with parameters {0}'.format(kwargs))
|
LOG.info('scheduler job with parameters {0}'.format(kwargs))
|
||||||
|
|
||||||
if self.scheduled:
|
if self.scheduled:
|
||||||
self.job_doc_status = Job.SCHEDULED_STATUS
|
self.job_doc_status = Job.SCHEDULED_STATUS
|
||||||
|
|
|
@ -16,15 +16,18 @@ limitations under the License.
|
||||||
Freezer LVM related functions
|
Freezer LVM related functions
|
||||||
"""
|
"""
|
||||||
|
|
||||||
import logging
|
|
||||||
import os
|
import os
|
||||||
import re
|
import re
|
||||||
import subprocess
|
import subprocess
|
||||||
import uuid
|
import uuid
|
||||||
|
|
||||||
|
from oslo_log import log
|
||||||
|
|
||||||
from freezer.common import config as freezer_config
|
from freezer.common import config as freezer_config
|
||||||
from freezer.utils import utils
|
from freezer.utils import utils
|
||||||
|
|
||||||
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
def lvm_snap_remove(backup_opt_dict):
|
def lvm_snap_remove(backup_opt_dict):
|
||||||
"""
|
"""
|
||||||
|
@ -39,12 +42,12 @@ def lvm_snap_remove(backup_opt_dict):
|
||||||
try:
|
try:
|
||||||
_umount(backup_opt_dict.lvm_dirmount)
|
_umount(backup_opt_dict.lvm_dirmount)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logging.warning("Snapshot unmount errror: {0}".format(e))
|
LOG.warning("Snapshot unmount errror: {0}".format(e))
|
||||||
lv = os.path.join('/dev',
|
lv = os.path.join('/dev',
|
||||||
backup_opt_dict.lvm_volgroup,
|
backup_opt_dict.lvm_volgroup,
|
||||||
backup_opt_dict.lvm_snapname)
|
backup_opt_dict.lvm_snapname)
|
||||||
_lvremove(lv)
|
_lvremove(lv)
|
||||||
logging.info('[*] Snapshot volume {0} removed'.format(lv))
|
LOG.info('Snapshot volume {0} removed'.format(lv))
|
||||||
|
|
||||||
|
|
||||||
def get_vol_fs_type(vol_name):
|
def get_vol_fs_type(vol_name):
|
||||||
|
@ -54,8 +57,8 @@ def get_vol_fs_type(vol_name):
|
||||||
file system type
|
file system type
|
||||||
"""
|
"""
|
||||||
if os.path.exists(vol_name) is False:
|
if os.path.exists(vol_name) is False:
|
||||||
err = '[*] Provided volume name not found: {0} '.format(vol_name)
|
err = 'Provided volume name not found: {0} '.format(vol_name)
|
||||||
logging.exception(err)
|
LOG.exception(err)
|
||||||
raise Exception(err)
|
raise Exception(err)
|
||||||
|
|
||||||
file_cmd = '{0} -0 -bLs --no-pad --no-buffer --preserve-date \
|
file_cmd = '{0} -0 -bLs --no-pad --no-buffer --preserve-date \
|
||||||
|
@ -67,12 +70,12 @@ def get_vol_fs_type(vol_name):
|
||||||
(file_out, file_err) = file_process.communicate()
|
(file_out, file_err) = file_process.communicate()
|
||||||
file_match = re.search(r'(\S+?) filesystem data', file_out, re.I)
|
file_match = re.search(r'(\S+?) filesystem data', file_out, re.I)
|
||||||
if file_match is None:
|
if file_match is None:
|
||||||
err = '[*] File system type not guessable: {0}'.format(file_err)
|
err = 'File system type not guessable: {0}'.format(file_err)
|
||||||
logging.exception(err)
|
LOG.exception(err)
|
||||||
raise Exception(err)
|
raise Exception(err)
|
||||||
else:
|
else:
|
||||||
filesys_type = file_match.group(1)
|
filesys_type = file_match.group(1)
|
||||||
logging.info('[*] File system {0} found for volume {1}'.format(
|
LOG.info('File system {0} found for volume {1}'.format(
|
||||||
filesys_type, vol_name))
|
filesys_type, vol_name))
|
||||||
return filesys_type.lower().strip()
|
return filesys_type.lower().strip()
|
||||||
|
|
||||||
|
@ -115,7 +118,7 @@ def lvm_snap(backup_opt_dict):
|
||||||
lvm_info['snap_path'])
|
lvm_info['snap_path'])
|
||||||
|
|
||||||
if not validate_lvm_params(backup_opt_dict):
|
if not validate_lvm_params(backup_opt_dict):
|
||||||
logging.info('[*] No LVM requested/configured')
|
LOG.info('No LVM requested/configured')
|
||||||
return False
|
return False
|
||||||
|
|
||||||
utils.create_dir(backup_opt_dict.lvm_dirmount)
|
utils.create_dir(backup_opt_dict.lvm_dirmount)
|
||||||
|
@ -145,9 +148,9 @@ def lvm_snap(backup_opt_dict):
|
||||||
if lvm_process.returncode:
|
if lvm_process.returncode:
|
||||||
raise Exception('lvm snapshot creation error: {0}'.format(lvm_err))
|
raise Exception('lvm snapshot creation error: {0}'.format(lvm_err))
|
||||||
|
|
||||||
logging.debug('[*] {0}'.format(lvm_out))
|
LOG.debug('{0}'.format(lvm_out))
|
||||||
logging.warning('[*] Logical volume "{0}" created'.
|
LOG.warning('Logical volume "{0}" created'.
|
||||||
format(backup_opt_dict.lvm_snapname))
|
format(backup_opt_dict.lvm_snapname))
|
||||||
|
|
||||||
# Guess the file system of the provided source volume and st mount
|
# Guess the file system of the provided source volume and st mount
|
||||||
# options accordingly
|
# options accordingly
|
||||||
|
@ -170,16 +173,16 @@ def lvm_snap(backup_opt_dict):
|
||||||
executable=utils.find_executable('bash'))
|
executable=utils.find_executable('bash'))
|
||||||
mount_err = mount_process.communicate()[1]
|
mount_err = mount_process.communicate()[1]
|
||||||
if 'already mounted' in mount_err:
|
if 'already mounted' in mount_err:
|
||||||
logging.warning('[*] Volume {0} already mounted on {1}\
|
LOG.warning('Volume {0} already mounted on {1}\
|
||||||
'.format(abs_snap_name, backup_opt_dict.lvm_dirmount))
|
'.format(abs_snap_name, backup_opt_dict.lvm_dirmount))
|
||||||
return True
|
return True
|
||||||
if mount_err:
|
if mount_err:
|
||||||
logging.error("[*] Snapshot mount error. Removing snapshot")
|
LOG.error("Snapshot mount error. Removing snapshot")
|
||||||
lvm_snap_remove(backup_opt_dict)
|
lvm_snap_remove(backup_opt_dict)
|
||||||
raise Exception('lvm snapshot mounting error: {0}'.format(mount_err))
|
raise Exception('lvm snapshot mounting error: {0}'.format(mount_err))
|
||||||
else:
|
else:
|
||||||
logging.warning(
|
LOG.warning(
|
||||||
'[*] Volume {0} succesfully mounted on {1}'.format(
|
'Volume {0} succesfully mounted on {1}'.format(
|
||||||
abs_snap_name, backup_opt_dict.lvm_dirmount))
|
abs_snap_name, backup_opt_dict.lvm_dirmount))
|
||||||
|
|
||||||
return True
|
return True
|
||||||
|
@ -264,11 +267,11 @@ def validate_lvm_params(backup_opt_dict):
|
||||||
True snapshot is requested and parameters are valid
|
True snapshot is requested and parameters are valid
|
||||||
"""
|
"""
|
||||||
if backup_opt_dict.lvm_snapperm not in ('ro', 'rw'):
|
if backup_opt_dict.lvm_snapperm not in ('ro', 'rw'):
|
||||||
raise ValueError('[*] Error: Invalid value for option lvm-snap-perm: '
|
raise ValueError('Error: Invalid value for option lvm-snap-perm: '
|
||||||
'{}'.format(backup_opt_dict.lvm_snapperm))
|
'{}'.format(backup_opt_dict.lvm_snapperm))
|
||||||
|
|
||||||
if not backup_opt_dict.path_to_backup:
|
if not backup_opt_dict.path_to_backup:
|
||||||
raise ValueError('[*] Error: no path-to-backup and '
|
raise ValueError('Error: no path-to-backup and '
|
||||||
'no lvm-auto-snap provided')
|
'no lvm-auto-snap provided')
|
||||||
|
|
||||||
if not backup_opt_dict.lvm_srcvol and not backup_opt_dict.lvm_volgroup:
|
if not backup_opt_dict.lvm_srcvol and not backup_opt_dict.lvm_volgroup:
|
||||||
|
@ -276,24 +279,24 @@ def validate_lvm_params(backup_opt_dict):
|
||||||
return False
|
return False
|
||||||
|
|
||||||
if not backup_opt_dict.lvm_srcvol:
|
if not backup_opt_dict.lvm_srcvol:
|
||||||
raise ValueError('[*] Error: no lvm-srcvol and '
|
raise ValueError('Error: no lvm-srcvol and '
|
||||||
'no lvm-auto-snap provided')
|
'no lvm-auto-snap provided')
|
||||||
if not backup_opt_dict.lvm_volgroup:
|
if not backup_opt_dict.lvm_volgroup:
|
||||||
raise ValueError('[*] Error: no lvm-volgroup and '
|
raise ValueError('Error: no lvm-volgroup and '
|
||||||
'no lvm-auto-snap provided')
|
'no lvm-auto-snap provided')
|
||||||
|
|
||||||
logging.info('[*] Source LVM Volume: {0}'.format(
|
LOG.info('Source LVM Volume: {0}'.format(
|
||||||
backup_opt_dict.lvm_srcvol))
|
backup_opt_dict.lvm_srcvol))
|
||||||
logging.info('[*] LVM Volume Group: {0}'.format(
|
LOG.info('LVM Volume Group: {0}'.format(
|
||||||
backup_opt_dict.lvm_volgroup))
|
backup_opt_dict.lvm_volgroup))
|
||||||
logging.info('[*] Snapshot name: {0}'.format(
|
LOG.info('Snapshot name: {0}'.format(
|
||||||
backup_opt_dict.lvm_snapname))
|
backup_opt_dict.lvm_snapname))
|
||||||
logging.info('[*] Snapshot size: {0}'.format(
|
LOG.info('Snapshot size: {0}'.format(
|
||||||
backup_opt_dict.lvm_snapsize))
|
backup_opt_dict.lvm_snapsize))
|
||||||
logging.info('[*] Directory where the lvm snaphost will be mounted on:'
|
LOG.info('Directory where the lvm snaphost will be mounted on:'
|
||||||
' {0}'.format(backup_opt_dict.lvm_dirmount.strip()))
|
' {0}'.format(backup_opt_dict.lvm_dirmount.strip()))
|
||||||
logging.info('[*] Path to backup (including snapshot): {0}'
|
LOG.info('Path to backup (including snapshot): {0}'
|
||||||
.format(backup_opt_dict.path_to_backup))
|
.format(backup_opt_dict.path_to_backup))
|
||||||
|
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
@ -312,7 +315,7 @@ def _umount(path):
|
||||||
raise Exception('Impossible to umount {0}. {1}'
|
raise Exception('Impossible to umount {0}. {1}'
|
||||||
.format(path, mount_err))
|
.format(path, mount_err))
|
||||||
os.rmdir(path)
|
os.rmdir(path)
|
||||||
logging.info('[*] Volume {0} unmounted'.format(path))
|
LOG.info('Volume {0} unmounted'.format(path))
|
||||||
|
|
||||||
|
|
||||||
def _lvremove(lv):
|
def _lvremove(lv):
|
||||||
|
@ -326,8 +329,8 @@ def _lvremove(lv):
|
||||||
output, error = lvremove_proc.communicate()
|
output, error = lvremove_proc.communicate()
|
||||||
if lvremove_proc.returncode:
|
if lvremove_proc.returncode:
|
||||||
if "contains a filesystem in use" in error:
|
if "contains a filesystem in use" in error:
|
||||||
logging.warning("Couldn't remove volume {0}. "
|
LOG.warning("Couldn't remove volume {0}. "
|
||||||
"It is still in use.".format(lv))
|
"It is still in use.".format(lv))
|
||||||
log_volume_holding_process(lv)
|
log_volume_holding_process(lv)
|
||||||
else:
|
else:
|
||||||
break
|
break
|
||||||
|
@ -349,7 +352,7 @@ def log_volume_holding_process(lv):
|
||||||
# lsof is quite long, so no need to add a sleep here
|
# lsof is quite long, so no need to add a sleep here
|
||||||
utils.find_executable('lsof'), dev_id[0], dev_id[1])
|
utils.find_executable('lsof'), dev_id[0], dev_id[1])
|
||||||
process = subprocess.check_output([command], shell=True)
|
process = subprocess.check_output([command], shell=True)
|
||||||
logging.warning("Process holding the volume is '{}'".format(process))
|
LOG.warning("Process holding the volume is '{}'".format(process))
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logging.warning("Could not get informations on the process holding the"
|
LOG.warning("Could not get informations on the process holding the"
|
||||||
" volume: {}".format(str(e)))
|
" volume: {}".format(str(e)))
|
||||||
|
|
|
@ -16,16 +16,13 @@ limitations under the License.
|
||||||
Freezer Backup modes related functions
|
Freezer Backup modes related functions
|
||||||
"""
|
"""
|
||||||
|
|
||||||
from oslo_config import cfg
|
|
||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
|
|
||||||
from freezer.snapshot import lvm
|
from freezer.snapshot import lvm
|
||||||
from freezer.snapshot import vss
|
from freezer.snapshot import vss
|
||||||
from freezer.utils import winutils
|
from freezer.utils import winutils
|
||||||
|
|
||||||
|
LOG = log.getLogger(__name__)
|
||||||
CONF = cfg.CONF
|
|
||||||
logging = log.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
def snapshot_create(backup_opt_dict):
|
def snapshot_create(backup_opt_dict):
|
||||||
|
|
|
@ -12,12 +12,15 @@
|
||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
import logging
|
|
||||||
import os
|
import os
|
||||||
|
|
||||||
|
from oslo_log import log
|
||||||
|
|
||||||
from freezer.utils import utils
|
from freezer.utils import utils
|
||||||
from freezer.utils import winutils
|
from freezer.utils import winutils
|
||||||
|
|
||||||
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
def vss_create_shadow_copy(windows_volume):
|
def vss_create_shadow_copy(windows_volume):
|
||||||
"""
|
"""
|
||||||
|
@ -68,7 +71,7 @@ def vss_create_shadow_copy(windows_volume):
|
||||||
['powershell.exe', '-executionpolicy', 'unrestricted',
|
['powershell.exe', '-executionpolicy', 'unrestricted',
|
||||||
'-command', script, '-volume', windows_volume])
|
'-command', script, '-volume', windows_volume])
|
||||||
if err != '':
|
if err != '':
|
||||||
raise Exception('[*] Error creating a new shadow copy on {0}'
|
raise Exception('Error creating a new shadow copy on {0}'
|
||||||
', error {1}' .format(windows_volume, err))
|
', error {1}' .format(windows_volume, err))
|
||||||
|
|
||||||
for line in out.split('\n'):
|
for line in out.split('\n'):
|
||||||
|
@ -78,8 +81,7 @@ def vss_create_shadow_copy(windows_volume):
|
||||||
shadow_id = line.split('=')[1].strip().lower() + '}'
|
shadow_id = line.split('=')[1].strip().lower() + '}'
|
||||||
shadow_id = shadow_id[1:]
|
shadow_id = shadow_id[1:]
|
||||||
|
|
||||||
logging.info('[*] Created shadow copy {0}'.
|
LOG.info('Created shadow copy {0}'.format(shadow_id))
|
||||||
format(shadow_id))
|
|
||||||
|
|
||||||
return shadow_path, shadow_id
|
return shadow_path, shadow_id
|
||||||
|
|
||||||
|
@ -96,13 +98,12 @@ def vss_delete_shadow_copy(shadow_id, windows_volume):
|
||||||
'/shadow={0}'.format(shadow_id), '/quiet']
|
'/shadow={0}'.format(shadow_id), '/quiet']
|
||||||
(out, err) = utils.create_subprocess(cmd)
|
(out, err) = utils.create_subprocess(cmd)
|
||||||
if err != '':
|
if err != '':
|
||||||
raise Exception('[*] Error deleting shadow copy with id {0}'
|
raise Exception('Error deleting shadow copy with id {0}'
|
||||||
', error {1}' .format(shadow_id, err))
|
', error {1}' .format(shadow_id, err))
|
||||||
|
|
||||||
vss_delete_symlink(windows_volume)
|
vss_delete_symlink(windows_volume)
|
||||||
|
|
||||||
logging.info('[*] Deleting shadow copy {0}'.
|
LOG.info('Deleting shadow copy {0}'.format(shadow_id))
|
||||||
format(shadow_id))
|
|
||||||
|
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
@ -114,6 +115,6 @@ def vss_delete_symlink(windows_volume):
|
||||||
if os.path.exists(path):
|
if os.path.exists(path):
|
||||||
os.rmdir(path)
|
os.rmdir(path)
|
||||||
except Exception:
|
except Exception:
|
||||||
logging.error('Failed to delete shadow copy symlink {0}'.
|
LOG.error('Failed to delete shadow copy symlink {0}'.
|
||||||
format(os.path.join(windows_volume,
|
format(os.path.join(windows_volume,
|
||||||
'freezer_shadowcopy')))
|
'freezer_shadowcopy')))
|
||||||
|
|
|
@ -14,13 +14,16 @@
|
||||||
|
|
||||||
|
|
||||||
import abc
|
import abc
|
||||||
import logging
|
|
||||||
import os
|
import os
|
||||||
import re
|
import re
|
||||||
import six
|
import six
|
||||||
|
|
||||||
|
from oslo_log import log
|
||||||
|
|
||||||
from freezer.utils import utils
|
from freezer.utils import utils
|
||||||
|
|
||||||
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
@six.add_metaclass(abc.ABCMeta)
|
@six.add_metaclass(abc.ABCMeta)
|
||||||
class Storage(object):
|
class Storage(object):
|
||||||
|
@ -122,7 +125,7 @@ class Storage(object):
|
||||||
if recent_to_date:
|
if recent_to_date:
|
||||||
backups = [b for b in backups
|
backups = [b for b in backups
|
||||||
if b.timestamp <= recent_to_date]
|
if b.timestamp <= recent_to_date]
|
||||||
err_msg = '[*] No matching backup name "{0}" found'\
|
err_msg = 'No matching backup name "{0}" found'\
|
||||||
.format(hostname_backup_name)
|
.format(hostname_backup_name)
|
||||||
if not backups:
|
if not backups:
|
||||||
raise IndexError(err_msg)
|
raise IndexError(err_msg)
|
||||||
|
@ -338,9 +341,8 @@ class Backup(object):
|
||||||
backup.tar_meta = name in tar_names
|
backup.tar_meta = name in tar_names
|
||||||
backups.append(backup)
|
backups.append(backup)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logging.exception(e)
|
LOG.exception(e)
|
||||||
logging.error("cannot parse backup name: {0}"
|
LOG.error("cannot parse backup name: {0}".format(name))
|
||||||
.format(name))
|
|
||||||
backups.sort(
|
backups.sort(
|
||||||
key=lambda x: (x.hostname_backup_name, x.timestamp, x.level))
|
key=lambda x: (x.hostname_backup_name, x.timestamp, x.level))
|
||||||
zero_backups = []
|
zero_backups = []
|
||||||
|
@ -357,8 +359,8 @@ class Backup(object):
|
||||||
last_backup.add_increment(backup.backup(storage,
|
last_backup.add_increment(backup.backup(storage,
|
||||||
last_backup))
|
last_backup))
|
||||||
else:
|
else:
|
||||||
logging.error("Incremental backup without parent: {0}"
|
LOG.error("Incremental backup without parent: {0}"
|
||||||
.format(backup))
|
.format(backup))
|
||||||
|
|
||||||
return zero_backups
|
return zero_backups
|
||||||
|
|
||||||
|
|
|
@ -15,9 +15,13 @@
|
||||||
import abc
|
import abc
|
||||||
import six
|
import six
|
||||||
|
|
||||||
|
from oslo_log import log
|
||||||
|
|
||||||
from freezer.storage import base
|
from freezer.storage import base
|
||||||
from freezer.utils import utils
|
from freezer.utils import utils
|
||||||
|
|
||||||
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
@six.add_metaclass(abc.ABCMeta)
|
@six.add_metaclass(abc.ABCMeta)
|
||||||
class FsLikeStorage(base.Storage):
|
class FsLikeStorage(base.Storage):
|
||||||
|
|
|
@ -19,9 +19,13 @@ import io
|
||||||
import os
|
import os
|
||||||
import shutil
|
import shutil
|
||||||
|
|
||||||
|
from oslo_log import log
|
||||||
|
|
||||||
from freezer.storage import fslike
|
from freezer.storage import fslike
|
||||||
from freezer.utils import utils
|
from freezer.utils import utils
|
||||||
|
|
||||||
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class LocalStorage(fslike.FsLikeStorage):
|
class LocalStorage(fslike.FsLikeStorage):
|
||||||
def get_file(self, from_path, to_path):
|
def get_file(self, from_path, to_path):
|
||||||
|
|
|
@ -15,15 +15,13 @@
|
||||||
# PyCharm will not recognize queue. Puts red squiggle line under it. That's OK.
|
# PyCharm will not recognize queue. Puts red squiggle line under it. That's OK.
|
||||||
from six.moves import queue
|
from six.moves import queue
|
||||||
|
|
||||||
from oslo_config import cfg
|
|
||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
|
|
||||||
from freezer.storage import base
|
from freezer.storage import base
|
||||||
from freezer.storage.exceptions import StorageException
|
from freezer.storage.exceptions import StorageException
|
||||||
from freezer.utils import streaming
|
from freezer.utils import streaming
|
||||||
|
|
||||||
CONF = cfg.CONF
|
LOG = log.getLogger(__name__)
|
||||||
logging = log.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
class MultipleStorage(base.Storage):
|
class MultipleStorage(base.Storage):
|
||||||
|
@ -59,7 +57,7 @@ class MultipleStorage(base.Storage):
|
||||||
if not except_queue.empty:
|
if not except_queue.empty:
|
||||||
while not except_queue.empty():
|
while not except_queue.empty():
|
||||||
e = except_queue.get_nowait()
|
e = except_queue.get_nowait()
|
||||||
logging.critical('Storage error: {0}'.format(e))
|
LOG.critical('Storage error: {0}'.format(e))
|
||||||
return True
|
return True
|
||||||
else:
|
else:
|
||||||
return False
|
return False
|
||||||
|
@ -133,7 +131,7 @@ class StorageManager(object):
|
||||||
else:
|
else:
|
||||||
output_queue.put(message)
|
output_queue.put(message)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logging.exception(e)
|
LOG.exception(e)
|
||||||
StorageManager.one_fails_all_fail(
|
StorageManager.one_fails_all_fail(
|
||||||
self.input_queue, self.output_queues)
|
self.input_queue, self.output_queues)
|
||||||
self.broken_output_queues.add(output_queue)
|
self.broken_output_queues.add(output_queue)
|
||||||
|
|
|
@ -20,10 +20,14 @@ import stat
|
||||||
|
|
||||||
import paramiko
|
import paramiko
|
||||||
|
|
||||||
|
from oslo_log import log
|
||||||
|
|
||||||
from freezer.storage import fslike
|
from freezer.storage import fslike
|
||||||
|
|
||||||
from freezer.utils import utils
|
from freezer.utils import utils
|
||||||
|
|
||||||
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class SshStorage(fslike.FsLikeStorage):
|
class SshStorage(fslike.FsLikeStorage):
|
||||||
"""
|
"""
|
||||||
|
|
|
@ -16,15 +16,13 @@ limitations under the License.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
import json
|
import json
|
||||||
from oslo_config import cfg
|
|
||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
import requests.exceptions
|
import requests.exceptions
|
||||||
import time
|
import time
|
||||||
|
|
||||||
from freezer.storage import base
|
from freezer.storage import base
|
||||||
|
|
||||||
CONF = cfg.CONF
|
LOG = log.getLogger(__name__)
|
||||||
logging = log.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
class SwiftStorage(base.Storage):
|
class SwiftStorage(base.Storage):
|
||||||
|
@ -71,24 +69,24 @@ class SwiftStorage(base.Storage):
|
||||||
success = False
|
success = False
|
||||||
while not success:
|
while not success:
|
||||||
try:
|
try:
|
||||||
logging.info(
|
LOG.info(
|
||||||
'[*] Uploading file chunk index: {0}'.format(path))
|
'Uploading file chunk index: {0}'.format(path))
|
||||||
self.swift().put_object(
|
self.swift().put_object(
|
||||||
self.segments, path, content,
|
self.segments, path, content,
|
||||||
content_type='application/octet-stream',
|
content_type='application/octet-stream',
|
||||||
content_length=len(content))
|
content_length=len(content))
|
||||||
logging.info('[*] Data successfully uploaded!')
|
LOG.info('Data successfully uploaded!')
|
||||||
success = True
|
success = True
|
||||||
except Exception as error:
|
except Exception as error:
|
||||||
logging.info(
|
LOG.info(
|
||||||
'[*] Retrying to upload file chunk index: {0}'.format(
|
'Retrying to upload file chunk index: {0}'.format(
|
||||||
path))
|
path))
|
||||||
time.sleep(60)
|
time.sleep(60)
|
||||||
self.client_manager.create_swift()
|
self.client_manager.create_swift()
|
||||||
count += 1
|
count += 1
|
||||||
if count == 10:
|
if count == 10:
|
||||||
logging.critical('[*] Error: add_object: {0}'
|
LOG.critical('Error: add_object: {0}'
|
||||||
.format(error))
|
.format(error))
|
||||||
raise Exception("cannot add object to storage")
|
raise Exception("cannot add object to storage")
|
||||||
|
|
||||||
def upload_manifest(self, backup):
|
def upload_manifest(self, backup):
|
||||||
|
@ -101,10 +99,10 @@ class SwiftStorage(base.Storage):
|
||||||
self.client_manager.create_swift()
|
self.client_manager.create_swift()
|
||||||
headers = {'x-object-manifest':
|
headers = {'x-object-manifest':
|
||||||
u'{0}/{1}'.format(self.segments, backup)}
|
u'{0}/{1}'.format(self.segments, backup)}
|
||||||
logging.info('[*] Uploading Swift Manifest: {0}'.format(backup))
|
LOG.info('Uploading Swift Manifest: {0}'.format(backup))
|
||||||
self.swift().put_object(container=self.container, obj=str(backup),
|
self.swift().put_object(container=self.container, obj=str(backup),
|
||||||
contents=u'', headers=headers)
|
contents=u'', headers=headers)
|
||||||
logging.info('[*] Manifest successfully uploaded!')
|
LOG.info('Manifest successfully uploaded!')
|
||||||
|
|
||||||
def upload_meta_file(self, backup, meta_file):
|
def upload_meta_file(self, backup, meta_file):
|
||||||
# Upload swift manifest for segments
|
# Upload swift manifest for segments
|
||||||
|
@ -113,7 +111,7 @@ class SwiftStorage(base.Storage):
|
||||||
self.client_manager.create_swift()
|
self.client_manager.create_swift()
|
||||||
|
|
||||||
# Upload tar incremental meta data file and remove it
|
# Upload tar incremental meta data file and remove it
|
||||||
logging.info('[*] Uploading tar meta data file: {0}'.format(
|
LOG.info('Uploading tar meta data file: {0}'.format(
|
||||||
backup.tar()))
|
backup.tar()))
|
||||||
with open(meta_file, 'r') as meta_fd:
|
with open(meta_file, 'r') as meta_fd:
|
||||||
self.swift().put_object(
|
self.swift().put_object(
|
||||||
|
@ -203,7 +201,7 @@ class SwiftStorage(base.Storage):
|
||||||
return [b for b in base.Backup.parse_backups(names, self)
|
return [b for b in base.Backup.parse_backups(names, self)
|
||||||
if b.hostname_backup_name == hostname_backup_name]
|
if b.hostname_backup_name == hostname_backup_name]
|
||||||
except Exception as error:
|
except Exception as error:
|
||||||
raise Exception('[*] Error: get_object_list: {0}'.format(error))
|
raise Exception('Error: get_object_list: {0}'.format(error))
|
||||||
|
|
||||||
def backup_blocks(self, backup):
|
def backup_blocks(self, backup):
|
||||||
"""
|
"""
|
||||||
|
@ -217,7 +215,7 @@ class SwiftStorage(base.Storage):
|
||||||
self.container, str(backup),
|
self.container, str(backup),
|
||||||
resp_chunk_size=self.max_segment_size)[1]
|
resp_chunk_size=self.max_segment_size)[1]
|
||||||
except requests.exceptions.SSLError as e:
|
except requests.exceptions.SSLError as e:
|
||||||
logging.warning(e)
|
LOG.warning(e)
|
||||||
chunks = self.client_manager.create_swift().get_object(
|
chunks = self.client_manager.create_swift().get_object(
|
||||||
self.container, str(backup),
|
self.container, str(backup),
|
||||||
resp_chunk_size=self.max_segment_size)[1]
|
resp_chunk_size=self.max_segment_size)[1]
|
||||||
|
|
|
@ -13,15 +13,18 @@
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
|
|
||||||
import logging
|
|
||||||
import os
|
import os
|
||||||
import re
|
import re
|
||||||
|
|
||||||
from six.moves import configparser
|
from six.moves import configparser
|
||||||
from six.moves import cStringIO
|
from six.moves import cStringIO
|
||||||
|
|
||||||
|
from oslo_log import log
|
||||||
|
|
||||||
from freezer.utils import utils
|
from freezer.utils import utils
|
||||||
|
|
||||||
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class Config(object):
|
class Config(object):
|
||||||
|
|
||||||
|
@ -29,8 +32,8 @@ class Config(object):
|
||||||
def parse(config_path):
|
def parse(config_path):
|
||||||
if config_path:
|
if config_path:
|
||||||
if not os.path.exists(config_path):
|
if not os.path.exists(config_path):
|
||||||
logging.error("[*] Critical Error: Configuration file {0} not"
|
LOG.error("Critical Error: Configuration file {0} not"
|
||||||
" found".format(config_path))
|
" found".format(config_path))
|
||||||
raise Exception("Configuration file {0} not found !".format(
|
raise Exception("Configuration file {0} not found !".format(
|
||||||
config_path))
|
config_path))
|
||||||
config = configparser.SafeConfigParser()
|
config = configparser.SafeConfigParser()
|
||||||
|
@ -93,12 +96,12 @@ def ini_parse(lines):
|
||||||
try:
|
try:
|
||||||
# TODO(ANONYMOUS): Remove the parsing of ini-like file via regex
|
# TODO(ANONYMOUS): Remove the parsing of ini-like file via regex
|
||||||
conf = find_all(INI, lines)
|
conf = find_all(INI, lines)
|
||||||
logging.warning("Using non-INI files for database configuration "
|
LOG.warning("Using non-INI files for database configuration "
|
||||||
"file is deprecated. Falling back to Regex.")
|
"file is deprecated. Falling back to Regex.")
|
||||||
logging.warning("INI parser error was: {}".format(str(e)))
|
LOG.warning("INI parser error was: {}".format(str(e)))
|
||||||
return conf
|
return conf
|
||||||
except Exception:
|
except Exception:
|
||||||
logging.warning("Couldn't parse non-INI config file using Regex")
|
LOG.warning("Couldn't parse non-INI config file using Regex")
|
||||||
raise
|
raise
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -18,6 +18,10 @@ Freezer general utils functions
|
||||||
from six.moves import queue
|
from six.moves import queue
|
||||||
import threading
|
import threading
|
||||||
|
|
||||||
|
from oslo_log import log
|
||||||
|
|
||||||
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class Wait(Exception):
|
class Wait(Exception):
|
||||||
pass
|
pass
|
||||||
|
@ -104,6 +108,7 @@ class QueuedThread(threading.Thread):
|
||||||
try:
|
try:
|
||||||
super(QueuedThread, self).run()
|
super(QueuedThread, self).run()
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
|
LOG.exception(e)
|
||||||
self._exception_queue.put_nowait(e)
|
self._exception_queue.put_nowait(e)
|
||||||
self.rich_queue.force_stop()
|
self.rich_queue.force_stop()
|
||||||
# Thread will exit at this point.
|
# Thread will exit at this point.
|
||||||
|
|
|
@ -19,7 +19,6 @@ Freezer general utils functions
|
||||||
import datetime
|
import datetime
|
||||||
import errno
|
import errno
|
||||||
import fnmatch as fn
|
import fnmatch as fn
|
||||||
import logging
|
|
||||||
import os
|
import os
|
||||||
import subprocess
|
import subprocess
|
||||||
import sys
|
import sys
|
||||||
|
@ -31,7 +30,6 @@ from oslo_config import cfg
|
||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
from six.moves import configparser
|
from six.moves import configparser
|
||||||
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
LOG = log.getLogger(__name__)
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
@ -59,15 +57,15 @@ def create_dir(directory, do_log=True):
|
||||||
try:
|
try:
|
||||||
if not os.path.isdir(expanded_dir_name):
|
if not os.path.isdir(expanded_dir_name):
|
||||||
if do_log:
|
if do_log:
|
||||||
logging.warning('[*] Directory {0} does not exists,\
|
LOG.warning('Directory {0} does not exists,\
|
||||||
creating...'.format(expanded_dir_name))
|
creating...'.format(expanded_dir_name))
|
||||||
os.makedirs(expanded_dir_name)
|
os.makedirs(expanded_dir_name)
|
||||||
else:
|
else:
|
||||||
if do_log:
|
if do_log:
|
||||||
logging.warning('[*] Directory {0} found!'.format(
|
LOG.warning('Directory {0} found!'.format(
|
||||||
expanded_dir_name))
|
expanded_dir_name))
|
||||||
except Exception as error:
|
except Exception as error:
|
||||||
err = '[*] Error while creating directory {0}: {1}\
|
err = 'Error while creating directory {0}: {1}\
|
||||||
'.format(expanded_dir_name, error)
|
'.format(expanded_dir_name, error)
|
||||||
raise Exception(err)
|
raise Exception(err)
|
||||||
|
|
||||||
|
@ -126,8 +124,8 @@ def get_mount_from_path(path):
|
||||||
"""
|
"""
|
||||||
|
|
||||||
if not os.path.exists(path):
|
if not os.path.exists(path):
|
||||||
logging.critical('[*] Error: provided path does not exist: {0}'
|
LOG.critical('Error: provided path does not exist: {0}'
|
||||||
.format(path))
|
.format(path))
|
||||||
raise IOError
|
raise IOError
|
||||||
|
|
||||||
mount_point_path = os.path.abspath(path)
|
mount_point_path = os.path.abspath(path)
|
||||||
|
@ -230,8 +228,8 @@ class ReSizeStream(object):
|
||||||
return self
|
return self
|
||||||
|
|
||||||
def next(self):
|
def next(self):
|
||||||
logging.info("Transmitted {0} of {1}".format(self.transmitted,
|
LOG.info("Transmitted {0} of {1}".format(self.transmitted,
|
||||||
self.length))
|
self.length))
|
||||||
chunk_size = self.chunk_size
|
chunk_size = self.chunk_size
|
||||||
if len(self.reminder) > chunk_size:
|
if len(self.reminder) > chunk_size:
|
||||||
result = self.reminder[:chunk_size]
|
result = self.reminder[:chunk_size]
|
||||||
|
@ -338,7 +336,7 @@ def alter_proxy(proxy):
|
||||||
os.environ.pop("HTTPS_PROXY", None)
|
os.environ.pop("HTTPS_PROXY", None)
|
||||||
if proxy_value.startswith('http://') or \
|
if proxy_value.startswith('http://') or \
|
||||||
proxy_value.startswith('https://'):
|
proxy_value.startswith('https://'):
|
||||||
logging.info('[*] Using proxy {0}'.format(proxy_value))
|
LOG.info('Using proxy {0}'.format(proxy_value))
|
||||||
os.environ['HTTP_PROXY'] = str(proxy_value)
|
os.environ['HTTP_PROXY'] = str(proxy_value)
|
||||||
os.environ['HTTPS_PROXY'] = str(proxy_value)
|
os.environ['HTTPS_PROXY'] = str(proxy_value)
|
||||||
else:
|
else:
|
||||||
|
@ -356,7 +354,7 @@ def shield(func):
|
||||||
try:
|
try:
|
||||||
return func(*args, **kwargs)
|
return func(*args, **kwargs)
|
||||||
except Exception as error:
|
except Exception as error:
|
||||||
logging.error(error)
|
LOG.error(error)
|
||||||
return wrapper
|
return wrapper
|
||||||
|
|
||||||
|
|
||||||
|
@ -366,7 +364,7 @@ def delete_file(path_to_file):
|
||||||
try:
|
try:
|
||||||
os.remove(path_to_file)
|
os.remove(path_to_file)
|
||||||
except Exception:
|
except Exception:
|
||||||
logging.warning("Error deleting file {0}".format(path_to_file))
|
LOG.warning("Error deleting file {0}".format(path_to_file))
|
||||||
|
|
||||||
|
|
||||||
def walk_path(path, exclude, ignorelinks, callback, *kargs, **kwargs):
|
def walk_path(path, exclude, ignorelinks, callback, *kargs, **kwargs):
|
||||||
|
@ -498,7 +496,7 @@ def set_max_process_priority():
|
||||||
# children processes inherit niceness from father
|
# children processes inherit niceness from father
|
||||||
try:
|
try:
|
||||||
LOG.warning(
|
LOG.warning(
|
||||||
'[*] Setting freezer execution with high CPU and I/O priority')
|
'Setting freezer execution with high CPU and I/O priority')
|
||||||
PID = os.getpid()
|
PID = os.getpid()
|
||||||
# Set cpu priority
|
# Set cpu priority
|
||||||
os.nice(-19)
|
os.nice(-19)
|
||||||
|
@ -509,4 +507,4 @@ def set_max_process_priority():
|
||||||
u'-p', u'{0}'.format(PID)
|
u'-p', u'{0}'.format(PID)
|
||||||
])
|
])
|
||||||
except Exception as priority_error:
|
except Exception as priority_error:
|
||||||
LOG.warning('[*] Priority: {0}'.format(priority_error))
|
LOG.warning('Priority: {0}'.format(priority_error))
|
||||||
|
|
Loading…
Reference in New Issue