Switch freezer-agent to oslo.config and oslo.log

freezer-agent currently uses plain argparse to parse its CLI options. Switch to oslo.config for CLI and configuration-file options, and to oslo.log for logging options.

Change-Id: I0cab1b038dc1122e7e021e777d72c88ef542cf5b
Implements: blueprint using-oslo-libs
This commit is contained in:
parent
c4218f0385
commit
5fa735409f
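For context, the oslo.config / oslo.log pattern that the new freezer/common/config.py adopts boils down to the minimal sketch below. This is an illustrative sketch only, not freezer code: the single option shown is a placeholder standing in for the full freezer option set, and only the generic library calls (register_cli_opts, log.register_options, CONF(...), log.setup) mirror what the diff does.

    # Minimal oslo.config + oslo.log bootstrap (illustrative sketch only).
    import sys

    from oslo_config import cfg
    from oslo_log import log

    CONF = cfg.CONF

    _OPTS = [
        # Placeholder option; freezer registers its whole option set this way.
        cfg.StrOpt('backup-name', default=None,
                   help='Name used to identify the backup.'),
    ]


    def parse_args(argv):
        CONF.register_cli_opts(_OPTS)   # options become CLI flags
        log.register_options(CONF)      # adds --log-file, --debug, ...
        CONF(args=argv, project='freezer')
        log.setup(CONF, 'freezer')      # configure logging from CONF
        return CONF


    if __name__ == '__main__':
        conf = parse_args(sys.argv[1:])
        log.getLogger(__name__).info("backup name: %s", conf.backup_name)

The diff below applies this pattern across the freezer option set and removes the hand-rolled argparse and logging setup.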
@ -1,514 +0,0 @@
# (c) Copyright 2014,2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Arguments and general parameters definitions

from __future__ import print_function

import argparse
import logging
import os
from six.moves import configparser
import socket
import sys

from distutils import spawn as distspawn
from oslo_utils import encodeutils
from tempfile import NamedTemporaryFile

from freezer import config
from freezer import utils
from freezer import winutils

home = os.path.expanduser("~")

DEFAULT_LVM_SNAPNAME = 'freezer_backup_snap'
DEFAULT_LVM_SNAPSIZE = '1G'
DEFAULT_LVM_DIRMOUNT = '/var/lib/freezer'

DEFAULT_PARAMS = {
    'os_identity_api_version': None,
    'lvm_auto_snap': False, 'lvm_volgroup': False,
    'exclude': False, 'sql_server_conf': False,
    'backup_name': False, 'quiet': False,
    'container': 'freezer_backups', 'no_incremental': False,
    'max_segment_size': 67108864, 'lvm_srcvol': False,
    'download_limit': None, 'hostname': False, 'remove_from_date': False,
    'restart_always_level': False, 'lvm_dirmount': DEFAULT_LVM_DIRMOUNT,
    'dereference_symlink': '',
    'config': False, 'mysql_conf': False,
    'insecure': False, 'lvm_snapname': DEFAULT_LVM_SNAPNAME,
    'lvm_snapperm': 'ro', 'snapshot': False,
    'max_priority': False, 'max_level': False, 'path_to_backup': False,
    'encrypt_pass_file': False, 'volume': False, 'proxy': False,
    'cinder_vol_id': '', 'cindernative_vol_id': '',
    'nova_inst_id': '',
    'remove_older_than': None, 'restore_from_date': False,
    'upload_limit': None, 'always_level': False, 'version': False,
    'dry_run': False, 'lvm_snapsize': DEFAULT_LVM_SNAPSIZE,
    'restore_abs_path': False, 'log_file': None, 'log_level': "info",
    'mode': 'fs', 'action': 'backup', 'shadow': '', 'shadow_path': '',
    'windows_volume': '', 'command': None, 'metadata_out': False,
    'storage': 'swift', 'ssh_key': '', 'ssh_username': '', 'ssh_host': '',
    'ssh_port': 22, 'compression': 'gzip'
}

def backup_arguments():
|
||||
"""
|
||||
Default arguments and command line options interface. The function return
|
||||
a name space called backup_args.
|
||||
"""
|
||||
|
||||
conf_parser = argparse.ArgumentParser(
|
||||
description=__doc__,
|
||||
formatter_class=argparse.RawDescriptionHelpFormatter,
|
||||
add_help=False, prog='freezerc')
|
||||
|
||||
conf_parser.add_argument(
|
||||
'--config', action='store', dest='config', default=False,
|
||||
help=("Config file abs path. Option arguments are provided "
|
||||
"from config file. When config file is used any option "
|
||||
"from command line provided take precedence."))
|
||||
|
||||
args, remaining_argv = conf_parser.parse_known_args()
|
||||
defaults = DEFAULT_PARAMS.copy()
|
||||
conf = None
|
||||
if args.config:
|
||||
conf = config.Config.parse(args.config)
|
||||
defaults.update(conf.default)
|
||||
# TODO: restore_from_host is deprecated and to be removed
|
||||
defaults['hostname'] = conf.default.get('hostname') or \
|
||||
conf.default.get('restore_from_host')
|
||||
|
||||
# Generate a new argparse istance and inherit options from config parse
|
||||
arg_parser = argparse.ArgumentParser(
|
||||
parents=[conf_parser])
|
||||
|
||||
arg_parser.add_argument(
|
||||
'--action', choices=['backup', 'restore', 'info', 'admin',
|
||||
'exec'],
|
||||
help=(
|
||||
"Set the action to be taken. backup and restore are"
|
||||
" self explanatory, info is used to retrieve info from the"
|
||||
" storage media, exec is used to execute a script,"
|
||||
" while admin is used to delete old backups"
|
||||
" and other admin actions. Default backup."),
|
||||
dest='action', default='backup')
|
||||
arg_parser.add_argument(
|
||||
'-F', '--path-to-backup', '--file-to-backup', action='store',
|
||||
help="The file or directory you want to back up to Swift",
|
||||
dest='path_to_backup', default=False)
|
||||
arg_parser.add_argument(
|
||||
'-N', '--backup-name', action='store',
|
||||
help="The backup name you want to use to identify your backup \
|
||||
on Swift", dest='backup_name', default=False)
|
||||
arg_parser.add_argument(
|
||||
'-m', '--mode', action='store',
|
||||
help="Set the technology to back from. Options are, fs (filesystem),\
|
||||
mongo (MongoDB), mysql (MySQL), sqlserver (SQL Server)\
|
||||
Default set to fs", dest='mode',
|
||||
default='fs')
|
||||
arg_parser.add_argument(
|
||||
'-C', '--container', action='store',
|
||||
help="The Swift container (or path to local storage) "
|
||||
"used to upload files to",
|
||||
dest='container', default='freezer_backups')
|
||||
arg_parser.add_argument(
|
||||
'-s', '--snapshot', action='store_true',
|
||||
help=('Create a snapshot of the fs containing the resource to backup.'
|
||||
' When used, the lvm parameters will be guessed and/or the '
|
||||
'default values will be used, on windows it will invoke '
|
||||
'vssadmin'),
|
||||
dest='snapshot', default=False)
|
||||
arg_parser.add_argument(
|
||||
'--lvm-auto-snap', action='store',
|
||||
help=("Automatically guess the volume group and volume name for "
|
||||
"given PATH."),
|
||||
dest='lvm_auto_snap',
|
||||
default=False)
|
||||
arg_parser.add_argument(
|
||||
'--lvm-srcvol', action='store',
|
||||
help="Set the lvm volume you want to take a snaphost from. Default\
|
||||
no volume", dest='lvm_srcvol', default=False)
|
||||
arg_parser.add_argument(
|
||||
'--lvm-snapname', action='store',
|
||||
help="Set the lvm snapshot name to use. If the snapshot name already\
|
||||
exists, the old one will be used a no new one will be created. Default\
|
||||
freezer_backup_snap.", dest='lvm_snapname', default=False)
|
||||
arg_parser.add_argument(
|
||||
'--lvm-snap-perm', action='store', choices=['ro', 'rw'],
|
||||
help="Set the lvm snapshot permission to use. If the permission\
|
||||
is set to ro The snapshot will be immutable - read only -.\
|
||||
If the permission is set to rw it will be mutable",
|
||||
dest='lvm_snapperm', default='ro')
|
||||
arg_parser.add_argument(
|
||||
'--lvm-snapsize', action='store',
|
||||
help=('Set the lvm snapshot size when creating a new snapshot. '
|
||||
'Please add G for Gigabytes or M for Megabytes, i.e. 500M or 8G.'
|
||||
' Default {0}.'.format(DEFAULT_LVM_SNAPSIZE)),
|
||||
dest='lvm_snapsize', default=DEFAULT_LVM_SNAPSIZE)
|
||||
arg_parser.add_argument(
|
||||
'--lvm-dirmount', action='store',
|
||||
help=("Set the directory you want to mount the lvm snapshot to. "
|
||||
"Default to {0}".format(DEFAULT_LVM_DIRMOUNT)),
|
||||
dest='lvm_dirmount', default=DEFAULT_LVM_DIRMOUNT)
|
||||
arg_parser.add_argument(
|
||||
'--lvm-volgroup', action='store',
|
||||
help="Specify the volume group of your logical volume.\
|
||||
This is important to mount your snapshot volume. Default not set",
|
||||
dest='lvm_volgroup', default=False)
|
||||
arg_parser.add_argument(
|
||||
'--max-level', action='store',
|
||||
help="Set the backup level used with tar to implement incremental \
|
||||
backup. If a level 1 is specified but no level 0 is already \
|
||||
available, a level 0 will be done and subsequently backs to level 1.\
|
||||
Default 0 (No Incremental)", dest='max_level',
|
||||
type=int, default=False)
|
||||
arg_parser.add_argument(
|
||||
'--always-level', action='store', help="Set backup\
|
||||
maximum level used with tar to implement incremental backup. If a \
|
||||
level 3 is specified, the backup will be executed from level 0 to \
|
||||
level 3 and to that point always a backup level 3 will be executed. \
|
||||
It will not restart from level 0. This option has precedence over \
|
||||
--max-backup-level. Default False (Disabled)",
|
||||
dest='always_level', type=int, default=False)
|
||||
arg_parser.add_argument(
|
||||
'--restart-always-level', action='store', help="Restart the backup \
|
||||
from level 0 after n days. Valid only if --always-level option \
|
||||
if set. If --always-level is used together with --remove-older-then, \
|
||||
there might be the chance where the initial level 0 will be removed \
|
||||
Default False (Disabled)",
|
||||
dest='restart_always_level', type=float, default=False)
|
||||
arg_parser.add_argument(
|
||||
'-R', '--remove-older-then', '--remove-older-than', action='store',
|
||||
help=('Checks in the specified container for object older than the '
|
||||
'specified days.'
|
||||
'If i.e. 30 is specified, it will remove the remote object '
|
||||
'older than 30 days. Default False (Disabled) '
|
||||
'The option --remove-older-then is deprecated '
|
||||
'and will be removed soon'),
|
||||
dest='remove_older_than', type=float, default=None)
|
||||
arg_parser.add_argument(
|
||||
'--remove-from-date', action='store',
|
||||
help=('Checks the specified container and removes objects older than '
|
||||
'the provided datetime in the form "YYYY-MM-DDThh:mm:ss '
|
||||
'i.e. "1974-03-25T23:23:23". '
|
||||
'Make sure the "T" is between date and time '),
|
||||
dest='remove_from_date', default=False)
|
||||
arg_parser.add_argument(
|
||||
'--no-incremental', action='store_true',
|
||||
help='''Disable incremental feature. By default freezer build the
|
||||
meta data even for level 0 backup. By setting this option incremental
|
||||
meta data is not created at all. Default disabled''',
|
||||
dest='no_incremental', default=False)
|
||||
arg_parser.add_argument(
|
||||
'--hostname', action='store',
|
||||
help='''Set hostname to execute actions. If you are executing freezer
|
||||
from one host but you want to delete objects belonging to another
|
||||
host then you can set this option that hostname and execute appropriate
|
||||
actions. Default current node hostname.''',
|
||||
dest='hostname', default=False)
|
||||
arg_parser.add_argument(
|
||||
'--mysql-conf', action='store',
|
||||
help='''Set the MySQL configuration file where freezer retrieve
|
||||
important information as db_name, user, password, host, port.
|
||||
Following is an example of config file:
|
||||
# backup_mysql_conf
|
||||
host = <db-host>
|
||||
user = <mysqluser>
|
||||
password = <mysqlpass>
|
||||
port = <db-port>''',
|
||||
dest='mysql_conf', default=False)
|
||||
arg_parser.add_argument(
|
||||
'--metadata-out', action='store',
|
||||
help=('Set the filename to which write the metadata regarding '
|
||||
'the backup metrics. Use "-" to output to standard output.'),
|
||||
dest='metadata_out', default=False)
|
||||
|
||||
if winutils.is_windows():
|
||||
arg_parser.add_argument(
|
||||
'--log-file', action='store',
|
||||
help='Set log file. By default logs to ~/freezer.log',
|
||||
dest='log_file', default=os.path.join(home,
|
||||
'.freezer',
|
||||
'freezer.log'))
|
||||
else:
|
||||
arg_parser.add_argument(
|
||||
'--log-file', action='store',
|
||||
help='Set log file. By default logs to /var/log/freezer.log'
|
||||
'If that file is not writable, freezer tries to log'
|
||||
'to ~/.freezer/freezer.log',
|
||||
dest='log_file', default=None)
|
||||
arg_parser.add_argument(
|
||||
'--log-level', action='store', dest="log_level",
|
||||
default=DEFAULT_PARAMS['log_level'],
|
||||
choices=['all', 'debug', 'info', 'warn', 'error', 'critical'],
|
||||
help='Set logging level. Can be all, debug, info, warn,'
|
||||
'error, critical. Default value - info')
|
||||
arg_parser.add_argument(
|
||||
'--exclude', action='store', help="Exclude files,\
|
||||
given as a PATTERN.Ex: --exclude '*.log' will exclude any file \
|
||||
with name ending with .log. Default no exclude", dest='exclude',
|
||||
default=False)
|
||||
arg_parser.add_argument(
|
||||
'--dereference-symlink', choices=['none', 'soft', 'hard', 'all'],
|
||||
help=(
|
||||
"Follow hard and soft links and archive and dump the files they "
|
||||
" refer to. Default False."),
|
||||
dest='dereference_symlink', default='')
|
||||
arg_parser.add_argument(
|
||||
'--encrypt-pass-file', action='store',
|
||||
help="Passing a private key to this option, allow you to encrypt the \
|
||||
files before to be uploaded in Swift. Default do not encrypt.",
|
||||
dest='encrypt_pass_file', default=False)
|
||||
arg_parser.add_argument(
|
||||
'-M', '--max-segment-size', action='store',
|
||||
help="Set the maximum file chunk size in bytes to upload to swift\
|
||||
Default 33554432 bytes (32MB)",
|
||||
dest='max_segment_size', type=int, default=33554432)
|
||||
arg_parser.add_argument(
|
||||
'--restore-abs-path', action='store',
|
||||
help=('Set the absolute path where you want your data restored. '
|
||||
'Default False.'),
|
||||
dest='restore_abs_path', default=False)
|
||||
arg_parser.add_argument(
|
||||
'--restore-from-host', action='store',
|
||||
help='''Set the hostname used to identify the data you want to restore
|
||||
from. If you want to restore data in the same host where the backup
|
||||
was executed just type from your shell: "$ hostname" and the output is
|
||||
the value that needs to be passed to this option. Mandatory with
|
||||
Restore Default False. (Deprecated use "hostname" instead) ''',
|
||||
dest='hostname', default=False)
|
||||
arg_parser.add_argument(
|
||||
'--restore-from-date', action='store',
|
||||
help='''Set the absolute path where you want your data restored.
|
||||
Please provide datetime in format "YYYY-MM-DDThh:mm:ss"
|
||||
i.e. "1979-10-03T23:23:23". Make sure the "T" is between date and time
|
||||
Default None.''', dest='restore_from_date', default=None)
|
||||
arg_parser.add_argument(
|
||||
'--max-priority', action='store_true',
|
||||
help='''Set the cpu process to the highest priority (i.e. -20 on Linux)
|
||||
and real-time for I/O. The process priority will be set only if nice
|
||||
and ionice are installed Default disabled. Use with caution.''',
|
||||
dest='max_priority', default=False)
|
||||
arg_parser.add_argument(
|
||||
'-V', '--version', action='store_true',
|
||||
help='''Print the release version and exit''',
|
||||
dest='version', default=False)
|
||||
arg_parser.add_argument(
|
||||
'-q', '--quiet', action='store_true',
|
||||
help='''Suppress error messages''',
|
||||
dest='quiet', default=False)
|
||||
arg_parser.add_argument(
|
||||
'--insecure', action='store_true',
|
||||
help='Allow to access swift servers without checking SSL certs.',
|
||||
dest='insecure', default=False)
|
||||
arg_parser.add_argument(
|
||||
'--os-auth-ver', '--os-identity-api-version',
|
||||
choices=['1', '2', '2.0', '3'], action='store',
|
||||
help='Openstack identity api version, can be 1, 2, 2.0 or 3',
|
||||
dest='os_identity_api_version', default=None)
|
||||
arg_parser.add_argument(
|
||||
'--proxy', action='store',
|
||||
help='''Enforce proxy that alters system HTTP_PROXY and HTTPS_PROXY,
|
||||
use \'\' to eliminate all system proxies''',
|
||||
dest='proxy', default=False)
|
||||
arg_parser.add_argument(
|
||||
'--dry-run', action='store_true',
|
||||
help='Do everything except writing or removing objects',
|
||||
dest='dry_run', default=False)
|
||||
arg_parser.add_argument(
|
||||
'--upload-limit', action='store',
|
||||
help='''Upload bandwidth limit in Bytes per sec.
|
||||
Can be invoked with dimensions (10K, 120M, 10G).''',
|
||||
dest='upload_limit',
|
||||
type=utils.human2bytes,
|
||||
default=-1)
|
||||
arg_parser.add_argument(
|
||||
"--cinder-vol-id", action='store',
|
||||
help='Id of cinder volume for backup',
|
||||
dest="cinder_vol_id",
|
||||
default='')
|
||||
arg_parser.add_argument(
|
||||
"--nova-inst-id", action='store',
|
||||
help='Id of nova instance for backup',
|
||||
dest="nova_inst_id",
|
||||
default='')
|
||||
arg_parser.add_argument(
|
||||
"--cindernative-vol-id", action='store',
|
||||
help='Id of cinder volume for native backup',
|
||||
dest="cindernative_vol_id",
|
||||
default='')
|
||||
arg_parser.add_argument(
|
||||
'--download-limit', action='store',
|
||||
help='''Download bandwidth limit in Bytes per sec.
|
||||
Can be invoked with dimensions (10K, 120M, 10G).''',
|
||||
dest='download_limit',
|
||||
type=utils.human2bytes,
|
||||
default=-1)
|
||||
arg_parser.add_argument(
|
||||
'--sql-server-conf', action='store',
|
||||
help='''Set the SQL Server configuration file where freezer retrieve
|
||||
the sql server instance.
|
||||
Following is an example of config file:
|
||||
instance = <db-instance>''',
|
||||
dest='sql_server_conf', default=False)
|
||||
arg_parser.add_argument(
|
||||
'--command', action='store',
|
||||
help='Command executed by exec action',
|
||||
dest='command', default=None)
|
||||
arg_parser.add_argument(
|
||||
'--compression', action='store',
|
||||
choices=['gzip', 'bzip2', 'xz'],
|
||||
help='compression algorithm to use. gzip is default algorithm',
|
||||
dest='compression', default='gzip')
|
||||
|
||||
arg_parser.add_argument(
|
||||
'--storage', action='store',
|
||||
choices=['local', 'swift', 'ssh'],
|
||||
help="Storage for backups. Can be Swift or Local now. Swift is default"
|
||||
"storage now. Local stores backups on the same defined path and"
|
||||
"swift will store files in container.",
|
||||
dest='storage', default='swift')
|
||||
arg_parser.add_argument(
|
||||
'--ssh-key', action='store',
|
||||
help="Path to ssh-key for ssh storage only",
|
||||
dest='ssh_key', default=DEFAULT_PARAMS['ssh_key'])
|
||||
arg_parser.add_argument(
|
||||
'--ssh-username', action='store',
|
||||
help="Remote username for ssh storage only",
|
||||
dest='ssh_username', default=DEFAULT_PARAMS['ssh_username'])
|
||||
arg_parser.add_argument(
|
||||
'--ssh-host', action='store',
|
||||
help="Remote host for ssh storage only",
|
||||
dest='ssh_host', default=DEFAULT_PARAMS['ssh_host'])
|
||||
arg_parser.add_argument(
|
||||
'--ssh-port', action='store',
|
||||
help="Remote port for ssh storage only (default 22)", type=int,
|
||||
dest='ssh_port', default=DEFAULT_PARAMS['ssh_port'])
|
||||
|
||||
arg_parser.set_defaults(**defaults)
|
||||
backup_args = arg_parser.parse_args()
|
||||
|
||||
# Set default working directory to ~/.freezer. If the directory
|
||||
# does not exists it is created
|
||||
work_dir = os.path.join(home, '.freezer')
|
||||
backup_args.__dict__['work_dir'] = work_dir
|
||||
if not os.path.exists(work_dir):
|
||||
try:
|
||||
os.makedirs(work_dir)
|
||||
except (OSError, IOError) as err_msg:
|
||||
# This avoids freezer-agent to crash if it can't write to
|
||||
# ~/.freezer, which may happen on some env (for me,
|
||||
# it happens in Jenkins, as freezer-agent can't write to
|
||||
# /var/lib/jenkins).
|
||||
print(encodeutils.safe_decode(
|
||||
'{}'.format(err_msg)), file=sys.stderr)
|
||||
|
||||
# If hostname is not set, hostname of the current node will be used
|
||||
if not backup_args.hostname:
|
||||
backup_args.__dict__['hostname'] = socket.gethostname()
|
||||
|
||||
# If we have provided --proxy then overwrite the system HTTP_PROXY and
|
||||
# HTTPS_PROXY
|
||||
if backup_args.proxy:
|
||||
utils.alter_proxy(backup_args.proxy)
|
||||
|
||||
# MySQLdb object
|
||||
backup_args.__dict__['mysql_db_inst'] = ''
|
||||
backup_args.__dict__['storages'] = None
|
||||
if conf and conf.storages:
|
||||
backup_args.__dict__['storages'] = conf.storages
|
||||
|
||||
# Windows volume
|
||||
backup_args.__dict__['shadow'] = ''
|
||||
backup_args.__dict__['shadow_path'] = ''
|
||||
backup_args.__dict__['file_name'] = ''
|
||||
if winutils.is_windows():
|
||||
if backup_args.path_to_backup:
|
||||
backup_args.__dict__['windows_volume'] = \
|
||||
backup_args.path_to_backup[:3]
|
||||
|
||||
# Freezer version
|
||||
backup_args.__dict__['__version__'] = '1.2.0'
|
||||
|
||||
# todo(enugaev) move it to new command line param backup_media
|
||||
backup_media = 'fs'
|
||||
if backup_args.cinder_vol_id:
|
||||
backup_media = 'cinder'
|
||||
elif backup_args.cindernative_vol_id:
|
||||
backup_media = 'cindernative'
|
||||
elif backup_args.nova_inst_id:
|
||||
backup_media = 'nova'
|
||||
|
||||
backup_args.__dict__['backup_media'] = backup_media
|
||||
|
||||
backup_args.__dict__['time_stamp'] = None
|
||||
|
||||
if backup_args.upload_limit or backup_args.download_limit and not\
|
||||
winutils.is_windows():
|
||||
if backup_args.config:
|
||||
conf_file = NamedTemporaryFile(prefix='freezer_job_', delete=False)
|
||||
defaults['upload_limit'] = defaults['download_limit'] = -1
|
||||
utils.save_config_to_file(defaults, conf_file)
|
||||
conf_index = sys.argv.index('--config') + 1
|
||||
sys.argv[conf_index] = conf_file.name
|
||||
|
||||
if '--upload-limit' in sys.argv:
|
||||
index = sys.argv.index('--upload-limit')
|
||||
sys.argv.pop(index)
|
||||
sys.argv.pop(index)
|
||||
if '--download-limit' in sys.argv:
|
||||
index = sys.argv.index('--download-limit')
|
||||
sys.argv.pop(index)
|
||||
sys.argv.pop(index)
|
||||
|
||||
trickle_executable = distspawn.find_executable('trickle')
|
||||
if trickle_executable is None:
|
||||
trickle_executable = distspawn.find_executable(
|
||||
'trickle', path=":".join(sys.path))
|
||||
if trickle_executable is None:
|
||||
trickle_executable = distspawn.find_executable(
|
||||
'trickle', path=":".join(os.environ.get('PATH')))
|
||||
|
||||
trickle_lib = distspawn.find_executable('trickle-overload.so')
|
||||
if trickle_lib is None:
|
||||
trickle_lib = distspawn.find_executable(
|
||||
'trickle-overload.so', path=":".join(sys.path))
|
||||
if trickle_lib is None:
|
||||
trickle_lib = distspawn.find_executable(
|
||||
'trickle-overload.so', path=":".join(
|
||||
os.environ.get('PATH')))
|
||||
if trickle_executable and trickle_lib:
|
||||
logging.info("[*] Info: Starting trickle ...")
|
||||
os.environ['LD_PRELOAD'] = trickle_lib
|
||||
trickle_command = '{0} -d {1} -u {2} '.\
|
||||
format(trickle_executable,
|
||||
getattr(backup_args, 'download_limit') or -1,
|
||||
getattr(backup_args, 'upload_limit') or -1)
|
||||
backup_args.__dict__['trickle_command'] = trickle_command
|
||||
if "tricklecount" in os.environ:
|
||||
tricklecount = int(os.environ.get("tricklecount", 1))
|
||||
tricklecount += 1
|
||||
os.environ["tricklecount"] = str(tricklecount)
|
||||
|
||||
else:
|
||||
os.environ["tricklecount"] = str(1)
|
||||
else:
|
||||
logging.critical("[*] Trickle or Trickle library not found"
|
||||
". Switching to normal mode without limiting"
|
||||
" bandwidth")
|
||||
|
||||
return backup_args, arg_parser
|
@ -15,7 +15,6 @@ limitations under the License.

Freezer Backup modes related functions
"""
import logging
import os
import time

@ -24,7 +23,11 @@ from freezer import lvm
from freezer import utils
from freezer import vss
from freezer import winutils
from oslo_config import cfg
from oslo_log import log

CONF = cfg.CONF
logging = log.getLogger(__name__)
home = os.path.expanduser("~")

0
freezer/common/__init__.py
Normal file
585
freezer/common/config.py
Normal file
@ -0,0 +1,585 @@
# (c) Copyright 2014,2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function

from distutils import spawn as distspawn
import os
from oslo_config import cfg
from oslo_log import log
from oslo_utils import encodeutils
import socket
import sys
from tempfile import NamedTemporaryFile

from freezer import __version__ as FREEZER_VERSION
from freezer import config as freezer_config
from freezer import utils
from freezer import winutils


CONF = cfg.CONF
LOG = log.getLogger(__name__)

home = os.path.expanduser("~")

DEFAULT_LVM_SNAPNAME = 'freezer_backup_snap'
DEFAULT_LVM_SNAPSIZE = '1G'
DEFAULT_LVM_DIRMOUNT = '/var/lib/freezer'

DEFAULT_PARAMS = {
    'os_identity_api_version': None,
    'lvm_auto_snap': False, 'lvm_volgroup': False,
    'exclude': False, 'sql_server_conf': False,
    'backup_name': False, 'quiet': False,
    'container': 'freezer_backups', 'no_incremental': False,
    'max_segment_size': 67108864, 'lvm_srcvol': False,
    'download_limit': None, 'hostname': False, 'remove_from_date': False,
    'restart_always_level': False, 'lvm_dirmount': DEFAULT_LVM_DIRMOUNT,
    'dereference_symlink': '',
    'config': False, 'mysql_conf': False,
    'insecure': False, 'lvm_snapname': DEFAULT_LVM_SNAPNAME,
    'lvm_snapperm': 'ro', 'snapshot': False,
    'max_priority': False, 'max_level': False, 'path_to_backup': False,
    'encrypt_pass_file': False, 'volume': False, 'proxy': False,
    'cinder_vol_id': '', 'cindernative_vol_id': '',
    'nova_inst_id': '',
    'remove_older_than': None, 'restore_from_date': False,
    'upload_limit': None, 'always_level': False, 'version': False,
    'dry_run': False, 'lvm_snapsize': DEFAULT_LVM_SNAPSIZE,
    'restore_abs_path': False, 'log_file': None, 'log_level': "info",
    'mode': 'fs', 'action': 'backup', 'shadow': '', 'shadow_path': '',
    'windows_volume': '', 'command': None, 'metadata_out': False,
    'storage': 'swift', 'ssh_key': '', 'ssh_username': '', 'ssh_host': '',
    'ssh_port': 22, 'compression': 'gzip'
}


_COMMON = [
    cfg.StrOpt('action',
               choices=['backup', 'restore', 'info', 'admin', 'exec'],
               default='backup',
               dest='action',
               help="Set the action to be taken. backup and restore are self "
                    "explanatory, info is used to retrieve info from the "
                    "storage media, exec is used to execute a script, while "
                    "admin is used to delete old backups and other admin "
                    "actions. Default backup."
               ),
    cfg.StrOpt('path-to-backup',
               short='F',
               default=False,
               dest='path_to_backup',
               help='The file or directory you want to back up to Swift'
               ),
    cfg.StrOpt('backup-name',
               short='N',
               default=False,
               dest='backup_name',
               help="The backup name you want to use to identify your backup "
                    "on Swift"
               ),
    cfg.StrOpt('mode',
               short='m',
               default='fs',
               dest='mode',
               help="Set the technology to back from. Options are, fs "
                    "(filesystem),mongo (MongoDB), mysql (MySQL), sqlserver "
                    "(SQL Server) Default set to fs"),
    cfg.StrOpt('container',
               short='C',
               default='freezer_backups',
               dest='container',
               help="The Swift container (or path to local storage) used to "
                    "upload files to"),
    cfg.StrOpt('snapshot',
               short='s',
               default=False,
               dest='snapshot',
               help="Create a snapshot of the fs containing the resource to "
                    "backup. When used, the lvm parameters will be guessed "
                    "and/or the default values will be used, on windows it "
                    "will invoke vssadmin"),
    cfg.StrOpt('lvm-auto-snap',
               default=False,
               dest='lvm_auto_snap',
               help="Automatically guess the volume group and volume name for "
                    "given PATH."),
    cfg.StrOpt('lvm-srcvol',
               default=False,
               dest='lvm_srcvol',
               help="Set the lvm volume you want to take a snaphost from. "
                    "Default no volume"),
    cfg.StrOpt('lvm-snapname',
               default=DEFAULT_LVM_SNAPNAME,
               dest='lvm_snapname',
               help="Set the lvm snapshot name to use. If the snapshot name "
                    "already exists, the old one will be used a no new one will"
                    " be created. Default {0}.".format(DEFAULT_LVM_SNAPNAME)),
    cfg.StrOpt('lvm-snap-perm',
               choices=['ro', 'rw'],
               default='ro',
               dest='lvm_snapperm',
               help="Set the lvm snapshot permission to use. If the permission"
                    " is set to ro The snapshot will be immutable - read only"
                    " -. If the permission is set to rw it will be mutable"),
    cfg.StrOpt('lvm-snapsize',
               default=DEFAULT_LVM_SNAPSIZE,
               dest='lvm_snapsize',
               help="Set the lvm snapshot size when creating a new snapshot. "
                    "Please add G for Gigabytes or M for Megabytes, i.e. 500M "
                    "or 8G. Default {0}.".format(DEFAULT_LVM_SNAPSIZE)),
    cfg.StrOpt('lvm-dirmount',
               default=DEFAULT_LVM_DIRMOUNT,
               dest='lvm_dirmount',
               help="Set the directory you want to mount the lvm snapshot to. "
                    "Default to {0}".format(DEFAULT_LVM_DIRMOUNT)),
    cfg.StrOpt('lvm-volgroup',
               default=False,
               dest='lvm_volgroup',
               help="Specify the volume group of your logical volume. This is "
                    "important to mount your snapshot volume. Default not set"),
    cfg.IntOpt('max-level',
               default=0,
               dest='max_level',
               help="Set the backup level used with tar to implement "
                    "incremental backup. If a level 1 is specified but no level"
                    " 0 is already available, a level 0 will be done and "
                    "subsequently backs to level 1. Default 0 (No Incremental)"
               ),
    cfg.IntOpt('always-level',
               default=False,
               dest='always_level',
               help="Set backup maximum level used with tar to implement "
                    "incremental backup. If a level 3 is specified, the backup"
                    " will be executed from level 0 to level 3 and to that "
                    "point always a backup level 3 will be executed. It will "
                    "not restart from level 0. This option has precedence over"
                    " --max-backup-level. Default False (Disabled)"),
    cfg.FloatOpt('restart-always-level',
                 default=False,
                 dest='restart_always_level',
                 help="Restart the backup from level 0 after n days. Valid only"
                      " if --always-level option if set. If --always-level is "
                      "used together with --remove-older-then, there might be "
                      "the chance where the initial level 0 will be removed. "
                      "Default False (Disabled)"),
    cfg.FloatOpt('remove-older-than',
                 short='R',
                 default=False,
                 dest='remove_older_than',
                 help="Checks in the specified container for object older than "
                      "the specified days. If i.e. 30 is specified, it will "
                      "remove the remote object older than 30 days. Default "
                      "False (Disabled) The option --remove-older-then is "
                      "deprecated and will be removed soon",
                 deprecated_for_removal=True),
    cfg.StrOpt('remove-from-date',
               default=False,
               dest='remove_from_date',
               help="Checks the specified container and removes objects older "
                    "than the provided datetime in the form "
                    "'YYYY-MM-DDThh:mm:ss' i.e. '1974-03-25T23:23:23'. "
                    "Make sure the 'T' is between date and time "),
    cfg.StrOpt('no-incremental',
               default=False,
               dest='no_incremental',
               help="Disable incremental feature. By default freezer build the"
                    " meta data even for level 0 backup. By setting this option"
                    " incremental meta data is not created at all. Default "
                    "disabled"),
    cfg.StrOpt('hostname',
               default=False,
               dest='hostname',
               deprecated_name='restore-from-host',
               help="Set hostname to execute actions. If you are executing "
                    "freezer from one host but you want to delete objects "
                    "belonging to another host then you can set this option "
                    "that hostname and execute appropriate actions. Default "
                    "current node hostname."),
    cfg.StrOpt('mysql-conf',
               default=False,
               dest='mysql_conf',
               help="Set the MySQL configuration file where freezer retrieve "
                    "important information as db_name, user, password, host, "
                    "port. Following is an example of config file: "
                    "# backup_mysql_conf"
                    "host = <db-host>"
                    "user = <mysqluser>"
                    "password = <mysqlpass>"
                    "port = <db-port>"),
    cfg.StrOpt('metadata-out',
               default=False,
               dest='metadata_out',
               help="Set the filename to which write the metadata regarding the"
                    " backup metrics. Use '-' to output to standard output."),
    cfg.StrOpt('exclude',
               default='',
               dest='exclude',
               help="Exclude files,given as a PATTERN.Ex: --exclude '*.log' "
                    "will exclude any file with name ending with .log. "
                    "Default no exclude"
               ),
    cfg.StrOpt('dereference-symlink',
               dest='dereference_symlink',
               choices=['none', 'soft', 'hard', 'all'],
               help="Follow hard and soft links and archive and dump the files"
                    " they refer to. Default False."
               ),
    cfg.StrOpt('encrypt-pass-file',
               default=False,
               dest='encrypt_pass_file',
               help="Passing a private key to this option, allow you to encrypt"
                    " the files before to be uploaded in Swift. Default do "
                    "not encrypt."
               ),
    cfg.IntOpt('max-segment-size',
               short='M',
               default=33554432,
               dest='max_segment_size',
               help="Set the maximum file chunk size in bytes to upload to "
                    "swift Default 33554432 bytes (32MB)"
               ),
    cfg.StrOpt('restore-abs-path',
               default=False,
               dest='restore_abs_path',
               help="Set the absolute path where you want your data restored. "
                    "Default False."
               ),
    cfg.StrOpt('restore-from-date',
               default=None,
               dest='restore_from_date',
               help="Set the absolute path where you want your data restored. "
                    "Please provide datetime in format 'YYYY-MM-DDThh:mm:ss' "
                    "i.e. '1979-10-03T23:23:23'. Make sure the 'T' is between "
                    "date and time Default None."
               ),
    cfg.StrOpt('max-priority',
               default=False,
               dest='max_priority',
               help="Set the cpu process to the highest priority (i.e. -20 on "
                    "Linux) and real-time for I/O. The process priority will be"
                    " set only if nice and ionice are installed Default "
                    "disabled. Use with caution."
               ),
    cfg.StrOpt('quiet',
               short='q',
               default=False,
               dest='quiet',
               help="Suppress error messages"
               ),
    cfg.StrOpt('insecure',
               default=False,
               dest='insecure',
               help="Allow to access swift servers without checking SSL certs."
               ),
    cfg.StrOpt('os-identity-api-version',
               default=None,
               deprecated_name='os-auth-ver',
               dest='os_identity_api_version',
               choices=['1', '2', '2.0', '3'],
               help="Openstack identity api version, can be 1, 2, 2.0 or 3"
               ),
    cfg.StrOpt('proxy',
               default=False,
               dest='proxy',
               help="Enforce proxy that alters system HTTP_PROXY and "
                    "HTTPS_PROXY, use \'\' to eliminate all system proxies"
               ),
    cfg.StrOpt('dry-run',
               default=False,
               dest='dry_run',
               help="Do everything except writing or removing objects"
               ),
    cfg.IntOpt('upload-limit',
               default=-1,
               dest='upload_limit',
               help="Upload bandwidth limit in Bytes per sec. Can be invoked with "
                    "dimensions (10K, 120M, 10G)."
               ),
    cfg.IntOpt('download-limit',
               default=-1,
               dest='download_limit',
               help="Download bandwidth limit in Bytes per sec. Can be invoked "
                    " with dimensions (10K, 120M, 10G)."),
    cfg.StrOpt('cinder-vol-id',
               default='',
               dest='cinder_vol_id',
               help="Id of cinder volume for backup"
               ),
    cfg.StrOpt('cindernative-vol-id',
               default='',
               dest='cindernative_vol_id',
               help="Id of cinder volume for native backup"
               ),
    cfg.StrOpt('nova-inst-id',
               default='',
               dest='nova_inst_id',
               help="Id of nova instance for backup"
               ),
    cfg.StrOpt('sql-server-conf',
               default=False,
               dest='sql_server_conf',
               help="Set the SQL Server configuration file where freezer "
                    "retrieve the sql server instance. Following is an example"
                    " of config file: instance = <db-instance>"
               ),
    cfg.StrOpt('command',
               default=None,
               dest='command',
               help="Command executed by exec action"
               ),
    cfg.StrOpt('compression',
               default='gzip',
               dest='compression',
               choices=['gzip', 'bzip2', 'xz'],
               help="compression algorithm to use. gzip is default algorithm"
               ),
    cfg.StrOpt('storage',
               default='swift',
               dest='storage',
               choices=['local', 'swift', 'ssh'],
               help="Storage for backups. Can be Swift or Local now. Swift is "
                    "default storage now. Local stores backups on the same "
                    "defined path and swift will store files in container."
               ),
    cfg.StrOpt('ssh-key',
               default=DEFAULT_PARAMS['ssh_key'],
               dest='ssh_key',
               help="Path to ssh-key for ssh storage only"
               ),
    cfg.StrOpt('ssh-username',
               default=DEFAULT_PARAMS['ssh_username'],
               dest='ssh_username',
               help="Remote username for ssh storage only"
               ),
    cfg.StrOpt('ssh-host',
               default=DEFAULT_PARAMS['ssh_host'],
               dest='ssh_host',
               help="Remote host for ssh storage only"
               ),
    cfg.IntOpt('ssh-port',
               default=DEFAULT_PARAMS['ssh_port'],
               dest='ssh_port',
               help="Remote port for ssh storage only (default 22)"
               ),
    cfg.StrOpt('config',
               default='',
               dest='config',
               help="Config file abs path. Option arguments are provided from "
                    "config file. When config file is used any option from "
                    "command line provided take precedence."
               )

]


def config():
    CONF.register_opts(_COMMON)
    CONF.register_cli_opts(_COMMON)
    default_conf = None
    log.register_options(CONF)
    CONF(args=sys.argv[1:],
         project='freezer',
         default_config_files=default_conf,
         version=FREEZER_VERSION)


def setup_logging():
    """Set some oslo log defaults."""
    _DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN',
                           'qpid=WARN', 'stevedore=WARN', 'oslo_log=INFO',
                           'iso8601=WARN',
                           'requests.packages.urllib3.connectionpool=WARN',
                           'urllib3.connectionpool=WARN', 'websocket=WARN',
                           'keystonemiddleware=WARN', 'freezer=INFO']

    _DEFAULT_LOGGING_CONTEXT_FORMAT = (
        '%(asctime)s.%(msecs)03d %(process)d '
        '%(levelname)s %(name)s [%(request_id)s '
        '%(user_identity)s] %(instance)s'
        '%(message)s')
    log.set_defaults(_DEFAULT_LOGGING_CONTEXT_FORMAT, _DEFAULT_LOG_LEVELS)
    log.setup(CONF, 'freezer', version=FREEZER_VERSION)


def get_backup_args():
    defaults = DEFAULT_PARAMS.copy()

    class FreezerConfig(object):
        def __init__(self, args):
            self.__dict__.update(args)

    conf = None
    if CONF.get('config'):
        conf = freezer_config.Config.parse(CONF.get('config'))
        defaults.update(conf.default)
        # TODO: restore_from_host is deprecated and to be removed
        defaults['hostname'] = conf.default.get('hostname') or \
            conf.default.get('restore_from_host')

        # override default oslo values
        levels = {
            'all': log.NOTSET,
            'debug': log.DEBUG,
            'warn': log.WARN,
            'info': log.INFO,
            'error': log.ERROR,
            'critical': log.CRITICAL
        }
        CONF.set_override('log_file', levels.get(defaults['log_file'],
                                                 log.NOTSET))
        CONF.set_override('default_log_levels', defaults['log_level'])
    else:
        cli_opts = dict([(x,y) for x, y in CONF._namespace._cli.iteritems()
                         if y is not None])
        defaults.update(cli_opts)

    if not CONF.get('log_file'):
        log_file = None
        for file_name in ['/var/log/freezer.log', '~/.freezer/freezer.log']:
            try:
                log_file = prepare_logging(file_name)
            except IOError:
                pass
        if log_file:
            CONF.set_override('log_file', log_file)
        else:
            LOG.warn("log file cannot be created. Freezer will proceed with "
                     "default stdout and stderr")

    backup_args = FreezerConfig(defaults)
    # Set default working directory to ~/.freezer. If the directory
    # does not exists it is created
    work_dir = os.path.join(home, '.freezer')
    backup_args.__dict__['work_dir'] = work_dir
    if not os.path.exists(work_dir):
        try:
            os.makedirs(work_dir)
        except (OSError, IOError) as err_msg:
            # This avoids freezer-agent to crash if it can't write to
            # ~/.freezer, which may happen on some env (for me,
            # it happens in Jenkins, as freezer-agent can't write to
            # /var/lib/jenkins).
            print(encodeutils.safe_decode('{}'.format(err_msg)),
                  file=sys.stderr)

    # If hostname is not set, hostname of the current node will be used
    if not backup_args.hostname:
        backup_args.__dict__['hostname'] = socket.gethostname()

    # If we have provided --proxy then overwrite the system HTTP_PROXY and
    # HTTPS_PROXY
    if backup_args.proxy:
        utils.alter_proxy(backup_args.proxy)

    # MySQLdb object
    backup_args.__dict__['mysql_db_inst'] = ''
    backup_args.__dict__['storages'] = None
    if conf and conf.storages:
        backup_args.__dict__['storages'] = conf.storages

    # Windows volume
    backup_args.__dict__['shadow'] = ''
    backup_args.__dict__['shadow_path'] = ''
    backup_args.__dict__['file_name'] = ''
    if winutils.is_windows():
        if backup_args.path_to_backup:
            backup_args.__dict__['windows_volume'] = \
                backup_args.path_to_backup[:3]

    # todo(enugaev) move it to new command line param backup_media
    backup_media = 'fs'
    if backup_args.cinder_vol_id:
        backup_media = 'cinder'
    elif backup_args.cindernative_vol_id:
        backup_media = 'cindernative'
    elif backup_args.nova_inst_id:
        backup_media = 'nova'

    backup_args.__dict__['backup_media'] = backup_media

    backup_args.__dict__['time_stamp'] = None

    if backup_args.upload_limit or backup_args.download_limit and not\
            winutils.is_windows():
        if backup_args.config:
            conf_file = NamedTemporaryFile(prefix='freezer_job_', delete=False)
            defaults['upload_limit'] = defaults['download_limit'] = -1
            utils.save_config_to_file(defaults, conf_file)
            conf_index = sys.argv.index('--config') + 1
            sys.argv[conf_index] = conf_file.name

        if '--upload-limit' in sys.argv:
            index = sys.argv.index('--upload-limit')
            sys.argv.pop(index)
            sys.argv.pop(index)
        if '--download-limit' in sys.argv:
            index = sys.argv.index('--download-limit')
            sys.argv.pop(index)
            sys.argv.pop(index)

        trickle_executable = distspawn.find_executable('trickle')
        if trickle_executable is None:
            trickle_executable = distspawn.find_executable(
                'trickle', path=":".join(sys.path))
            if trickle_executable is None:
                trickle_executable = distspawn.find_executable(
                    'trickle', path=":".join(os.environ.get('PATH')))

        trickle_lib = distspawn.find_executable('trickle-overload.so')
        if trickle_lib is None:
            trickle_lib = distspawn.find_executable(
                'trickle-overload.so', path=":".join(sys.path))
            if trickle_lib is None:
                trickle_lib = distspawn.find_executable(
                    'trickle-overload.so', path=":".join(
                        os.environ.get('PATH')))
        if trickle_executable and trickle_lib:
            LOG.info("[*] Info: Starting trickle ...")
            os.environ['LD_PRELOAD'] = trickle_lib
            trickle_command = '{0} -d {1} -u {2} '.\
                format(trickle_executable,
                       getattr(backup_args, 'download_limit') or -1,
                       getattr(backup_args, 'upload_limit') or -1)
            backup_args.__dict__['trickle_command'] = trickle_command
            if "tricklecount" in os.environ:
                tricklecount = int(os.environ.get("tricklecount", 1))
                tricklecount += 1
                os.environ["tricklecount"] = str(tricklecount)

            else:
                os.environ["tricklecount"] = str(1)
        else:
            LOG.critical("[*] Trickle or Trickle library not found. Switching "
                         "to normal mode without limiting bandwidth")
    return backup_args


def prepare_logging(log_file='~/.freezer/freezer.log'):
    """
    Creates log directory and log file if no log files provided
    :return:
    """
    expanded_file_name = os.path.expanduser(log_file)
    expanded_dir_name = os.path.dirname(expanded_file_name)
    utils.create_dir(expanded_dir_name, do_log=False)
    return expanded_file_name


def list_opts():
    _OPTS = {
        None: _COMMON
    }

    return _OPTS.items()
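A note on the list_opts() helper above: oslo-config-generator discovers such functions through the 'oslo.config.opts' entry point so it can emit a sample configuration file. This commit does not show setup.cfg, so the wiring below is an assumption about how the hook would typically be registered, not part of the diff:

    # setup.cfg (hypothetical wiring, not shown in this commit)
    [entry_points]
    oslo.config.opts =
        freezer = freezer.common.config:list_opts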
@ -20,6 +20,7 @@ import re
from six.moves import configparser
from six.moves import cStringIO


class Config:

    @staticmethod
@ -15,12 +15,16 @@ limitations under the License.

Freezer general utils functions
"""
import logging
import multiprocessing
import time

from freezer import streaming
from freezer import utils
from oslo_config import cfg
from oslo_log import log

CONF = cfg.CONF
logging = log.getLogger(__name__)


class BackupEngine(object):

@ -24,7 +24,11 @@ from freezer import exec_cmd
from freezer import restore
from freezer import utils

import logging
from oslo_config import cfg
from oslo_log import log

CONF = cfg.CONF
logging = log.getLogger(__name__)


class Job:

@ -16,13 +16,14 @@ limitations under the License.
Freezer main execution function
"""
import json
import logging
import os
from oslo_config import cfg
from oslo_log import log
import subprocess
import sys

from freezer import arguments
from freezer import bandwidth
from freezer.common import config as freezer_config
from freezer import config
from freezer.engine.tar import tar_engine
from freezer import job
@ -35,55 +36,18 @@ from freezer import utils
from freezer import validator
from freezer import winutils

CONF = cfg.CONF
LOG = log.getLogger(__name__)


def freezer_main(backup_args):
    """Freezer main loop for job execution.
    """

    def configure_log_file_using_defaults():
        """ Configure log file for freezer """

        dry_run_message = ''
        if backup_args.dry_run:
            dry_run_message = '[DRY_RUN] '

        def configure_logging(log_file, str_level):
            levels = {
                'all': logging.NOTSET,
                'debug': logging.DEBUG,
                'warn': logging.WARN,
                'info': logging.INFO,
                'error': logging.ERROR,
                'critical': logging.CRITICAL
            }

            expanded_file_name = os.path.expanduser(log_file)
            expanded_dir_name = os.path.dirname(expanded_file_name)
            utils.create_dir(expanded_dir_name, do_log=False)
            logging.basicConfig(
                filename=expanded_file_name,
                level=levels[str_level],
                format=('%(asctime)s %(name)s %(levelname)s {0}%(message)s'.
                        format(dry_run_message)))
            return expanded_file_name

        if backup_args.log_file:
            return configure_logging(backup_args.log_file,
                                     backup_args.log_level)

        for file_name in ['/var/log/freezer.log', '~/.freezer/freezer.log']:
            try:
                return configure_logging(file_name, backup_args.log_level)
            except IOError:
                pass

        raise Exception("Unable to write to log file")

    def set_max_process_priority():
        """ Set freezer in max priority on the os """
        # children processes inherit niceness from father
        try:
            logging.warning(
            LOG.warning(
                '[*] Setting freezer execution with high CPU and I/O priority')
            PID = os.getpid()
            # Set cpu priority
@ -95,15 +59,10 @@ def freezer_main(backup_args):
                u'-p', u'{0}'.format(PID)
            ])
        except Exception as priority_error:
            logging.warning('[*] Priority: {0}'.format(priority_error))

    try:
        log_file_name = configure_log_file_using_defaults()
    except Exception as err:
        fail(1, err, quiet=backup_args.quiet, do_log=False)
            LOG.warning('[*] Priority: {0}'.format(priority_error))

    if not backup_args.quiet:
        logging.info('log file at {0}'.format(log_file_name))
        LOG.info('log file at {0}'.format(CONF.get('log_file')))

    if backup_args.max_priority:
        set_max_process_priority()
@ -139,8 +98,8 @@ def freezer_main(backup_args):
    if hasattr(backup_args, 'trickle_command'):
        if "tricklecount" in os.environ:
            if int(os.environ.get("tricklecount")) > 1:
                logging.critical("[*] Trickle seems to be not working,"
                                 " Switching to normal mode ")
                LOG.critical("[*] Trickle seems to be not working, Switching "
                             "to normal mode ")
                run_job(backup_args, storage)

        freezer_command = '{0} {1}'.format(backup_args.trickle_command,
@ -154,8 +113,8 @@ def freezer_main(backup_args):
        output, error = process.communicate()

        if process.returncode:
            logging.error("[*] Trickle Error: {0}".format(error))
            logging.critical("[*] Switching to work without trickle ...")
            LOG.error("[*] Trickle Error: {0}".format(error))
            LOG.critical("[*] Switching to work without trickle ...")
            run_job(backup_args, storage)

        else:
@ -178,7 +137,7 @@ def fail(exit_code, e, quiet, do_log=True):
    if not quiet:
        sys.stderr.write(msg)
    if do_log:
        logging.critical(msg)
        LOG.critical(msg)
    return exit_code

@ -216,25 +175,21 @@ def storage_from_dict(backup_args, work_dir, max_segment_size,
            backup_args['ssh_key'], backup_args['ssh_username'],
            backup_args['ssh_host'], int(backup_args.get('ssh_port', 22)))
    else:
        raise Exception("Not storage found for name " + backup_args['storage'])
        raise Exception("Not storage found for name {0}".format(
            backup_args['storage']))
    return storage


def main():
    """Freezerc binary main execution"""

    (backup_args, opt_args) = arguments.backup_arguments()
    """freezer-agent/freezerc binary main execution"""
    freezer_config.config()
    freezer_config.setup_logging()
    backup_args = freezer_config.get_backup_args()
    if len(sys.argv) < 2:
        CONF.print_help()
        sys.exit(1)
    try:
        if backup_args.version:
            print("freezer version {0}".format(backup_args.__version__))
            sys.exit(1)

        if len(sys.argv) < 2:
            opt_args.print_help()
            sys.exit(1)

        freezer_main(backup_args)

    except ValueError as err:
        return fail(1, err, backup_args.quiet)
    except ImportError as err:
@ -242,6 +197,3 @@ def main():
    except Exception as err:
        return fail(1, err, backup_args.quiet)

if __name__ == '__main__':

    sys.exit(main())
@ -12,8 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.


import logging
import time

from cinderclient import client as cclient