Fix tox.ini file

Remove the select line from tox.ini.
Because of that line, the pep8 checks are currently not being run.
Run pep8 checks.
Fix code to pass pep8 checks.

Change-Id: Iacef4386b1214f95b1bd106c17a5f714f948a1a1
Closes-Bug: #1593452
This commit is contained in:
Deklan Dieterly 2016-06-16 16:09:48 -06:00
parent 3cdfd8fba3
commit 694c98e063
26 changed files with 169 additions and 153 deletions

View File

@ -112,9 +112,10 @@ def build_os_options():
dest='os_region_name'), dest='os_region_name'),
cfg.StrOpt('os-token', cfg.StrOpt('os-token',
default=env('OS_TOKEN'), default=env('OS_TOKEN'),
help='Specify an existing token to use instead of retrieving' help='Specify an existing token to use instead of '
' one via authentication (e.g. with username & ' 'retrieving one via authentication '
'password). Defaults to env[OS_TOKEN].', '(e.g. with username & password). Defaults '
'to env[OS_TOKEN].',
dest='os_token'), dest='os_token'),
cfg.StrOpt('os-identity-api-version', cfg.StrOpt('os-identity-api-version',
default=env('OS_IDENTITY_API_VERSION'), default=env('OS_IDENTITY_API_VERSION'),
@ -243,12 +244,11 @@ class Client(object):
self.backups = backups.BackupsManager(self, verify=self.verify) self.backups = backups.BackupsManager(self, verify=self.verify)
self.registration = registration.RegistrationManager( self.registration = registration.RegistrationManager(
self, verify=self.verify) self, verify=self.verify)
self.jobs = jobs.JobManager(self, verify=self.verify) self.jobs = jobs.JobManager(self, verify=self.verify)
self.actions = actions.ActionManager(self, verify=self.verify) self.actions = actions.ActionManager(self, verify=self.verify)
self.sessions = sessions.SessionManager(self, verify=self.verify) self.sessions = sessions.SessionManager(self, verify=self.verify)
@cached_property @cached_property
def session(self): def session(self):
if self._session: if self._session:

View File

@ -127,7 +127,7 @@ class SessionManager(object):
doc = {"start": { doc = {"start": {
"job_id": job_id, "job_id": job_id,
"current_tag": session_tag "current_tag": session_tag
}} }}
r = requests.post(endpoint, r = requests.post(endpoint,
headers=self.headers, headers=self.headers,
data=json.dumps(doc), data=json.dumps(doc),
@ -153,7 +153,7 @@ class SessionManager(object):
"job_id": job_id, "job_id": job_id,
"current_tag": session_tag, "current_tag": session_tag,
"result": result "result": result
}} }}
r = requests.post(endpoint, r = requests.post(endpoint,
headers=self.headers, headers=self.headers,
data=json.dumps(doc), data=json.dumps(doc),

View File

@ -39,11 +39,11 @@ DEFAULT_LVM_SNAP_BASENAME = 'freezer_backup_snap'
DEFAULT_SSH_PORT = 22 DEFAULT_SSH_PORT = 22
_DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN', _DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN',
'qpid=WARN', 'stevedore=WARN', 'oslo_log=INFO', 'qpid=WARN', 'stevedore=WARN', 'oslo_log=INFO',
'iso8601=WARN', 'iso8601=WARN',
'requests.packages.urllib3.connectionpool=WARN', 'requests.packages.urllib3.connectionpool=WARN',
'urllib3.connectionpool=WARN', 'websocket=WARN', 'urllib3.connectionpool=WARN', 'websocket=WARN',
'keystonemiddleware=WARN', 'freezer=INFO'] 'keystonemiddleware=WARN', 'freezer=INFO']
_DEFAULT_LOGGING_CONTEXT_FORMAT = ( _DEFAULT_LOGGING_CONTEXT_FORMAT = (
'%(asctime)s.%(msecs)03d %(process)d ' '%(asctime)s.%(msecs)03d %(process)d '
@ -142,26 +142,28 @@ _COMMON = [
" -. If the permission is set to rw it will be mutable"), " -. If the permission is set to rw it will be mutable"),
cfg.StrOpt('lvm-snapsize', cfg.StrOpt('lvm-snapsize',
dest='lvm_snapsize', dest='lvm_snapsize',
help="Set the lvm snapshot size when creating a new snapshot. " help="Set the lvm snapshot size when creating a new "
"Please add G for Gigabytes or M for Megabytes, i.e. 500M " "snapshot. Please add G for Gigabytes or "
"or 8G. It is also possible to use percentages as with the" "M for Megabytes, i.e. 500M or 8G. It is also possible "
" -l option of lvm, i.e. 80%%FREE Default {0}.".format( "to use percentages as with the -l option of lvm, i.e. "
DEFAULT_LVM_SNAPSIZE)), "80%%FREE Default {0}.".format(DEFAULT_LVM_SNAPSIZE)),
cfg.StrOpt('lvm-dirmount', cfg.StrOpt('lvm-dirmount',
dest='lvm_dirmount', dest='lvm_dirmount',
help="Set the directory you want to mount the lvm snapshot to. " help="Set the directory you want to mount the lvm snapshot to. "
"If not provided, a unique name will be generated with the" "If not provided, a unique name will be generated with "
"basename {0} ".format(DEFAULT_LVM_MOUNT_BASENAME)), "the basename {0} ".format(DEFAULT_LVM_MOUNT_BASENAME)),
cfg.StrOpt('lvm-volgroup', cfg.StrOpt('lvm-volgroup',
dest='lvm_volgroup', dest='lvm_volgroup',
help="Specify the volume group of your logical volume. This is " help="Specify the volume group of your logical volume. This is "
"important to mount your snapshot volume. Default not set"), "important to mount your snapshot volume. Default not "
"set"),
cfg.IntOpt('max-level', cfg.IntOpt('max-level',
dest='max_level', dest='max_level',
help="Set the backup level used with tar to implement " help="Set the backup level used with tar to implement "
"incremental backup. If a level 1 is specified but no level" "incremental backup. If a level 1 is specified but "
" 0 is already available, a level 0 will be done and " "no level 0 is already available, a level 0 will be "
"subsequently backs to level 1. Default 0 (No Incremental)" "done and subsequently backs to level 1. "
"Default 0 (No Incremental)"
), ),
cfg.IntOpt('always-level', cfg.IntOpt('always-level',
dest='always_level', dest='always_level',
@ -173,20 +175,21 @@ _COMMON = [
" --max-backup-level. Default False (Disabled)"), " --max-backup-level. Default False (Disabled)"),
cfg.FloatOpt('restart-always-level', cfg.FloatOpt('restart-always-level',
dest='restart_always_level', dest='restart_always_level',
help="Restart the backup from level 0 after n days. Valid only" help="Restart the backup from level 0 after n days. Valid "
" if --always-level option if set. If --always-level is " "only if --always-level option if set. If "
"used together with --remove-older-then, there might be " "--always-level is used together with "
"--remove-older-then, there might be "
"the chance where the initial level 0 will be removed. " "the chance where the initial level 0 will be removed. "
"Default False (Disabled)"), "Default False (Disabled)"),
cfg.FloatOpt('remove-older-than', cfg.FloatOpt('remove-older-than',
short='R', short='R',
dest='remove_older_than', dest='remove_older_than',
help="Checks in the specified container for object older than " help="Checks in the specified container for object older "
"the specified days. If i.e. 30 is specified, it will " "than the specified days. If i.e. 30 is specified, it "
"remove the remote object older than 30 days. Default " "will remove the remote object older than 30 days. "
"False (Disabled) The option --remove-older-then is " "Default False (Disabled) The option "
"deprecated and will be removed soon", "--remove-older-then is deprecated and will be removed "
deprecated_for_removal=True), "soon", deprecated_for_removal=True),
cfg.StrOpt('remove-from-date', cfg.StrOpt('remove-from-date',
dest='remove_from_date', dest='remove_from_date',
help="Checks the specified container and removes objects older " help="Checks the specified container and removes objects older "
@ -196,9 +199,9 @@ _COMMON = [
cfg.StrOpt('no-incremental', cfg.StrOpt('no-incremental',
dest='no_incremental', dest='no_incremental',
help="Disable incremental feature. By default freezer build the" help="Disable incremental feature. By default freezer build the"
" meta data even for level 0 backup. By setting this option" " meta data even for level 0 backup. By setting this "
" incremental meta data is not created at all. Default " "option incremental meta data is not created at all. "
"disabled"), "Default disabled"),
cfg.StrOpt('hostname', cfg.StrOpt('hostname',
dest='hostname', dest='hostname',
deprecated_name='restore-from-host', deprecated_name='restore-from-host',
@ -219,8 +222,9 @@ _COMMON = [
"port = <db-port>"), "port = <db-port>"),
cfg.StrOpt('metadata-out', cfg.StrOpt('metadata-out',
dest='metadata_out', dest='metadata_out',
help="Set the filename to which write the metadata regarding the" help="Set the filename to which write the metadata "
" backup metrics. Use '-' to output to standard output."), "regarding the backup metrics. Use '-' to output to "
"standard output."),
cfg.StrOpt('exclude', cfg.StrOpt('exclude',
dest='exclude', dest='exclude',
help="Exclude files,given as a PATTERN.Ex: --exclude '*.log' " help="Exclude files,given as a PATTERN.Ex: --exclude '*.log' "
@ -235,9 +239,9 @@ _COMMON = [
), ),
cfg.StrOpt('encrypt-pass-file', cfg.StrOpt('encrypt-pass-file',
dest='encrypt_pass_file', dest='encrypt_pass_file',
help="Passing a private key to this option, allow you to encrypt" help="Passing a private key to this option, allow you "
" the files before to be uploaded in Swift. Default do " "to encrypt the files before to be uploaded in Swift. "
"not encrypt." "Default do not encrypt."
), ),
cfg.IntOpt('max-segment-size', cfg.IntOpt('max-segment-size',
short='M', short='M',
@ -252,9 +256,9 @@ _COMMON = [
), ),
cfg.StrOpt('restore-from-date', cfg.StrOpt('restore-from-date',
dest='restore_from_date', dest='restore_from_date',
help="Set the date of the backup from which you want to restore." help="Set the date of the backup from which you want to "
"This will select the most recent backup previous to " "restore. This will select the most recent backup "
"the specified date (included). Example: " "previous to the specified date (included). Example: "
"if the last backup was created at '2016-03-22T14:29:01' " "if the last backup was created at '2016-03-22T14:29:01' "
"and restore-from-date is set to '2016-03-22T14:29:01', " "and restore-from-date is set to '2016-03-22T14:29:01', "
"the backup will be restored successfully. The same for " "the backup will be restored successfully. The same for "
@ -269,9 +273,9 @@ _COMMON = [
cfg.StrOpt('max-priority', cfg.StrOpt('max-priority',
dest='max_priority', dest='max_priority',
help="Set the cpu process to the highest priority (i.e. -20 on " help="Set the cpu process to the highest priority (i.e. -20 on "
"Linux) and real-time for I/O. The process priority will be" "Linux) and real-time for I/O. The process priority "
" set only if nice and ionice are installed Default " "will be set only if nice and ionice are installed "
"disabled. Use with caution." "Default disabled. Use with caution."
), ),
cfg.BoolOpt('quiet', cfg.BoolOpt('quiet',
short='q', short='q',
@ -303,8 +307,8 @@ _COMMON = [
"Can be invoked with dimensions (10K, 120M, 10G)."), "Can be invoked with dimensions (10K, 120M, 10G)."),
cfg.IntOpt('download-limit', cfg.IntOpt('download-limit',
dest='download_limit', dest='download_limit',
help="Download bandwidth limit in Bytes per sec. Can be invoked " help="Download bandwidth limit in Bytes per sec. Can be "
" with dimensions (10K, 120M, 10G)."), "invoked with dimensions (10K, 120M, 10G)."),
cfg.StrOpt('cinder-vol-id', cfg.StrOpt('cinder-vol-id',
dest='cinder_vol_id', dest='cinder_vol_id',
help="Id of cinder volume for backup" help="Id of cinder volume for backup"
@ -367,8 +371,8 @@ _COMMON = [
cfg.BoolOpt('consistency_check', cfg.BoolOpt('consistency_check',
dest='consistency_check', dest='consistency_check',
help="When true will checksum the files before backup. " help="When true will checksum the files before backup. "
"The commuted backup checksum is stored as bakcup metadata" "The computed backup checksum is stored as backup "
" and can be retrieved through the freezer-api. " "metadata and can be retrieved through the freezer-api. "
"On restore it is possible to check for consistency. " "On restore it is possible to check for consistency. "
"Please note this option is currently only available " "Please note this option is currently only available "
"for file system backups. " "for file system backups. "
@ -419,9 +423,9 @@ def get_backup_args():
if CONF.get('config'): if CONF.get('config'):
conf = freezer_config.Config.parse(CONF.get('config')) conf = freezer_config.Config.parse(CONF.get('config'))
defaults.update(conf.default) defaults.update(conf.default)
# TODO: restore_from_host is deprecated and to be removed # TODO(ANONYMOUS): restore_from_host is deprecated and to be removed
defaults['hostname'] = conf.default.get('hostname') or \ defaults['hostname'] = (conf.default.get('hostname') or
conf.default.get('restore_from_host') conf.default.get('restore_from_host'))
# override default oslo values # override default oslo values
levels = { levels = {
'all': log.NOTSET, 'all': log.NOTSET,
@ -434,7 +438,7 @@ def get_backup_args():
if defaults['log_file']: if defaults['log_file']:
CONF.set_override('log_file', defaults['log_file'], levels.get( CONF.set_override('log_file', defaults['log_file'], levels.get(
log.NOTSET)) log.NOTSET))
CONF.set_override('default_log_levels', _DEFAULT_LOG_LEVELS) CONF.set_override('default_log_levels', _DEFAULT_LOG_LEVELS)
@ -495,7 +499,7 @@ def get_backup_args():
backup_args.__dict__['windows_volume'] = \ backup_args.__dict__['windows_volume'] = \
backup_args.path_to_backup[:3] backup_args.path_to_backup[:3]
# todo(enugaev) move it to new command line param backup_media # TODO(enugaev): move it to new command line param backup_media
if backup_args.lvm_auto_snap: if backup_args.lvm_auto_snap:
raise Exception('lvm-auto-snap is deprecated. ' raise Exception('lvm-auto-snap is deprecated. '

View File

@ -18,7 +18,7 @@ Freezer general utils functions
import abc import abc
import multiprocessing import multiprocessing
from multiprocessing.queues import SimpleQueue from multiprocessing.queues import SimpleQueue
import six import six
# PyCharm will not recognize queue. Puts red squiggle line under it. That's OK. # PyCharm will not recognize queue. Puts red squiggle line under it. That's OK.
from six.moves import queue from six.moves import queue
@ -39,8 +39,8 @@ logging = log.getLogger(__name__)
class BackupEngine(object): class BackupEngine(object):
""" """
The main part of making a backup and making a restore is the mechanism of The main part of making a backup and making a restore is the mechanism of
implementing it. For a long time Freezer had only one mechanism of doing it - implementing it. For a long time Freezer had only one mechanism of
invoking gnutar and it was heavy hard-coded. doing it - invoking gnutar and it was heavy hard-coded.
Currently we are going to support many different approaches. Currently we are going to support many different approaches.
One of them is rsync. Having many different implementations requires to One of them is rsync. Having many different implementations requires to
@ -122,10 +122,10 @@ class BackupEngine(object):
return False return False
got_exception = None got_exception = None
got_exception = (handle_except_queue(read_except_queue) got_exception = (handle_except_queue(read_except_queue) or
or got_exception) got_exception)
got_exception = (handle_except_queue(write_except_queue) got_exception = (handle_except_queue(write_except_queue) or
or got_exception) got_exception)
if (got_exception): if (got_exception):
raise EngineException("Engine error. Failed to backup.") raise EngineException("Engine error. Failed to backup.")
@ -220,10 +220,10 @@ class BackupEngine(object):
return False return False
got_exception = None got_exception = None
got_exception = (handle_except_SimpleQueue(read_except_queue) got_exception = (handle_except_SimpleQueue(read_except_queue) or
or got_exception) got_exception)
got_exception = (handle_except_SimpleQueue(write_except_queue) got_exception = (handle_except_SimpleQueue(write_except_queue) or
or got_exception) got_exception)
if tar_stream.exitcode or got_exception: if tar_stream.exitcode or got_exception:
raise EngineException("Engine error. Failed to restore file.") raise EngineException("Engine error. Failed to restore file.")

View File

@ -12,7 +12,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
class EngineException(Exception): class EngineException(Exception):
def __init__(self, message): def __init__(self, message):
super(EngineException, self).__init__(message) super(EngineException, self).__init__(message)

View File

@ -18,7 +18,7 @@ Freezer Tar related functions
from freezer.utils import utils from freezer.utils import utils
class TarCommandBuilder: class TarCommandBuilder(object):
""" """
Building a tar cmd command. To build command invoke method build. Building a tar cmd command. To build command invoke method build.
""" """
@ -102,7 +102,7 @@ class TarCommandBuilder:
return tar_command return tar_command
class TarCommandRestoreBuilder: class TarCommandRestoreBuilder(object):
WINDOWS_TEMPLATE = '{0} -x {1} --incremental --unlink-first ' \ WINDOWS_TEMPLATE = '{0} -x {1} --incremental --unlink-first ' \
'--ignore-zeros' '--ignore-zeros'
DRY_RUN_TEMPLATE = '{0} {1} --incremental --list ' \ DRY_RUN_TEMPLATE = '{0} {1} --incremental --list ' \

View File

@ -104,7 +104,8 @@ class TarBackupEngine(engine.BackupEngine):
try: try:
metadata = backup.metadata() metadata = backup.metadata()
if not self.encrypt_pass_file and metadata.get("encryption", False): if (not self.encrypt_pass_file and
metadata.get("encryption", False)):
raise Exception("Cannot restore encrypted backup without key") raise Exception("Cannot restore encrypted backup without key")
tar_command = tar_builders.TarCommandRestoreBuilder( tar_command = tar_builders.TarCommandRestoreBuilder(
@ -127,9 +128,9 @@ class TarBackupEngine(engine.BackupEngine):
tar_process = subprocess.Popen( tar_process = subprocess.Popen(
command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, command, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True) stderr=subprocess.PIPE, shell=True)
# Start loop reading the pipe and pass the data to the tar std input. # Start loop reading the pipe and pass the data to the tar
# If EOFError exception is raised, the loop end the std err will be # std input. If EOFError exception is raised, the loop end
# checked for errors. # the std err will be checked for errors.
try: try:
while True: while True:
tar_process.stdin.write(read_pipe.recv_bytes()) tar_process.stdin.write(read_pipe.recv_bytes())

View File

@ -58,8 +58,10 @@ def freezer_main(backup_args):
work_dir = backup_args.work_dir work_dir = backup_args.work_dir
max_segment_size = backup_args.max_segment_size max_segment_size = backup_args.max_segment_size
if backup_args.storage == 'swift' or ( if (backup_args.storage ==
backup_args.backup_media in ['nova', 'cinder', 'cindernative']): 'swift' or
backup_args.backup_media in ['nova', 'cinder', 'cindernative']):
backup_args.client_manager = get_client_manager(backup_args.__dict__) backup_args.client_manager = get_client_manager(backup_args.__dict__)
if backup_args.storages: if backup_args.storages:

View File

@ -117,7 +117,7 @@ class BackupOs(object):
'status': 'available', 'status': 'available',
} }
backups = cinder.backups.list(search_opts=search_opts) backups = cinder.backups.list(search_opts=search_opts)
if len(backups) > 0 : if len(backups) > 0:
incremental = True incremental = True
else: else:
incremental = False incremental = False

View File

@ -281,8 +281,8 @@ class OpenstackOpts(object):
""" """
Gathering and maintaining the right Openstack credentials that will be used Gathering and maintaining the right Openstack credentials that will be used
to authenticate against keystone. Now we support keystone v2 and v3. to authenticate against keystone. Now we support keystone v2 and v3.
We need to provide a correct url that ends with either v2.0 or v3 or provide We need to provide a correct url that ends with either v2.0 or v3
auth_version or identity_api_version or provide auth_version or identity_api_version
""" """
def __init__(self, auth_url, auth_method='password', auth_version=None, def __init__(self, auth_url, auth_method='password', auth_version=None,
username=None, password=None, region_name=None, cacert=None, username=None, password=None, region_name=None, cacert=None,
@ -307,7 +307,8 @@ class OpenstackOpts(object):
:param identity_api_version: string Keystone API version to use :param identity_api_version: string Keystone API version to use
:param project_id: UUID string Project ID :param project_id: UUID string Project ID
:param project_name: string Project Name :param project_name: string Project Name
:param tenant_id: string Project/ Tenant ID. Use with keystone v2.0 only :param tenant_id: string Project/ Tenant ID.
Use with keystone v2.0 only
:param tenant_name: string Project/ Tenant Name. keystone v2.0 only :param tenant_name: string Project/ Tenant Name. keystone v2.0 only
:param token: string Valid token. Only if auth_method is token :param token: string Valid token. Only if auth_method is token
:param insecure: boolean Use insecure connections :param insecure: boolean Use insecure connections
@ -321,7 +322,8 @@ class OpenstackOpts(object):
:param user_domain_id: string User Domain ID. only with keystone v3 :param user_domain_id: string User Domain ID. only with keystone v3
:param project_domain_id: string Project Domain ID. keystone v3 only :param project_domain_id: string Project Domain ID. keystone v3 only
:param domain_name: string Domain Name. only with keystone v3 :param domain_name: string Domain Name. only with keystone v3
:param project_domain_name: string Project Domain Name. keystone v3 only :param project_domain_name: string Project Domain Name.
keystone v3 only
:return: None :return: None
""" """
self.auth_url = auth_url self.auth_url = auth_url
@ -360,13 +362,13 @@ class OpenstackOpts(object):
self.auth_version = self.identity_api_version = str('2.0') self.auth_version = self.identity_api_version = str('2.0')
else: else:
raise Exception('Keystone Auth version {0} is not supported!. ' raise Exception('Keystone Auth version {0} is not supported!. '
'Generated from auth_url: {1}'.format(version, 'Generated from auth_url: {1}'
auth_url)) .format(version, auth_url))
logging.info('Authenticating with Keystone version: {0}, auth_url: {1},'
' username: {2}, project: {3}'.format(self.auth_version, logging.info('Authenticating with Keystone version: '
self.auth_url, '{0}, auth_url: {1}, username: {2}, project: {3}'.
self.username, format(self.auth_version, self.auth_url,
self.project_name)) self.username, self.project_name))
def get_opts_dicts(self): def get_opts_dicts(self):
""" """
@ -392,8 +394,8 @@ class OpenstackOpts(object):
opts.pop('tenant_id', None) opts.pop('tenant_id', None)
opts.pop('tenant_name', None) opts.pop('tenant_name', None)
elif self.auth_version in ['2.0', '2'] or self.identity_api_version in \ elif (self.auth_version in ['2.0', '2'] or
['2.0', '2']: self.identity_api_version in ['2.0', '2']):
opts['auth_version'] = opts['identity_api_version'] = '2.0' opts['auth_version'] = opts['identity_api_version'] = '2.0'
# these parameters won't work with keystone v2.0 # these parameters won't work with keystone v2.0
opts.pop('project_id', None) opts.pop('project_id', None)
@ -455,7 +457,7 @@ class OpenstackOpts(object):
compute_api_version=src_dict.get('OS_COMPUTE_API_VERSION', 2), compute_api_version=src_dict.get('OS_COMPUTE_API_VERSION', 2),
volume_api_version=src_dict.get('OS_VOLUME_API_VERSION', 2), volume_api_version=src_dict.get('OS_VOLUME_API_VERSION', 2),
image_api_version=src_dict.get('OS_IMAGE_API_VERSION', 2) image_api_version=src_dict.get('OS_IMAGE_API_VERSION', 2)
) )
class DryRunSwiftclientConnectionWrapper(object): class DryRunSwiftclientConnectionWrapper(object):

View File

@ -25,7 +25,7 @@ CONF = cfg.CONF
logging = log.getLogger(__name__) logging = log.getLogger(__name__)
class RestoreOs: class RestoreOs(object):
def __init__(self, client_manager, container): def __init__(self, client_manager, container):
self.client_manager = client_manager self.client_manager = client_manager
self.container = container self.container = container
@ -40,8 +40,8 @@ class RestoreOs:
""" """
swift = self.client_manager.get_swift() swift = self.client_manager.get_swift()
info, backups = swift.get_container(self.container, path=path) info, backups = swift.get_container(self.container, path=path)
backups = sorted(map(lambda x: int(x["name"].rsplit("/", 1)[-1]), backups = sorted(
backups)) map(lambda x: int(x["name"].rsplit("/", 1)[-1]), backups))
backups = list(filter(lambda x: x >= restore_from_timestamp, backups)) backups = list(filter(lambda x: x >= restore_from_timestamp, backups))
if not backups: if not backups:
@ -60,14 +60,13 @@ class RestoreOs:
swift = self.client_manager.get_swift() swift = self.client_manager.get_swift()
glance = self.client_manager.get_glance() glance = self.client_manager.get_glance()
backup = self._get_backups(path, restore_from_timestamp) backup = self._get_backups(path, restore_from_timestamp)
stream = swift.get_object( stream = swift.get_object(self.container, "%s/%s" % (path, backup),
self.container, "%s/%s" % (path, backup), resp_chunk_size=10000000) resp_chunk_size=10000000)
length = int(stream[0]["x-object-meta-length"]) length = int(stream[0]["x-object-meta-length"])
logging.info("[*] Creation glance image") logging.info("[*] Creation glance image")
image = glance.images.create( image = glance.images.create(
data=utils.ReSizeStream(stream[1], length, 1), data=utils.ReSizeStream(stream[1], length, 1),
container_format="bare", container_format="bare", disk_format="raw")
disk_format="raw")
return stream[0], image return stream[0], image
def restore_cinder(self, volume_id, restore_from_timestamp): def restore_cinder(self, volume_id, restore_from_timestamp):
@ -78,13 +77,11 @@ class RestoreOs:
:return: :return:
""" """
cinder = self.client_manager.get_cinder() cinder = self.client_manager.get_cinder()
search_opts = { search_opts = {'volume_id': volume_id, 'status': 'available', }
'volume_id': volume_id,
'status': 'available',
}
backups = cinder.backups.list(search_opts=search_opts) backups = cinder.backups.list(search_opts=search_opts)
backups_filter = [x for x in backups if (utils.date_to_timestamp(x.created_at.split('.')[0]) backups_filter = ([x for x in backups if utils.date_to_timestamp(
>= restore_from_timestamp)] x.created_at.split('.')[0]) >= restore_from_timestamp])
if not backups_filter: if not backups_filter:
logging.warning("no available backups for cinder volume," logging.warning("no available backups for cinder volume,"
"restore newest backup") "restore newest backup")

View File

@ -43,7 +43,7 @@ def get_common_opts():
_LOG.error('OS error: {0}'.format(err)) _LOG.error('OS error: {0}'.format(err))
except IOError: except IOError:
_LOG.error('Cannot create the directory {0}' _LOG.error('Cannot create the directory {0}'
.format(scheduler_conf_d)) .format(scheduler_conf_d))
_COMMON = [ _COMMON = [
cfg.StrOpt('job', cfg.StrOpt('job',
@ -84,8 +84,8 @@ def get_common_opts():
dest='jobs_dir', dest='jobs_dir',
short='f', short='f',
help='Used to store/retrieve files on local storage, ' help='Used to store/retrieve files on local storage, '
'including those exchanged with the api service.Default' 'including those exchanged with the api service. '
' value is {0} (Env: FREEZER_SCHEDULER_CONF_D)' 'Default value is {0} (Env: FREEZER_SCHEDULER_CONF_D)'
.format(scheduler_conf_d)), .format(scheduler_conf_d)),
cfg.IntOpt('interval', cfg.IntOpt('interval',
default=60, default=60,
@ -150,9 +150,9 @@ def setup_logging():
'freezer=INFO'] 'freezer=INFO']
_DEFAULT_LOGGING_CONTEXT_FORMAT = ('%(asctime)s.%(msecs)03d %(process)d ' _DEFAULT_LOGGING_CONTEXT_FORMAT = ('%(asctime)s.%(msecs)03d %(process)d '
'%(levelname)s %(name)s [%(request_id)s ' '%(levelname)s %(name)s '
'%(user_identity)s] %(instance)s' '[%(request_id)s %(user_identity)s] '
'%(message)s') '%(instance)s%(message)s')
log.set_defaults(_DEFAULT_LOGGING_CONTEXT_FORMAT, _DEFAULT_LOG_LEVELS) log.set_defaults(_DEFAULT_LOGGING_CONTEXT_FORMAT, _DEFAULT_LOG_LEVELS)
log.setup(CONF, 'freezer-scheduler', version=FREEZER_VERSION) log.setup(CONF, 'freezer-scheduler', version=FREEZER_VERSION)

View File

@ -66,7 +66,7 @@ def setup_logging(log_file):
raise Exception("Unable to write to log file") raise Exception("Unable to write to log file")
class NoDaemon: class NoDaemon(object):
""" """
A class which shares the same interface as the Daemon class, A class which shares the same interface as the Daemon class,
but is used to execute the scheduler as a foreground process but is used to execute the scheduler as a foreground process
@ -91,7 +91,7 @@ class NoDaemon:
signal.SIGTERM: NoDaemon.handle_program_exit, signal.SIGTERM: NoDaemon.handle_program_exit,
signal.SIGINT: NoDaemon.handle_program_exit, signal.SIGINT: NoDaemon.handle_program_exit,
signal.SIGHUP: NoDaemon.handle_reload, signal.SIGHUP: NoDaemon.handle_reload,
} }
@staticmethod @staticmethod
def handle_program_exit(signum, frame): def handle_program_exit(signum, frame):
@ -125,7 +125,7 @@ class NoDaemon:
pass pass
class Daemon: class Daemon(object):
""" """
A class to manage all the daemon-related stuff A class to manage all the daemon-related stuff
@ -154,7 +154,7 @@ class Daemon:
return { return {
signal.SIGTERM: Daemon.handle_program_exit, signal.SIGTERM: Daemon.handle_program_exit,
signal.SIGHUP: Daemon.handle_reload, signal.SIGHUP: Daemon.handle_reload,
} }
@property @property
def pid_fname(self): def pid_fname(self):

View File

@ -52,7 +52,8 @@ class FreezerScheduler(object):
# Needed in the case of a non-activated virtualenv # Needed in the case of a non-activated virtualenv
self.freezerc_executable = spawn.find_executable( self.freezerc_executable = spawn.find_executable(
'freezer-agent', path=':'.join(sys.path)) 'freezer-agent', path=':'.join(sys.path))
LOG.debug('Freezer-agent found at {0}'.format(self.freezerc_executable)) LOG.debug('Freezer-agent found at {0}'
.format(self.freezerc_executable))
self.job_path = job_path self.job_path = job_path
self._client = None self._client = None
self.lock = threading.Lock() self.lock = threading.Lock()
@ -77,7 +78,7 @@ class FreezerScheduler(object):
utils.save_jobs_to_disk(job_doc_list, self.job_path) utils.save_jobs_to_disk(job_doc_list, self.job_path)
except Exception as e: except Exception as e:
LOG.error('Unable to save jobs to {0}. ' LOG.error('Unable to save jobs to {0}. '
'{1}'.format(self.job_path, e)) '{1}'.format(self.job_path, e))
return job_doc_list return job_doc_list
else: else:
return utils.get_jobs_from_disk(self.job_path) return utils.get_jobs_from_disk(self.job_path)

View File

@ -110,7 +110,7 @@ def lvm_snap(backup_opt_dict):
uuid.uuid4().hex) uuid.uuid4().hex)
backup_opt_dict.path_to_backup = os.path.join(backup_opt_dict.lvm_dirmount, backup_opt_dict.path_to_backup = os.path.join(backup_opt_dict.lvm_dirmount,
lvm_info['snap_path']) lvm_info['snap_path'])
if not validate_lvm_params(backup_opt_dict): if not validate_lvm_params(backup_opt_dict):
logging.info('[*] No LVM requested/configured') logging.info('[*] No LVM requested/configured')
@ -297,7 +297,8 @@ def validate_lvm_params(backup_opt_dict):
def _umount(path): def _umount(path):
# TODO: check if cwd==path and change working directory to unmount ? # TODO(ANONYMOUS): check if cwd==path
# and change working directory to unmount ?
umount_proc = subprocess.Popen('{0} -l -f {1}'.format( umount_proc = subprocess.Popen('{0} -l -f {1}'.format(
utils.find_executable('umount'), path), utils.find_executable('umount'), path),
stdin=subprocess.PIPE, stdin=subprocess.PIPE,

View File

@ -115,5 +115,5 @@ def vss_delete_symlink(windows_volume):
os.rmdir(path) os.rmdir(path)
except Exception: except Exception:
logging.error('Failed to delete shadow copy symlink {0}'. logging.error('Failed to delete shadow copy symlink {0}'.
format(os.path.join(windows_volume, format(os.path.join(windows_volume,
'freezer_shadowcopy'))) 'freezer_shadowcopy')))

View File

@ -215,7 +215,7 @@ class Storage(object):
return latest_update return latest_update
class Backup: class Backup(object):
""" """
Internal freezer representation of backup. Internal freezer representation of backup.
Includes: Includes:
@ -386,7 +386,7 @@ class Backup:
len(self.increments) == len(other.increments) len(self.increments) == len(other.increments)
class BackupRepr: class BackupRepr(object):
""" """
Intermediate for parsing purposes - it parsed backup name. Intermediate for parsing purposes - it parsed backup name.
Difference between Backup and BackupRepr - backupRepr can be parsed from Difference between Backup and BackupRepr - backupRepr can be parsed from

View File

@ -12,7 +12,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
class StorageException(Exception): class StorageException(Exception):
def __init__(self, message): def __init__(self, message):
super(StorageException, self).__init__(message) super(StorageException, self).__init__(message)

View File

@ -41,13 +41,17 @@ class MultipleStorage(base.Storage):
def write_backup(self, rich_queue, backup): def write_backup(self, rich_queue, backup):
output_queues = [streaming.RichQueue() for x in self.storages] output_queues = [streaming.RichQueue() for x in self.storages]
except_queues = [queue.Queue() for x in self.storages] except_queues = [queue.Queue() for x in self.storages]
threads = [streaming.QueuedThread( threads = ([streaming.QueuedThread(storage.write_backup, output_queue,
storage.write_backup, output_queue, except_queue, kwargs={"backup": backup}) for except_queue, kwargs={"backup": backup}) for
storage, output_queue, except_queue in zip(self.storages, output_queues, except_queues)] storage, output_queue, except_queue in
zip(self.storages, output_queues, except_queues)])
for thread in threads: for thread in threads:
thread.daemon = True thread.daemon = True
thread.start() thread.start()
StorageManager(rich_queue, output_queues).transmit() StorageManager(rich_queue, output_queues).transmit()
for thread in threads: for thread in threads:
thread.join() thread.join()
@ -62,8 +66,8 @@ class MultipleStorage(base.Storage):
got_exception = None got_exception = None
for except_queue in except_queues: for except_queue in except_queues:
got_exception = (handle_exception_queue(except_queue) got_exception = (handle_exception_queue(except_queue) or
or got_exception) got_exception)
if (got_exception): if (got_exception):
raise StorageException("Storage error. Failed to write backup.") raise StorageException("Storage error. Failed to write backup.")
@ -90,23 +94,23 @@ class MultipleStorage(base.Storage):
self.storages = storages self.storages = storages
def download_freezer_meta_data(self, backup): def download_freezer_meta_data(self, backup):
# TODO. Need to implement. # TODO(DEKLAN): Need to implement.
pass pass
def get_file(self, from_path, to_path): def get_file(self, from_path, to_path):
# TODO. Need to implement. # TODO(DEKLAN): Need to implement.
pass pass
def meta_file_abs_path(self, backup): def meta_file_abs_path(self, backup):
# TODO. Need to implement. # TODO(DEKLAN): Need to implement.
pass pass
def upload_freezer_meta_data(self, backup, meta_dict): def upload_freezer_meta_data(self, backup, meta_dict):
# TODO. Need to implement. # TODO(DEKLAN): Need to implement.
pass pass
class StorageManager: class StorageManager(object):
def __init__(self, input_queue, output_queues): def __init__(self, input_queue, output_queues):
""" """

View File

@ -48,6 +48,7 @@ class BaseFreezerCliTest(base.BaseFreezerTest):
uri = cls.get_auth_url(), uri = cls.get_auth_url(),
cli_dir = '/usr/local/bin' # devstack default cli_dir = '/usr/local/bin' # devstack default
) )
cls.cli.cli_dir = ''
def delete_job(self, job_id): def delete_job(self, job_id):
self.cli.freezer_scheduler(action='job-delete', flags='-c test_node -j {}'.format(job_id)) self.cli.freezer_scheduler(action='job-delete', flags='-c test_node -j {}'.format(job_id))

View File

@ -79,7 +79,8 @@ class CheckSum(object):
Walk the files in path computing the checksum for each one and updates Walk the files in path computing the checksum for each one and updates
the concatenation checksum for the final result the concatenation checksum for the final result
""" """
self.count = utils.walk_path(self.path, self.exclude, self.ignorelinks, self.get_hash) self.count = utils.walk_path(self.path, self.exclude,
self.ignorelinks, self.get_hash)
return self._increment_hash return self._increment_hash
@ -92,7 +93,8 @@ class CheckSum(object):
:type filename: string :type filename: string
:return: string containing the hash of the given file :return: string containing the hash of the given file
""" """
if os.path.isfile(filepath) and not (os.path.islink(filepath) and self.ignorelinks): if (os.path.isfile(filepath) and not (
os.path.islink(filepath) and self.ignorelinks)):
file_hash = self.hashfile(open(filepath, 'rb')) file_hash = self.hashfile(open(filepath, 'rb'))
else: else:
file_hash = self.hashstring(filepath) file_hash = self.hashstring(filepath)

View File

@ -23,7 +23,7 @@ from six.moves import cStringIO
from freezer.utils import utils from freezer.utils import utils
class Config: class Config(object):
@staticmethod @staticmethod
def parse(config_path): def parse(config_path):
@ -68,6 +68,7 @@ EXPORT = re.compile(r"^\s*export\s+([^=^#^\s]+)\s*=\s*([^#^\n]*)\s*$",
INI = re.compile(r"^\s*([^=#\s]+)\s*=[\t]*([^#\n]*)\s*$", re.MULTILINE) INI = re.compile(r"^\s*([^=#\s]+)\s*=[\t]*([^#\n]*)\s*$", re.MULTILINE)
def osrc_parse(lines): def osrc_parse(lines):
""" """
:param lines: :param lines:
@ -76,6 +77,7 @@ def osrc_parse(lines):
""" """
return find_all(EXPORT, lines) return find_all(EXPORT, lines)
def ini_parse(lines): def ini_parse(lines):
""" """
:param lines: :param lines:
@ -89,7 +91,7 @@ def ini_parse(lines):
return dict(parser.items('default')) return dict(parser.items('default'))
except Exception as e: except Exception as e:
try: try:
# TODO: Remove the parsing of ini-like file via regex # TODO(ANONYMOUS): Remove the parsing of ini-like file via regex
conf = find_all(INI, lines) conf = find_all(INI, lines)
logging.warning("Using non-INI files for database configuration " logging.warning("Using non-INI files for database configuration "
"file is deprecated. Falling back to Regex.") "file is deprecated. Falling back to Regex.")
@ -99,6 +101,7 @@ def ini_parse(lines):
logging.warning("Couldn't parse non-INI config file using Regex") logging.warning("Couldn't parse non-INI config file using Regex")
raise raise
def find_all(regex, lines): def find_all(regex, lines):
return dict([(k.strip(), utils.dequote(v.strip())) for k, v in return dict([(k.strip(), utils.dequote(v.strip())) for k, v in
regex.findall(lines)]) regex.findall(lines)])

View File

@ -23,7 +23,7 @@ class Wait(Exception):
pass pass
class RichQueue: class RichQueue(object):
""" """
:type data_queue: Queue.Queue :type data_queue: Queue.Queue
""" """
@ -84,7 +84,8 @@ class RichQueue:
class QueuedThread(threading.Thread): class QueuedThread(threading.Thread):
def __init__(self, target, rich_queue, exception_queue, args=(), kwargs=None): def __init__(self, target, rich_queue, exception_queue,
args=(), kwargs=None):
""" """
:type args: collections.Iterable :type args: collections.Iterable
:type kwargs: dict :type kwargs: dict

View File

@ -202,7 +202,7 @@ def date_to_timestamp(date):
return int(time.mktime(opt_backup_date.timetuple())) return int(time.mktime(opt_backup_date.timetuple()))
class Bunch: class Bunch(object):
def __init__(self, **kwds): def __init__(self, **kwds):
self.__dict__.update(kwds) self.__dict__.update(kwds)
@ -211,7 +211,7 @@ class Bunch:
return self.__dict__.get(item) return self.__dict__.get(item)
class ReSizeStream: class ReSizeStream(object):
""" """
Iterator/File-like object for changing size of chunk in stream Iterator/File-like object for changing size of chunk in stream
""" """
@ -294,8 +294,8 @@ def tar_path():
path_to_binaries = os.path.dirname(os.path.abspath(__file__)) path_to_binaries = os.path.dirname(os.path.abspath(__file__))
return '{0}\\bin\\tar.exe'.format(path_to_binaries) return '{0}\\bin\\tar.exe'.format(path_to_binaries)
tar = (get_executable_path('gnutar') or get_executable_path('gtar') tar = (get_executable_path('gnutar') or get_executable_path('gtar') or
or get_executable_path('tar')) get_executable_path('tar'))
if not tar: if not tar:
raise Exception('Please install gnu tar (gtar) as it is a ' raise Exception('Please install gnu tar (gtar) as it is a '
'mandatory requirement to use freezer.') 'mandatory requirement to use freezer.')

View File

@ -26,7 +26,7 @@ def is_windows():
return True if sys.platform == 'win32' else False return True if sys.platform == 'win32' else False
class DisableFileSystemRedirection: class DisableFileSystemRedirection(object):
""" """
When a 32 bit program runs on a 64 bit operating system the paths When a 32 bit program runs on a 64 bit operating system the paths
to C:/Windows/System32 automatically get redirected to the 32 bit to C:/Windows/System32 automatically get redirected to the 32 bit
@ -36,8 +36,10 @@ class DisableFileSystemRedirection:
def __init__(self): def __init__(self):
if is_windows(): if is_windows():
self._disable = ctypes.windll.kernel32.Wow64DisableWow64FsRedirection self._disable = (ctypes.windll.kernel32.
self._revert = ctypes.windll.kernel32.Wow64RevertWow64FsRedirection Wow64DisableWow64FsRedirection)
self._revert = (ctypes.windll.kernel32.
Wow64RevertWow64FsRedirection)
else: else:
raise Exception("Useless if not windows") raise Exception("Useless if not windows")

View File

@ -68,13 +68,6 @@ commands = flake8 freezer
commands = pylint --rcfile .pylintrc freezer commands = pylint --rcfile .pylintrc freezer
[flake8] [flake8]
# it's not a bug that we aren't using all of hacking ignore = H405,H404,H403,H401
# H102 -> apache2 license exists
# H103 -> license is apache
# H201 -> no bare excepts
# H501 -> don't use locals() for str formatting
# H903 -> \n not \r\n
ignore = H
select = H102, H103, H201, H501, H903, H201, H306, H301, H233
show-source = True show-source = True
exclude = .venv,.tox,dist,doc,test,*egg,tests exclude = .venv,.tox,dist,doc,test,*egg,tests