Fix tox.ini file

Remove select line in tox.ini.
Currently checks are not being run.
Run pep8 checks.
Fix code to pass pep8 checks.

Change-Id: Iacef4386b1214f95b1bd106c17a5f714f948a1a1
Closes-Bug: #1593452
This commit is contained in:
Deklan Dieterly 2016-06-16 16:09:48 -06:00
parent 3cdfd8fba3
commit 694c98e063
26 changed files with 169 additions and 153 deletions

View File

@ -112,9 +112,10 @@ def build_os_options():
dest='os_region_name'),
cfg.StrOpt('os-token',
default=env('OS_TOKEN'),
help='Specify an existing token to use instead of retrieving'
' one via authentication (e.g. with username & '
'password). Defaults to env[OS_TOKEN].',
help='Specify an existing token to use instead of '
'retrieving one via authentication '
'(e.g. with username & password). Defaults '
'to env[OS_TOKEN].',
dest='os_token'),
cfg.StrOpt('os-identity-api-version',
default=env('OS_IDENTITY_API_VERSION'),
@ -248,7 +249,6 @@ class Client(object):
self.actions = actions.ActionManager(self, verify=self.verify)
self.sessions = sessions.SessionManager(self, verify=self.verify)
@cached_property
def session(self):
if self._session:

View File

@ -142,26 +142,28 @@ _COMMON = [
" -. If the permission is set to rw it will be mutable"),
cfg.StrOpt('lvm-snapsize',
dest='lvm_snapsize',
help="Set the lvm snapshot size when creating a new snapshot. "
"Please add G for Gigabytes or M for Megabytes, i.e. 500M "
"or 8G. It is also possible to use percentages as with the"
" -l option of lvm, i.e. 80%%FREE Default {0}.".format(
DEFAULT_LVM_SNAPSIZE)),
help="Set the lvm snapshot size when creating a new "
"snapshot. Please add G for Gigabytes or "
"M for Megabytes, i.e. 500M or 8G. It is also possible "
"to use percentages as with the -l option of lvm, i.e. "
"80%%FREE Default {0}.".format(DEFAULT_LVM_SNAPSIZE)),
cfg.StrOpt('lvm-dirmount',
dest='lvm_dirmount',
help="Set the directory you want to mount the lvm snapshot to. "
"If not provided, a unique name will be generated with the"
"basename {0} ".format(DEFAULT_LVM_MOUNT_BASENAME)),
"If not provided, a unique name will be generated with "
"thebasename {0} ".format(DEFAULT_LVM_MOUNT_BASENAME)),
cfg.StrOpt('lvm-volgroup',
dest='lvm_volgroup',
help="Specify the volume group of your logical volume. This is "
"important to mount your snapshot volume. Default not set"),
"important to mount your snapshot volume. Default not "
"set"),
cfg.IntOpt('max-level',
dest='max_level',
help="Set the backup level used with tar to implement "
"incremental backup. If a level 1 is specified but no level"
" 0 is already available, a level 0 will be done and "
"subsequently backs to level 1. Default 0 (No Incremental)"
"incremental backup. If a level 1 is specified but "
"no level 0 is already available, a level 0 will be "
"done and subsequently backs to level 1. "
"Default 0 (No Incremental)"
),
cfg.IntOpt('always-level',
dest='always_level',
@ -173,20 +175,21 @@ _COMMON = [
" --max-backup-level. Default False (Disabled)"),
cfg.FloatOpt('restart-always-level',
dest='restart_always_level',
help="Restart the backup from level 0 after n days. Valid only"
" if --always-level option if set. If --always-level is "
"used together with --remove-older-then, there might be "
help="Restart the backup from level 0 after n days. Valid "
"only if --always-level option if set. If "
"--always-level is used together with "
"--remove-older-then, there might be "
"the chance where the initial level 0 will be removed. "
"Default False (Disabled)"),
cfg.FloatOpt('remove-older-than',
short='R',
dest='remove_older_than',
help="Checks in the specified container for object older than "
"the specified days. If i.e. 30 is specified, it will "
"remove the remote object older than 30 days. Default "
"False (Disabled) The option --remove-older-then is "
"deprecated and will be removed soon",
deprecated_for_removal=True),
help="Checks in the specified container for object older "
"than the specified days. If i.e. 30 is specified, it "
"will remove the remote object older than 30 days. "
"Default False (Disabled) The option "
"--remove-older-then is deprecated and will be removed "
"soon", deprecated_for_removal=True),
cfg.StrOpt('remove-from-date',
dest='remove_from_date',
help="Checks the specified container and removes objects older "
@ -196,9 +199,9 @@ _COMMON = [
cfg.StrOpt('no-incremental',
dest='no_incremental',
help="Disable incremental feature. By default freezer build the"
" meta data even for level 0 backup. By setting this option"
" incremental meta data is not created at all. Default "
"disabled"),
" meta data even for level 0 backup. By setting this "
"option incremental meta data is not created at all. "
"Default disabled"),
cfg.StrOpt('hostname',
dest='hostname',
deprecated_name='restore-from-host',
@ -219,8 +222,9 @@ _COMMON = [
"port = <db-port>"),
cfg.StrOpt('metadata-out',
dest='metadata_out',
help="Set the filename to which write the metadata regarding the"
" backup metrics. Use '-' to output to standard output."),
help="Set the filename to which write the metadata "
"regarding the backup metrics. Use '-' to output to "
"standard output."),
cfg.StrOpt('exclude',
dest='exclude',
help="Exclude files,given as a PATTERN.Ex: --exclude '*.log' "
@ -235,9 +239,9 @@ _COMMON = [
),
cfg.StrOpt('encrypt-pass-file',
dest='encrypt_pass_file',
help="Passing a private key to this option, allow you to encrypt"
" the files before to be uploaded in Swift. Default do "
"not encrypt."
help="Passing a private key to this option, allow you "
"to encrypt the files before to be uploaded in Swift. "
"Default do not encrypt."
),
cfg.IntOpt('max-segment-size',
short='M',
@ -252,9 +256,9 @@ _COMMON = [
),
cfg.StrOpt('restore-from-date',
dest='restore_from_date',
help="Set the date of the backup from which you want to restore."
"This will select the most recent backup previous to "
"the specified date (included). Example: "
help="Set the date of the backup from which you want to "
"restore.This will select the most recent backup "
"previous to the specified date (included). Example: "
"if the last backup was created at '2016-03-22T14:29:01' "
"and restore-from-date is set to '2016-03-22T14:29:01', "
"the backup will be restored successfully. The same for "
@ -269,9 +273,9 @@ _COMMON = [
cfg.StrOpt('max-priority',
dest='max_priority',
help="Set the cpu process to the highest priority (i.e. -20 on "
"Linux) and real-time for I/O. The process priority will be"
" set only if nice and ionice are installed Default "
"disabled. Use with caution."
"Linux) and real-time for I/O. The process priority "
"will be set only if nice and ionice are installed "
"Default disabled. Use with caution."
),
cfg.BoolOpt('quiet',
short='q',
@ -303,8 +307,8 @@ _COMMON = [
"Can be invoked with dimensions (10K, 120M, 10G)."),
cfg.IntOpt('download-limit',
dest='download_limit',
help="Download bandwidth limit in Bytes per sec. Can be invoked "
" with dimensions (10K, 120M, 10G)."),
help="Download bandwidth limit in Bytes per sec. Can be "
"invoked with dimensions (10K, 120M, 10G)."),
cfg.StrOpt('cinder-vol-id',
dest='cinder_vol_id',
help="Id of cinder volume for backup"
@ -367,8 +371,8 @@ _COMMON = [
cfg.BoolOpt('consistency_check',
dest='consistency_check',
help="When true will checksum the files before backup. "
"The commuted backup checksum is stored as bakcup metadata"
" and can be retrieved through the freezer-api. "
"The commuted backup checksum is stored as backup "
"metadata and can be retrieved through the freezer-api. "
"On restore it is possible to check for consistency. "
"Please note this option is currently only available "
"for file system backups. "
@ -419,9 +423,9 @@ def get_backup_args():
if CONF.get('config'):
conf = freezer_config.Config.parse(CONF.get('config'))
defaults.update(conf.default)
# TODO: restore_from_host is deprecated and to be removed
defaults['hostname'] = conf.default.get('hostname') or \
conf.default.get('restore_from_host')
# TODO(ANONYMOUS): restore_from_host is deprecated and to be removed
defaults['hostname'] = (conf.default.get('hostname') or
conf.default.get('restore_from_host'))
# override default oslo values
levels = {
'all': log.NOTSET,
@ -495,7 +499,7 @@ def get_backup_args():
backup_args.__dict__['windows_volume'] = \
backup_args.path_to_backup[:3]
# todo(enugaev) move it to new command line param backup_media
# TODO(enugaev): move it to new command line param backup_media
if backup_args.lvm_auto_snap:
raise Exception('lvm-auto-snap is deprecated. '

View File

@ -39,8 +39,8 @@ logging = log.getLogger(__name__)
class BackupEngine(object):
"""
The main part of making a backup and making a restore is the mechanism of
implementing it. For a long time Freezer had only one mechanism of doing it -
invoking gnutar and it was heavy hard-coded.
implementing it. For a long time Freezer had only one mechanism of
doing it - invoking gnutar and it was heavy hard-coded.
Currently we are going to support many different approaches.
One of them is rsync. Having many different implementations requires to
@ -122,10 +122,10 @@ class BackupEngine(object):
return False
got_exception = None
got_exception = (handle_except_queue(read_except_queue)
or got_exception)
got_exception = (handle_except_queue(write_except_queue)
or got_exception)
got_exception = (handle_except_queue(read_except_queue) or
got_exception)
got_exception = (handle_except_queue(write_except_queue) or
got_exception)
if (got_exception):
raise EngineException("Engine error. Failed to backup.")
@ -220,10 +220,10 @@ class BackupEngine(object):
return False
got_exception = None
got_exception = (handle_except_SimpleQueue(read_except_queue)
or got_exception)
got_exception = (handle_except_SimpleQueue(write_except_queue)
or got_exception)
got_exception = (handle_except_SimpleQueue(read_except_queue) or
got_exception)
got_exception = (handle_except_SimpleQueue(write_except_queue) or
got_exception)
if tar_stream.exitcode or got_exception:
raise EngineException("Engine error. Failed to restore file.")

View File

@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
class EngineException(Exception):
def __init__(self, message):

View File

@ -18,7 +18,7 @@ Freezer Tar related functions
from freezer.utils import utils
class TarCommandBuilder:
class TarCommandBuilder(object):
"""
Building a tar cmd command. To build command invoke method build.
"""
@ -102,7 +102,7 @@ class TarCommandBuilder:
return tar_command
class TarCommandRestoreBuilder:
class TarCommandRestoreBuilder(object):
WINDOWS_TEMPLATE = '{0} -x {1} --incremental --unlink-first ' \
'--ignore-zeros'
DRY_RUN_TEMPLATE = '{0} {1} --incremental --list ' \

View File

@ -104,7 +104,8 @@ class TarBackupEngine(engine.BackupEngine):
try:
metadata = backup.metadata()
if not self.encrypt_pass_file and metadata.get("encryption", False):
if (not self.encrypt_pass_file and
metadata.get("encryption", False)):
raise Exception("Cannot restore encrypted backup without key")
tar_command = tar_builders.TarCommandRestoreBuilder(
@ -127,9 +128,9 @@ class TarBackupEngine(engine.BackupEngine):
tar_process = subprocess.Popen(
command, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
# Start loop reading the pipe and pass the data to the tar std input.
# If EOFError exception is raised, the loop end the std err will be
# checked for errors.
# Start loop reading the pipe and pass the data to the tar
# std input. If EOFError exception is raised, the loop end
# the std err will be checked for errors.
try:
while True:
tar_process.stdin.write(read_pipe.recv_bytes())

View File

@ -58,8 +58,10 @@ def freezer_main(backup_args):
work_dir = backup_args.work_dir
max_segment_size = backup_args.max_segment_size
if backup_args.storage == 'swift' or (
if (backup_args.storage ==
'swift' or
backup_args.backup_media in ['nova', 'cinder', 'cindernative']):
backup_args.client_manager = get_client_manager(backup_args.__dict__)
if backup_args.storages:

View File

@ -117,7 +117,7 @@ class BackupOs(object):
'status': 'available',
}
backups = cinder.backups.list(search_opts=search_opts)
if len(backups) > 0 :
if len(backups) > 0:
incremental = True
else:
incremental = False

View File

@ -281,8 +281,8 @@ class OpenstackOpts(object):
"""
Gathering and maintaining the right Openstack credentials that will be used
to authenticate against keystone. Now we support keystone v2 and v3.
We need to provide a correct url that ends with either v2.0 or v3 or provide
auth_version or identity_api_version
We need to provide a correct url that ends with either v2.0 or v3
or provide auth_version or identity_api_version
"""
def __init__(self, auth_url, auth_method='password', auth_version=None,
username=None, password=None, region_name=None, cacert=None,
@ -307,7 +307,8 @@ class OpenstackOpts(object):
:param identity_api_version: string Keystone API version to use
:param project_id: UUID string Project ID
:param project_name: string Project Name
:param tenant_id: string Project/ Tenant ID. Use with keystone v2.0 only
:param tenant_id: string Project/ Tenant ID.
Use with keystone v2.0 only
:param tenant_name: string Project/ Tenant Name. keystone v2.0 only
:param token: string Valid token. Only if auth_method is token
:param insecure: boolean Use insecure connections
@ -321,7 +322,8 @@ class OpenstackOpts(object):
:param user_domain_id: string User Domain ID. only with keystone v3
:param project_domain_id: string Project Domain ID. keystone v3 only
:param domain_name: string Domain Name. only with keystone v3
:param project_domain_name: string Project Domain Name. keystone v3 only
:param project_domain_name: string Project Domain Name.
keystone v3 only
:return: None
"""
self.auth_url = auth_url
@ -360,13 +362,13 @@ class OpenstackOpts(object):
self.auth_version = self.identity_api_version = str('2.0')
else:
raise Exception('Keystone Auth version {0} is not supported!. '
'Generated from auth_url: {1}'.format(version,
auth_url))
logging.info('Authenticating with Keystone version: {0}, auth_url: {1},'
' username: {2}, project: {3}'.format(self.auth_version,
self.auth_url,
self.username,
self.project_name))
'Generated from auth_url: {1}'
.format(version, auth_url))
logging.info('Authenticating with Keystone version: '
'{0}, auth_url: {1}, username: {2}, project: {3}'.
format(self.auth_version, self.auth_url,
self.username, self.project_name))
def get_opts_dicts(self):
"""
@ -392,8 +394,8 @@ class OpenstackOpts(object):
opts.pop('tenant_id', None)
opts.pop('tenant_name', None)
elif self.auth_version in ['2.0', '2'] or self.identity_api_version in \
['2.0', '2']:
elif (self.auth_version in ['2.0', '2'] or
self.identity_api_version in ['2.0', '2']):
opts['auth_version'] = opts['identity_api_version'] = '2.0'
# these parameters won't work with keystone v2.0
opts.pop('project_id', None)

View File

@ -25,7 +25,7 @@ CONF = cfg.CONF
logging = log.getLogger(__name__)
class RestoreOs:
class RestoreOs(object):
def __init__(self, client_manager, container):
    """Bind the OpenStack client manager and the target swift container.

    :param client_manager: object exposing get_swift()/get_glance()/
                           get_cinder() used by the restore methods below
    :param container: name of the swift container holding the backups
    """
    self.client_manager = client_manager
    self.container = container
@ -40,8 +40,8 @@ class RestoreOs:
"""
swift = self.client_manager.get_swift()
info, backups = swift.get_container(self.container, path=path)
backups = sorted(map(lambda x: int(x["name"].rsplit("/", 1)[-1]),
backups))
backups = sorted(
map(lambda x: int(x["name"].rsplit("/", 1)[-1]), backups))
backups = list(filter(lambda x: x >= restore_from_timestamp, backups))
if not backups:
@ -60,14 +60,13 @@ class RestoreOs:
swift = self.client_manager.get_swift()
glance = self.client_manager.get_glance()
backup = self._get_backups(path, restore_from_timestamp)
stream = swift.get_object(
self.container, "%s/%s" % (path, backup), resp_chunk_size=10000000)
stream = swift.get_object(self.container, "%s/%s" % (path, backup),
resp_chunk_size=10000000)
length = int(stream[0]["x-object-meta-length"])
logging.info("[*] Creation glance image")
image = glance.images.create(
data=utils.ReSizeStream(stream[1], length, 1),
container_format="bare",
disk_format="raw")
container_format="bare", disk_format="raw")
return stream[0], image
def restore_cinder(self, volume_id, restore_from_timestamp):
@ -78,13 +77,11 @@ class RestoreOs:
:return:
"""
cinder = self.client_manager.get_cinder()
search_opts = {
'volume_id': volume_id,
'status': 'available',
}
search_opts = {'volume_id': volume_id, 'status': 'available', }
backups = cinder.backups.list(search_opts=search_opts)
backups_filter = [x for x in backups if (utils.date_to_timestamp(x.created_at.split('.')[0])
>= restore_from_timestamp)]
backups_filter = ([x for x in backups if utils.date_to_timestamp(
x.created_at.split('.')[0]) >= restore_from_timestamp])
if not backups_filter:
logging.warning("no available backups for cinder volume,"
"restore newest backup")

View File

@ -84,8 +84,8 @@ def get_common_opts():
dest='jobs_dir',
short='f',
help='Used to store/retrieve files on local storage, '
'including those exchanged with the api service.Default'
' value is {0} (Env: FREEZER_SCHEDULER_CONF_D)'
'including those exchanged with the api service. '
'Default value is {0} (Env: FREEZER_SCHEDULER_CONF_D)'
.format(scheduler_conf_d)),
cfg.IntOpt('interval',
default=60,
@ -150,9 +150,9 @@ def setup_logging():
'freezer=INFO']
_DEFAULT_LOGGING_CONTEXT_FORMAT = ('%(asctime)s.%(msecs)03d %(process)d '
'%(levelname)s %(name)s [%(request_id)s '
'%(user_identity)s] %(instance)s'
'%(message)s')
'%(levelname)s %(name)s '
'[%(request_id)s %(user_identity)s] '
'%(instance)s%(message)s')
log.set_defaults(_DEFAULT_LOGGING_CONTEXT_FORMAT, _DEFAULT_LOG_LEVELS)
log.setup(CONF, 'freezer-scheduler', version=FREEZER_VERSION)

View File

@ -66,7 +66,7 @@ def setup_logging(log_file):
raise Exception("Unable to write to log file")
class NoDaemon:
class NoDaemon(object):
"""
A class which shares the same interface as the Daemon class,
but is used to execute the scheduler as a foreground process
@ -125,7 +125,7 @@ class NoDaemon:
pass
class Daemon:
class Daemon(object):
"""
A class to manage all the daemon-related stuff

View File

@ -52,7 +52,8 @@ class FreezerScheduler(object):
# Needed in the case of a non-activated virtualenv
self.freezerc_executable = spawn.find_executable(
'freezer-agent', path=':'.join(sys.path))
LOG.debug('Freezer-agent found at {0}'.format(self.freezerc_executable))
LOG.debug('Freezer-agent found at {0}'
.format(self.freezerc_executable))
self.job_path = job_path
self._client = None
self.lock = threading.Lock()

View File

@ -297,7 +297,8 @@ def validate_lvm_params(backup_opt_dict):
def _umount(path):
# TODO: check if cwd==path and change working directory to unmount ?
# TODO(ANONYMOUS): check if cwd==path
# and change working directory to unmount ?
umount_proc = subprocess.Popen('{0} -l -f {1}'.format(
utils.find_executable('umount'), path),
stdin=subprocess.PIPE,

View File

@ -215,7 +215,7 @@ class Storage(object):
return latest_update
class Backup:
class Backup(object):
"""
Internal freezer representation of backup.
Includes:
@ -386,7 +386,7 @@ class Backup:
len(self.increments) == len(other.increments)
class BackupRepr:
class BackupRepr(object):
"""
Intermediate for parsing purposes - it parsed backup name.
Difference between Backup and BackupRepr - backupRepr can be parsed from

View File

@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
class StorageException(Exception):
def __init__(self, message):

View File

@ -41,13 +41,17 @@ class MultipleStorage(base.Storage):
def write_backup(self, rich_queue, backup):
output_queues = [streaming.RichQueue() for x in self.storages]
except_queues = [queue.Queue() for x in self.storages]
threads = [streaming.QueuedThread(
storage.write_backup, output_queue, except_queue, kwargs={"backup": backup}) for
storage, output_queue, except_queue in zip(self.storages, output_queues, except_queues)]
threads = ([streaming.QueuedThread(storage.write_backup, output_queue,
except_queue, kwargs={"backup": backup}) for
storage, output_queue, except_queue in
zip(self.storages, output_queues, except_queues)])
for thread in threads:
thread.daemon = True
thread.start()
StorageManager(rich_queue, output_queues).transmit()
for thread in threads:
thread.join()
@ -62,8 +66,8 @@ class MultipleStorage(base.Storage):
got_exception = None
for except_queue in except_queues:
got_exception = (handle_exception_queue(except_queue)
or got_exception)
got_exception = (handle_exception_queue(except_queue) or
got_exception)
if (got_exception):
raise StorageException("Storage error. Failed to write backup.")
@ -90,23 +94,23 @@ class MultipleStorage(base.Storage):
self.storages = storages
def download_freezer_meta_data(self, backup):
# TODO. Need to implement.
# TODO(DEKLAN): Need to implement.
pass
def get_file(self, from_path, to_path):
# TODO. Need to implement.
# TODO(DEKLAN): Need to implement.
pass
def meta_file_abs_path(self, backup):
# TODO. Need to implement.
# TODO(DEKLAN): Need to implement.
pass
def upload_freezer_meta_data(self, backup, meta_dict):
# TODO. Need to implement.
# TODO(DEKLAN): Need to implement.
pass
class StorageManager:
class StorageManager(object):
def __init__(self, input_queue, output_queues):
"""

View File

@ -48,6 +48,7 @@ class BaseFreezerCliTest(base.BaseFreezerTest):
uri = cls.get_auth_url(),
cli_dir = '/usr/local/bin' # devstack default
)
cls.cli.cli_dir = ''
def delete_job(self, job_id):
    """Delete the scheduler job with the given id on the test node.

    Wrapped to stay within the pep8 line-length limit (E501), which is
    the stated purpose of this change.

    :param job_id: id of the job to remove via freezer-scheduler
    """
    self.cli.freezer_scheduler(
        action='job-delete',
        flags='-c test_node -j {}'.format(job_id))

View File

@ -79,7 +79,8 @@ class CheckSum(object):
Walk the files in path computing the checksum for each one and updates
the concatenation checksum for the final result
"""
self.count = utils.walk_path(self.path, self.exclude, self.ignorelinks, self.get_hash)
self.count = utils.walk_path(self.path, self.exclude,
self.ignorelinks, self.get_hash)
return self._increment_hash
@ -92,7 +93,8 @@ class CheckSum(object):
:type filename: string
:return: string containing the hash of the given file
"""
if os.path.isfile(filepath) and not (os.path.islink(filepath) and self.ignorelinks):
if (os.path.isfile(filepath) and not (
os.path.islink(filepath) and self.ignorelinks)):
file_hash = self.hashfile(open(filepath, 'rb'))
else:
file_hash = self.hashstring(filepath)

View File

@ -23,7 +23,7 @@ from six.moves import cStringIO
from freezer.utils import utils
class Config:
class Config(object):
@staticmethod
def parse(config_path):
@ -68,6 +68,7 @@ EXPORT = re.compile(r"^\s*export\s+([^=^#^\s]+)\s*=\s*([^#^\n]*)\s*$",
INI = re.compile(r"^\s*([^=#\s]+)\s*=[\t]*([^#\n]*)\s*$", re.MULTILINE)
def osrc_parse(lines):
"""
:param lines:
@ -76,6 +77,7 @@ def osrc_parse(lines):
"""
return find_all(EXPORT, lines)
def ini_parse(lines):
"""
:param lines:
@ -89,7 +91,7 @@ def ini_parse(lines):
return dict(parser.items('default'))
except Exception as e:
try:
# TODO: Remove the parsing of ini-like file via regex
# TODO(ANONYMOUS): Remove the parsing of ini-like file via regex
conf = find_all(INI, lines)
logging.warning("Using non-INI files for database configuration "
"file is deprecated. Falling back to Regex.")
@ -99,6 +101,7 @@ def ini_parse(lines):
logging.warning("Couldn't parse non-INI config file using Regex")
raise
def find_all(regex, lines):
    """Collect every (key, value) pair matched by *regex* in *lines*.

    :param regex: compiled regex whose findall() yields (key, value) tuples
    :param lines: text to scan
    :return: dict mapping stripped keys to stripped, dequoted values
    """
    return {key.strip(): utils.dequote(value.strip())
            for key, value in regex.findall(lines)}

View File

@ -23,7 +23,7 @@ class Wait(Exception):
pass
class RichQueue:
class RichQueue(object):
"""
:type data_queue: Queue.Queue
"""
@ -84,7 +84,8 @@ class RichQueue:
class QueuedThread(threading.Thread):
def __init__(self, target, rich_queue, exception_queue, args=(), kwargs=None):
def __init__(self, target, rich_queue, exception_queue,
args=(), kwargs=None):
"""
:type args: collections.Iterable
:type kwargs: dict

View File

@ -202,7 +202,7 @@ def date_to_timestamp(date):
return int(time.mktime(opt_backup_date.timetuple()))
class Bunch:
class Bunch(object):
def __init__(self, **kwds):
    # Expose every keyword argument as an instance attribute, so
    # Bunch(a=1).a == 1 without declaring fields up front.
    self.__dict__.update(kwds)
@ -211,7 +211,7 @@ class Bunch:
return self.__dict__.get(item)
class ReSizeStream:
class ReSizeStream(object):
"""
Iterator/File-like object for changing size of chunk in stream
"""
@ -294,8 +294,8 @@ def tar_path():
path_to_binaries = os.path.dirname(os.path.abspath(__file__))
return '{0}\\bin\\tar.exe'.format(path_to_binaries)
tar = (get_executable_path('gnutar') or get_executable_path('gtar')
or get_executable_path('tar'))
tar = (get_executable_path('gnutar') or get_executable_path('gtar') or
get_executable_path('tar'))
if not tar:
raise Exception('Please install gnu tar (gtar) as it is a '
'mandatory requirement to use freezer.')

View File

@ -26,7 +26,7 @@ def is_windows():
return True if sys.platform == 'win32' else False
class DisableFileSystemRedirection:
class DisableFileSystemRedirection(object):
"""
When a 32 bit program runs on a 64 bit operating system the paths
to C:/Windows/System32 automatically get redirected to the 32 bit
@ -36,8 +36,10 @@ class DisableFileSystemRedirection:
def __init__(self):
if is_windows():
self._disable = ctypes.windll.kernel32.Wow64DisableWow64FsRedirection
self._revert = ctypes.windll.kernel32.Wow64RevertWow64FsRedirection
self._disable = (ctypes.windll.kernel32.
Wow64DisableWow64FsRedirection)
self._revert = (ctypes.windll.kernel32.
Wow64RevertWow64FsRedirection)
else:
raise Exception("Useless if not windows")

View File

@ -68,13 +68,6 @@ commands = flake8 freezer
commands = pylint --rcfile .pylintrc freezer
[flake8]
# it's not a bug that we aren't using all of hacking
# H102 -> apache2 license exists
# H103 -> license is apache
# H201 -> no bare excepts
# H501 -> don't use locals() for str formatting
# H903 -> \n not \r\n
ignore = H
select = H102, H103, H201, H501, H903, H201, H306, H301, H233
ignore = H405,H404,H403,H401
show-source = True
exclude = .venv,.tox,dist,doc,test,*egg,tests