Parallel backup
Configuration changes for multiple storages. Creates MultipleStorage to encapsulate all logic for multiple-storage support.
Implements blueprint: parallel-backup-config
Change-Id: I339ef8236d23c7d254724cab2eef92ef41c9fa1c
This commit is contained in:
parent
57e3d6e97a
commit
fd6f8a4788
50 README.rst
@@ -597,6 +597,56 @@ allows create a glance image from volume and upload to swift.

To use standard cinder backups please provide the --cindernative-vol-id argument.


Parallel backup
---------------

Parallel backup can be executed only via a config file. In the config file you
should create one or more additional sections whose names start with "storage:"

Example: [storage:my_storage1], [storage:ssh], [storage:storage3]

Each storage section should have a 'container' argument and all parameters
related to that storage

Example: ssh-username, ssh-port

For swift storage you should provide an additional parameter called 'osrc'.
'osrc' should be the path to a file with OpenStack credentials, like:

unset OS_DOMAIN_NAME
export OS_AUTH_URL=http://url:5000/v3
export OS_PROJECT_NAME=project_name
export OS_USERNAME=username
export OS_PASSWORD=secret_password
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_IDENTITY_API_VERSION=3
export OS_AUTH_VERSION=3
export OS_CACERT=/etc/ssl/certs/ca-certificates.crt
export OS_ENDPOINT_TYPE=internalURL

Example of a config file for two local storages and one swift storage:

[default]
action = backup
mode = fs
path_to_backup = /foo/
backup_name = mytest6
always_level = 2
max_segment_size = 67108864
container = /tmp/backup/
storage = local

[storage:first]
storage = local
container = /tmp/backup1/

[storage:second]
storage = local
container = /tmp/backup2/

[storage:swift]
storage = swift
container = test
osrc = openrc.osrc
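
With a config file like this, a single invocation backs up /foo/ once and
streams it to both local containers and to swift in parallel. Assuming the
file above is saved as backup.ini (the file name is only an example; --config
is the option the argument parser below reads), the run would look like:

    freezerc --config backup.ini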

freezer-scheduler
-----------------

The freezer-scheduler is one of the two freezer components which is run on
@@ -16,7 +16,11 @@

from __future__ import print_function

from freezer import winutils
from freezer import config

import argparse

try:
    import configparser
except ImportError:
@@ -31,7 +35,6 @@ from distutils import spawn as distspawn

from oslo_utils import encodeutils

from freezer import winutils
from tempfile import NamedTemporaryFile

home = expanduser("~")
@@ -106,7 +109,11 @@ def backup_arguments():
        "from command line provided take precedence."))

    args, remaining_argv = conf_parser.parse_known_args()
    defaults = enrich_defaults(args.config)
    defaults = DEFAULT_PARAMS.copy()
    conf = None
    if args.config:
        conf = config.Config.parse(args.config)
        defaults.update(conf.default)

    # Generate a new argparse instance and inherit options from config parse
    arg_parser = argparse.ArgumentParser(
@@ -431,16 +438,6 @@ def backup_arguments():
        print(encodeutils.safe_decode(
            '{}'.format(err_msg)), file=sys.stderr)

    # The containers used by freezer to execute backups need to have the
    # freezer_ prefix in the name. If the user-provided container doesn't
    # have the prefix, it is automatically added also to the container
    # segments name. This is done to quickly identify the containers
    # that contain freezer generated backups
    if not backup_args.container.startswith('freezer_') and \
            backup_args.storage == 'swift':
        backup_args.container = 'freezer_{0}'.format(
            backup_args.container)

    # If hostname is not set, hostname of the current node will be used
    if not backup_args.hostname:
        backup_args.__dict__['hostname'] = socket.gethostname()
@@ -452,6 +449,9 @@ def backup_arguments():

    # MySQLdb object
    backup_args.__dict__['mysql_db_inst'] = ''
    backup_args.__dict__['storages'] = None
    if conf and conf.storages:
        backup_args.__dict__['storages'] = conf.storages

    # Windows volume
    backup_args.__dict__['shadow'] = ''
@@ -463,7 +463,7 @@ def backup_arguments():
        backup_args.path_to_backup[:3]

    # Freezer version
    backup_args.__dict__['__version__'] = '1.1.3'
    backup_args.__dict__['__version__'] = '1.2.0'

    # todo(enugaev) move it to new command line param backup_media
    backup_media = 'fs'
@@ -28,6 +28,7 @@ from freezer.winutils import start_sql_server
from freezer.winutils import stop_sql_server
from freezer.winutils import use_shadow
from freezer.winutils import is_windows
from freezer import config

home = expanduser("~")

@@ -41,31 +42,18 @@ def backup_mode_sql_server(backup_opt_dict, storage):
    uploaded. A sql_server.conf_file is required for this operation.
    """
    with open(backup_opt_dict.sql_server_conf, 'r') as sql_conf_file_fd:
        for line in sql_conf_file_fd:
            line = line.strip().split('#', 1)[0]
            if not line:
                continue

            key, value = line.split('=')
            # remove white spaces
            key = key.strip()
            value = value.strip()

            if key == 'instance':
                db_instance = utils.dequote(value)
                backup_opt_dict.sql_server_instance = db_instance
                continue
            else:
                raise Exception('Please indicate a valid SQL Server instance')

        parsed_config = config.ini_parse(sql_conf_file_fd.read())
    sql_server_instance = parsed_config["instance"]
    # Dirty hack - please remove any modification of backup_opt_dict
    backup_opt_dict.sql_server_instance = sql_server_instance
    try:
        stop_sql_server(backup_opt_dict.sql_server_instance)
        stop_sql_server(sql_server_instance)
        backup(backup_opt_dict, storage, backup_opt_dict.engine)
    finally:
        if not backup_opt_dict.vssadmin:
            # if vssadmin is false, wait until the backup is complete
            # to start sql server again
            start_sql_server(backup_opt_dict.sql_server_instance)
            start_sql_server(sql_server_instance)


def backup_mode_mysql(backup_opt_dict, storage):
@@ -86,47 +74,17 @@ def backup_mode_mysql(backup_opt_dict, storage):

    if not backup_opt_dict.mysql_conf:
        raise ValueError('MySQL: please provide a valid config file')
    # Open the file provided in backup_args.mysql_conf and extract the
    # db host, name, user, password and port.
    db_user = db_host = db_pass = False
    # Use the default mysql port if not provided
    db_port = 3306
    with open(backup_opt_dict.mysql_conf, 'r') as mysql_file_fd:
        for line in mysql_file_fd:
            line = line.strip().split('#', 1)[0]
            if not line:
                continue

            key, value = line.split('=')
            # remove white spaces
            key = key.strip()
            value = value.strip()

            if key == 'host':
                db_host = utils.dequote(value)
                continue

            if key == 'user':
                db_user = utils.dequote(value)
                continue

            if key == 'password':
                db_pass = utils.dequote(value)
                continue

            if key == 'port':
                db_port = utils.dequote(value)
                try:
                    db_port = int(db_port)
                except ValueError:
                    raise ValueError('[*] MySQL port should be integer')
                continue
        parsed_config = config.ini_parse(mysql_file_fd.read())

    # Initialize the DB object and connect to the db according to
    # the db mysql backup file config
    try:
        backup_opt_dict.mysql_db_inst = MySQLdb.connect(
            host=db_host, port=db_port, user=db_user, passwd=db_pass)
            host=parsed_config.get("host", False),
            port=int(parsed_config.get("port", 3306)),
            user=parsed_config.get("user", False),
            passwd=parsed_config.get("password", False))
    except Exception as error:
        raise Exception('[*] MySQL: {0}'.format(error))
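
The rewritten code path above expects mysql_conf to be a plain key = value
file that config.ini_parse can digest. A minimal example of its contents (all
values hypothetical):

host = 127.0.0.1
port = 3306
user = backup_user
password = secret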

@@ -298,7 +256,7 @@ def backup(backup_opt_dict, storage, engine):

    :param backup_opt_dict:
    :param storage:
    :type storage: freezer.storage.Storage
    :type storage: freezer.storage.base.Storage
    :param engine: Backup Engine
    :type engine: freezer.engine.engine.BackupEngine
    :return:
83 freezer/config.py Normal file
@@ -0,0 +1,83 @@
# (c) Copyright 2014,2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from freezer import utils

try:
    import configparser
except ImportError:
    import ConfigParser as configparser
import logging
import os
import re


class Config:

    @staticmethod
    def parse(config_path):
        if config_path:
            if not os.path.exists(config_path):
                logging.error("[*] Critical Error: Configuration file {0} not"
                              " found".format(config_path))
                raise Exception("Configuration file {0} not found !".format(
                    config_path))
            config = configparser.SafeConfigParser()
            config.read([config_path])
            sections = config.sections()
            storages = []
            default_options = {}
            for section in sections:
                dict = {}
                for option in config.options(section):
                    option_value = config.get(section, option)
                    if option_value in ('False', 'None'):
                        option_value = False
                    dict[option] = option_value
                if section.startswith("storage:"):
                    storages.append(dict)
                else:
                    default_options.update(dict)
            return Config(default_options, storages)

    def __init__(self, default, storages):
        """
        :param default:
        :type default: dict
        :param storages:
        :type storages: list[dict]
        :return:
        """
        self.default = default
        self.storages = storages


EXPORT = re.compile(r"^\s*export\s+([^=^#^\s]+)\s*=\s*([^#^\n]*)\s*$",
                    re.MULTILINE)

# re.MULTILINE is needed here as well, since ini_parse is fed whole files
INI = re.compile(r"^\s*([^=#\s]+)\s*=[\t]*([^#\n]*)\s*$", re.MULTILINE)


def osrc_parse(lines):
    """
    :param lines:
    :type lines: str
    :return:
    """
    return find_all(EXPORT, lines)


def ini_parse(lines):
    return find_all(INI, lines)


def find_all(regex, lines):
    return dict([(k.strip(), utils.dequote(v.strip())) for k, v in
                 regex.findall(lines)])
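
For reference, a short sketch of what the two parsers above return. The input
strings here are hypothetical; find_all() strips whitespace and dequotes
values via utils.dequote:

from freezer import config

# {'OS_USERNAME': 'admin', 'OS_PASSWORD': 'secret'}
config.osrc_parse('export OS_USERNAME="admin"\nexport OS_PASSWORD=secret')

# {'host': '127.0.0.1', 'port': '3306'}
config.ini_parse('host = 127.0.0.1\nport = 3306')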
@@ -18,6 +18,7 @@ Freezer general utils functions
import logging
import multiprocessing
import time
from freezer.streaming import RichQueue, QueuedThread

from freezer import streaming
from freezer import utils
@@ -55,20 +56,6 @@ class BackupEngine(object):
    tar it is a thread that creates gnutar subprocess and feeds chunks
    to stdin of this thread.
    """
    @property
    def main_storage(self):
        """
        Currently it is storage for restore, we can have multiple storages and
        do a parallel backup on them, but when we are doing a restore, we need
        to have one specified storage.

        PS. Should be changed to select the most up-to-date storage from
        existing ones
        :rtype: freezer.storage.storage.Storage
        :return:
        """
        raise NotImplementedError("Should have implemented this")

    def backup_stream(self, backup_path, rich_queue, manifest_path):
        """
        :param rich_queue:
@@ -78,17 +65,27 @@ class BackupEngine(object):
        """
        rich_queue.put_messages(self.backup_data(backup_path, manifest_path))

    def backup(self, backup_path, backup):
    def backup(self, backup_path, backup, queue_size=2):
        """
        Here we know the location of all interesting artifacts like metadata.
        Should return stream for storing data.
        :return: stream
        """
        manifest = self.main_storage.download_meta_file(backup)
        streaming.stream(
            self.backup_stream,
            {"backup_path": backup_path, "manifest_path": manifest},
            self.main_storage.write_backup, {"backup": backup})
        manifest = backup.storage.download_meta_file(backup)
        input_queue = RichQueue(queue_size)
        read_stream = QueuedThread(self.backup_stream,
                                   input_queue,
                                   kwargs={"backup_path": backup_path,
                                           "manifest_path": manifest})
        write_stream = QueuedThread(backup.storage.write_backup,
                                    input_queue,
                                    kwargs={"backup": backup})
        read_stream.daemon = True
        write_stream.daemon = True
        read_stream.start()
        write_stream.start()
        read_stream.join()
        write_stream.join()
        self.post_backup(backup, manifest)

    def post_backup(self, backup, manifest_file):
@@ -104,7 +101,7 @@ class BackupEngine(object):
        # Chunk size is set by RESP_CHUNK_SIZE and sent to the write
        # pipe
        read_pipe.close()
        for block in self.main_storage.backup_blocks(backup):
        for block in backup.storage.backup_blocks(backup):
            write_pipe.send_bytes(block)

        # Closing the pipe after checking no data
@@ -77,7 +77,6 @@ class BackupJob(Job):
            logging.error('Error while sync exec: {0}'.format(err))
        except Exception as error:
            logging.error('Error while sync exec: {0}'.format(error))
        self.storage.prepare()

        if self.conf.mode == 'fs':
            backup.backup(self.conf, self.storage, self.engine)

@@ -23,11 +23,13 @@ import json

from freezer.arguments import backup_arguments
from freezer.bandwidth import monkeypatch_socket_bandwidth
from freezer import config
from freezer import job
from freezer.osclients import ClientManager
from freezer.storage import swift
from freezer.storage import local
from freezer.storage import ssh
from freezer.storage import multiple
from freezer import utils
from freezer.engine.tar import tar_engine
from freezer import winutils
@@ -105,30 +107,17 @@ def freezer_main(backup_args):

    validator.validate(backup_args)

    if backup_args.storage == "swift":
        options = utils.OpenstackOptions.create_from_env()
        identity_api_version = (backup_args.os_identity_api_version or
                                options.identity_api_version)
        client_manager = ClientManager(
            options=options,
            insecure=backup_args.insecure,
            swift_auth_version=identity_api_version,
            dry_run=backup_args.dry_run)

        storage = swift.SwiftStorage(
            client_manager, backup_args.container, backup_args.work_dir,
            backup_args.max_segment_size)
        backup_args.__dict__['client_manager'] = client_manager
    elif backup_args.storage == "local":
        storage = local.LocalStorage(backup_args.container,
                                     backup_args.work_dir)
    elif backup_args.storage == "ssh":
        storage = ssh.SshStorage(
            backup_args.container, backup_args.work_dir, backup_args.ssh_key,
            backup_args.ssh_username, backup_args.ssh_host,
            backup_args.ssh_port)
    work_dir = backup_args.work_dir
    os_identity = backup_args.os_identity_api_version
    max_segment_size = backup_args.max_segment_size
    if backup_args.storages:
        storage = multiple.MultipleStorage(
            work_dir,
            [storage_from_dict(x, work_dir, max_segment_size, os_identity)
             for x in backup_args.storages])
    else:
        raise Exception("No storage found for name " + backup_args.storage)
        storage = storage_from_dict(backup_args.__dict__, work_dir,
                                    max_segment_size, os_identity)

    backup_args.__dict__['engine'] = tar_engine.TarBackupEngine(
        backup_args.compression,
@@ -185,6 +174,44 @@ def fail(exit_code, e, quiet, do_log=True):
    return exit_code


def parse_osrc(file_name):
    with open(file_name, 'r') as osrc_file:
        return config.osrc_parse(osrc_file.read())


def storage_from_dict(backup_args, work_dir, max_segment_size,
                      os_identity_api_version=None):
    storage_name = backup_args['storage']
    container = backup_args['container']
    if storage_name == "swift":
        if "osrc" in backup_args:
            options = utils.OpenstackOptions.create_from_dict(
                parse_osrc(backup_args['osrc']))
        else:
            options = utils.OpenstackOptions.create_from_env()
        identity_api_version = (os_identity_api_version or
                                options.identity_api_version)
        client_manager = ClientManager(
            options=options,
            insecure=backup_args.get('insecure') or False,
            swift_auth_version=identity_api_version,
            dry_run=backup_args.get('dry_run') or False)

        storage = swift.SwiftStorage(
            client_manager, container, work_dir, max_segment_size)
        backup_args['client_manager'] = client_manager
    elif storage_name == "local":
        storage = local.LocalStorage(container, work_dir)
    elif storage_name == "ssh":
        storage = ssh.SshStorage(
            container, work_dir,
            backup_args['ssh_key'], backup_args['ssh_username'],
            backup_args['ssh_host'], int(backup_args.get('ssh_port', 22)))
    else:
        raise Exception("No storage found for name " + backup_args['storage'])
    return storage
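
A sketch of driving storage_from_dict with one of the README's storage
sections (the dict values and work_dir are hypothetical):

section = {'storage': 'local', 'container': '/tmp/backup1/'}
storage = storage_from_dict(section, '/tmp/freezer_work', 67108864)
# -> local.LocalStorage('/tmp/backup1/', '/tmp/freezer_work')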


def main():
    """Freezerc binary main execution"""
@@ -1,25 +1,23 @@
"""
(c) Copyright 2014,2015 Hewlett-Packard Development Company, L.P.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

"""

import re
import logging
# (c) Copyright 2014,2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from freezer import utils

import logging
import os
import re


class Storage(object):
    """
@@ -27,16 +25,63 @@ class Storage(object):
    class.
    """

    def __init__(self, work_dir, skip_prepare=False):
        self.work_dir = work_dir
        if not skip_prepare:
            self.prepare()

    def download_meta_file(self, backup):
        """
        Downloads meta_data to work_dir of previous backup.
        :type backup: freezer.storage.base.Backup
        :param backup: A backup or increment. Current backup is incremental,
        that means we should download tar_meta for detection new files and
        changes. If backup.tar_meta is false, raise Exception
        :return:
        """
        utils.create_dir(self.work_dir)
        if backup.level == 0:
            return utils.path_join(self.work_dir, backup.tar())
        meta_backup = backup.full_backup.increments[backup.level - 1]
        if not meta_backup.tar_meta:
            raise ValueError('Latest update has no tar_meta')
        to_path = utils.path_join(self.work_dir, meta_backup.tar())
        if os.path.exists(to_path):
            os.remove(to_path)
        meta_backup.storage.get_file(
            meta_backup.storage.meta_file_abs_path(meta_backup), to_path)
        return to_path

    def meta_file_abs_path(self, backup):
        raise NotImplementedError("Should have implemented this")

    def get_file(self, from_path, to_path):
        raise NotImplementedError("Should have implemented this")

    def upload_meta_file(self, backup, meta_file):
        """
        :param backup:
        :type backup: freezer.storage.base.Backup
        :param meta_file:
        :return:
        """
        raise NotImplementedError("Should have implemented this")

    def backup_blocks(self, backup):
        """
        :param backup:
        :type backup: freezer.storage.base.Backup
        :return:
        """
        raise NotImplementedError("Should have implemented this")

    def write_backup(self, rich_queue, backup):
        """
        :param rich_queue:
        :param backup:
        :type backup: freezer.storage.base.Backup
        :return:
        """
        raise NotImplementedError("Should have implemented this")

    def prepare(self):
@@ -46,17 +91,6 @@ class Storage(object):
        """
        raise NotImplementedError("Should have implemented this")

    def find_all(self, hostname_backup_name):
        """
        Gets backups by backup_name and hostname
        :param hostname_backup_name:
        :type hostname_backup_name: str
        :rtype: list[Backup]
        :return: List of matched backups
        """
        return [b for b in self.get_backups()
                if b.hostname_backup_name == hostname_backup_name]

    def find_one(self, hostname_backup_name, recent_to_date=None):
        """
        :param hostname_backup_name:
@@ -81,12 +115,19 @@ class Storage(object):
                           if x.timestamp <= recent_to_date]
        return max(last_increments, key=lambda x: x.timestamp)

    def get_backups(self):
    def find_all(self, hostname_backup_name):
        """
        Gets backups by backup_name and hostname
        :param hostname_backup_name:
        :type hostname_backup_name: str
        :rtype: list[freezer.storage.base.Backup]
        :return: List of matched backups
        """
        raise NotImplementedError("Should have implemented this")

    def remove_backup(self, backup):
        """
        :type backup: freezer.storage.Backup
        :type backup: freezer.storage.base.Backup
        :param backup:
        :return:
        """
@@ -102,7 +143,7 @@ class Storage(object):
        backups = [b for b in backups
                   if b.latest_update.timestamp < remove_older_timestamp]
        for b in backups:
            self.remove_backup(b)
            b.storage.remove_backup(b)

    def info(self):
        raise NotImplementedError("Should have implemented this")
@@ -114,14 +155,14 @@ class Storage(object):
        prev_backup = self._find_previous_backup(
            backups, no_incremental, max_level, always_level,
            restart_always_level)
        time_stamp = time_stamp or utils.DateTime.now().timestamp
        if prev_backup and prev_backup.tar_meta:
            return Backup(
                hostname_backup_name,
                time_stamp or utils.DateTime.now().timestamp,
                self, hostname_backup_name, time_stamp,
                prev_backup.level + 1, prev_backup.full_backup)
        else:
            return Backup(hostname_backup_name,
                          time_stamp or utils.DateTime.now().timestamp)
            return Backup(
                self, hostname_backup_name, time_stamp)

    @staticmethod
    def _find_previous_backup(backups, no_incremental, max_level, always_level,
@@ -129,18 +170,18 @@ class Storage(object):
        """

        :param backups:
        :type backups: list[Backup]
        :type backups: list[freezer.storage.base.Backup]
        :param no_incremental:
        :param max_level:
        :param always_level:
        :param restart_always_level:
        :rtype: freezer.storage.storage.Backup
        :rtype: freezer.storage.base.Backup
        :return:
        """
        if no_incremental or not backups:
            return None
        incremental_backup = max(backups, key=lambda x: x.timestamp)
        """:type : freezer.storage.Backup"""
        """:type : freezer.storage.base.Backup"""
        latest_update = incremental_backup.latest_update
        if max_level and max_level <= latest_update.level:
            return None
@@ -172,9 +213,10 @@ class Backup:
    """
    PATTERN = r'(.*)_(\d+)_(\d+?)$'

    def __init__(self, hostname_backup_name, timestamp, level=0,
    def __init__(self, storage, hostname_backup_name, timestamp, level=0,
                 full_backup=None, tar_meta=False):
        """
        :type storage: freezer.storage.base.Storage
        :param hostname_backup_name: name (hostname_backup_name) of backup
        :type hostname_backup_name: str
        :param timestamp: timestamp of backup (when it was executed)
@@ -196,6 +238,7 @@ class Backup:
        self._increments = {0: self}
        self._latest_update = self
        self._level = level
        self.storage = storage
        if not full_backup:
            self._full_backup = self
        else:
@@ -250,13 +293,13 @@ class Backup:
        return self.__repr__()

    @staticmethod
    def parse_backups(names):
    def parse_backups(names, storage):
        """
        No side effect version of get_backups
        :param names:
        :type names: list[str] - file names of backups.
        :type storage: freezer.storage.base.Storage
        File name should be something like that host_backup_timestamp_level
        :rtype: list[Backup]
        :rtype: list[freezer.storage.base.Backup]
        :return: list of zero level backups
        """
        prefix = 'tar_metadata_'
@@ -264,7 +307,7 @@ class Backup:
                          for x in names if x.startswith(prefix)])
        backup_names = [x for x in names if not x.startswith(prefix)]
        backups = []
        """:type: list[freezer.storage.BackupRepr]"""
        """:type: list[freezer.storage.base.BackupRepr]"""
        for name in backup_names:
            try:
                backup = Backup._parse(name)
@@ -276,17 +319,18 @@ class Backup:
                              .format(name))
        backups.sort(key=lambda x: (x.timestamp, x.level))
        zero_backups = []
        """:type: list[freezer.storage.Backup]"""
        """:type: list[freezer.storage.base.Backup]"""
        last_backup = None

        """:type last_backup: freezer.storage.Backup"""
        """:type last_backup: freezer.storage.base.Backup"""
        for backup in backups:
            if backup.level == 0:
                last_backup = backup.backup()
                last_backup = backup.backup(storage)
                zero_backups.append(last_backup)
            else:
                if last_backup:
                    last_backup.add_increment(backup.backup(last_backup))
                    last_backup.add_increment(backup.backup(storage,
                                                            last_backup))
                else:
                    logging.error("Incremental backup without parent: {0}"
                                  .format(backup))
@@ -341,7 +385,14 @@ class BackupRepr:
        self.level = level
        self.tar_meta = tar_meta

    def backup(self, full_backup=None):
        return Backup(self.hostname_backup_name, self.timestamp,
    def backup(self, storage, full_backup=None):
        """

        :param storage:
        :type storage: freezer.storage.base.Storage
        :param full_backup: freezer.storage.base.Backup
        :return:
        """
        return Backup(storage, self.hostname_backup_name, self.timestamp,
                      level=self.level, full_backup=full_backup,
                      tar_meta=self.tar_meta)
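
The naming convention behind PATTERN is hostname_backupname_timestamp_level.
A quick sketch of parsing such names (the names are hypothetical; passing
storage=None mirrors the unit tests further below):

from freezer.storage import base

zero_backups = base.Backup.parse_backups(
    ["web01_mysql_1000_0", "web01_mysql_2000_1"], None)
# one zero-level backup for "web01_mysql" with a level-1 increment attached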
@@ -13,27 +13,33 @@
# limitations under the License.


from freezer.storage import storage
from freezer.storage import base
from freezer import utils
import os


class FsLikeStorage(storage.Storage):
class FsLikeStorage(base.Storage):
    DEFAULT_CHUNK_SIZE = 10000000

    def __init__(self, storage_directory, work_dir,
                 chunk_size=DEFAULT_CHUNK_SIZE):
                 chunk_size=DEFAULT_CHUNK_SIZE, skip_prepare=False):
        self.storage_directory = storage_directory
        self.work_dir = work_dir
        self.chunk_size = chunk_size
        super(FsLikeStorage, self).__init__(work_dir,
                                            skip_prepare=skip_prepare)

    def backup_to_file_path(self, backup):
        """

        :param backup:
        :type backup: freezer.storage.base.Backup
        :return:
        """
        return utils.path_join(self._zero_backup_dir(backup), backup)

    def _zero_backup_dir(self, backup):
        """
        :param backup:
        :type backup: freezer.storage.Backup
        :type backup: freezer.storage.base.Backup
        :return:
        """
        return utils.path_join(self.storage_directory,
@@ -46,44 +52,37 @@ class FsLikeStorage(storage.Storage):
    def info(self):
        pass

    def download_meta_file(self, backup):
        """
        :type backup: freezer.storage.Backup
        :param backup:
        :return:
        """
        utils.create_dir(self.work_dir)
        if backup.level == 0:
            return utils.path_join(self.work_dir, backup.tar())
        meta_backup = backup.full_backup.increments[backup.level - 1]
    def meta_file_abs_path(self, backup):
        zero_backup = self._zero_backup_dir(backup)
        from_path = utils.path_join(zero_backup, meta_backup.tar())
        to_path = utils.path_join(self.work_dir, meta_backup.tar())
        if os.path.exists(to_path):
            os.remove(to_path)
        self.get_file(from_path, to_path)
        return to_path
        return utils.path_join(zero_backup, backup.tar())

    def upload_meta_file(self, backup, meta_file):
        """

        :param backup:
        :type backup: freezer.storage.base.Backup
        :param meta_file:
        :return:
        """
        zero_backup = self._zero_backup_dir(backup)
        to_path = utils.path_join(zero_backup, backup.tar())
        self.put_file(meta_file, to_path)

    def get_backups(self):
        backup_names = self.listdir(self.storage_directory)
    def find_all(self, hostname_backup_name):
        backups = []
        for backup_name in backup_names:
            backup_dir = utils.path_join(self.storage_directory, backup_name)
            timestamps = self.listdir(backup_dir)
            for timestamp in timestamps:
                increments = \
                    self.listdir(utils.path_join(backup_dir, timestamp))
                backups.extend(storage.Backup.parse_backups(increments))
        backup_dir = utils.path_join(self.storage_directory,
                                     hostname_backup_name)
        utils.create_dir_tree(backup_dir)
        timestamps = self.listdir(backup_dir)
        for timestamp in timestamps:
            increments = \
                self.listdir(utils.path_join(backup_dir, timestamp))
            backups.extend(base.Backup.parse_backups(increments, self))
        return backups

    def remove_backup(self, backup):
        """
        :type backup: freezer.storage.Backup
        :type backup: freezer.storage.base.Backup
        :return:
        """
        self.rmtree(self._zero_backup_dir(backup))
@@ -92,7 +91,7 @@ class FsLikeStorage(storage.Storage):
        """
        Stores backup in storage
        :type rich_queue: freezer.streaming.RichQueue
        :type backup: freezer.storage.storage.Backup
        :type backup: freezer.storage.base.Backup
        """
        filename = self.backup_to_file_path(backup)
        if backup.level == 0:
@@ -103,6 +102,12 @@ class FsLikeStorage(storage.Storage):
                b_file.write(message)

    def backup_blocks(self, backup):
        """

        :param backup:
        :type backup: freezer.storage.base.Backup
        :return:
        """
        filename = self.backup_to_file_path(backup)
        with self.open(filename, 'rb') as backup_file:
            while True:
@@ -115,9 +120,6 @@ class FsLikeStorage(storage.Storage):
    def listdir(self, directory):
        raise NotImplementedError("Should have implemented this")

    def get_file(self, from_path, to_path):
        raise NotImplementedError("Should have implemented this")

    def put_file(self, from_path, to_path):
        raise NotImplementedError("Should have implemented this")
103 freezer/storage/multiple.py Normal file
@@ -0,0 +1,103 @@
# (c) Copyright 2014,2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from freezer.storage import base
from freezer import streaming

import logging


class MultipleStorage(base.Storage):
    def remove_backup(self, backup):
        raise Exception()

    def backup_blocks(self, backup):
        raise Exception()

    def info(self):
        for s in self.storages:
            s.info()

    def write_backup(self, rich_queue, backup):
        output_queues = [streaming.RichQueue() for x in self.storages]
        threads = [streaming.QueuedThread(
            storage.write_backup, queue, kwargs={"backup": backup}) for
            storage, queue in zip(self.storages, output_queues)]
        for thread in threads:
            thread.daemon = True
            thread.start()
        StorageManager(rich_queue, output_queues).transmit()
        for thread in threads:
            thread.join()

    def find_all(self, hostname_backup_name):
        backups = [b.find_all(hostname_backup_name) for b in self.storages]
        # flatten the list
        return [item for sublist in backups for item in sublist]

    def prepare(self):
        pass

    def upload_meta_file(self, backup, meta_file):
        for storage in self.storages:
            storage.upload_meta_file(backup, meta_file)

    def __init__(self, work_dir, storages):
        """
        :param storages:
        :type storages: list[freezer.storage.base.Storage]
        :return:
        """
        super(MultipleStorage, self).__init__(work_dir)
        self.storages = storages


class StorageManager:

    def __init__(self, input_queue, output_queues):
        """
        :type input_queue: streaming.RichQueue
        :param input_queue:
        :type output_queues: collections.Iterable[streaming.RichQueue]
        :param output_queues:
        :return:
        """
        self.input_queue = input_queue
        self.output_queues = output_queues
        self.broken_output_queues = set()

    def send_message(self, message, finish=False):
        for output_queue in self.output_queues:
            if output_queue not in self.broken_output_queues:
                try:
                    if finish:
                        output_queue.finish()
                    else:
                        output_queue.put(message)
                except Exception as e:
                    logging.exception(e)
                    StorageManager.one_fails_all_fail(
                        self.input_queue, self.output_queues)
                    self.broken_output_queues.add(output_queue)

    def transmit(self):
        for message in self.input_queue.get_messages():
            self.send_message(message)
        self.send_message("", True)

    @staticmethod
    def one_fails_all_fail(input_queue, output_queues):
        input_queue.force_stop()
        for output_queue in output_queues:
            output_queue.force_stop()
        raise Exception("All fail")

@@ -39,8 +39,6 @@ class SshStorage(fslike.FsLikeStorage):
        :type storage_directory: str
        :return:
        """
        super(SshStorage, self).__init__(storage_directory, work_dir,
                                         chunk_size)
        self.ssh_key_path = ssh_key_path
        self.remote_username = remote_username
        self.remote_ip = remote_ip
@@ -48,6 +46,8 @@ class SshStorage(fslike.FsLikeStorage):
        self.ssh = None
        self.ftp = None
        self.init()
        super(SshStorage, self).__init__(storage_directory, work_dir,
                                         chunk_size)

    def init(self):
        ssh = paramiko.SSHClient()
@@ -18,13 +18,11 @@ limitations under the License.
import json
import time
import logging
import os

from freezer import utils
from freezer.storage import storage
from freezer.storage import base


class SwiftStorage(storage.Storage):
class SwiftStorage(base.Storage):
    """
    :type client_manager: freezer.osclients.ClientManager
    """
@@ -32,17 +30,25 @@ class SwiftStorage(storage.Storage):
    RESP_CHUNK_SIZE = 10000000

    def __init__(self, client_manager, container, work_dir, max_segment_size,
                 chunk_size=RESP_CHUNK_SIZE):
                 chunk_size=RESP_CHUNK_SIZE, skip_prepare=False):
        """
        :type client_manager: freezer.osclients.ClientManager
        :type container: str
        """
        self.client_manager = client_manager
        self.container = container
        # The containers used by freezer to execute backups need to have the
        # freezer_ prefix in the name. If the user-provided container doesn't
        # have the prefix, it is automatically added also to the container
        # segments name. This is done to quickly identify the containers
        # that contain freezer generated backups
        if not container.startswith('freezer_'):
            self.container = 'freezer_{0}'.format(container)
        else:
            self.container = container
        self.segments = u'{0}_segments'.format(container)
        self.work_dir = work_dir
        self.max_segment_size = max_segment_size
        self.chunk_size = chunk_size
        super(SwiftStorage, self).__init__(work_dir, skip_prepare)

    def swift(self):
        """
@@ -88,7 +94,7 @@ class SwiftStorage(storage.Storage):
        Upload Manifest to manage segments in Swift

        :param backup: Backup
        :type backup: freezer.storage.Backup
        :type backup: freezer.storage.base.Backup
        """
        self.client_manager.create_swift()
        headers = {'x-object-manifest':
@@ -139,6 +145,16 @@ class SwiftStorage(storage.Storage):
            ordered_container, indent=4,
            separators=(',', ': '), sort_keys=True)

    def meta_file_abs_path(self, backup):
        return backup.tar()

    def get_file(self, from_path, to_path):
        with open(to_path, 'ab') as obj_fd:
            iterator = self.swift().get_object(
                self.container, from_path, resp_chunk_size=self.chunk_size)[1]
            for obj_chunk in iterator:
                obj_fd.write(obj_chunk)

    def remove(self, container, prefix):
        for segment in self.swift().get_container(container, prefix=prefix)[1]:
            self.swift().delete_object(container, segment['name'])
@@ -147,6 +163,7 @@ class SwiftStorage(storage.Storage):
        """
        Removes backup, all increments, tar_meta and segments
        :param backup:
        :type backup: freezer.storage.base.Backup
        :return:
        """
        for i in range(backup.latest_update.level, -1, -1):
@@ -172,54 +189,26 @@ class SwiftStorage(storage.Storage):
        self.swift().put_object(self.container, package_name, "",
                                headers=headers)

    def get_backups(self):
    def find_all(self, hostname_backup_name):
        """
        :rtype: list[SwiftBackup]
        :rtype: list[freezer.storage.base.Backup]
        :return: list of zero level backups
        """
        try:
            files = self.swift().get_container(self.container)[1]
            names = [x['name'] for x in files if 'name' in x]
            return storage.Backup.parse_backups(names)
            return [b for b in base.Backup.parse_backups(names, self)
                    if b.hostname_backup_name == hostname_backup_name]
        except Exception as error:
            raise Exception('[*] Error: get_object_list: {0}'.format(error))

    def download_meta_file(self, backup):
    def backup_blocks(self, backup):
        """
        Downloads meta_data to work_dir of previous backup.

        :param backup: A backup or increment. Current backup is incremental,
        that means we should download tar_meta for detection new files and
        changes. If backup.tar_meta is false, raise Exception
        :type backup: freezer.storage.Backup
        :param backup:
        :type backup: freezer.storage.base.Backup
        :return:
        """
        utils.create_dir(self.work_dir)
        if backup.level == 0:
            return "{0}{1}{2}".format(self.work_dir, os.sep, backup.tar())

        meta_backup = backup.full_backup.increments[backup.level - 1]

        if not meta_backup.tar_meta:
            raise ValueError('Latest update has no tar_meta')

        tar_meta = meta_backup.tar()
        tar_meta_abs = "{0}{1}{2}".format(self.work_dir, os.sep, tar_meta)

        logging.info('[*] Downloading object {0} {1}'.format(
            tar_meta, tar_meta_abs))

        if os.path.exists(tar_meta_abs):
            os.remove(tar_meta_abs)

        with open(tar_meta_abs, 'ab') as obj_fd:
            iterator = self.swift().get_object(
                self.container, tar_meta, resp_chunk_size=self.chunk_size)[1]
            for obj_chunk in iterator:
                obj_fd.write(obj_chunk)
        return tar_meta_abs

    def backup_blocks(self, backup):
        for chunk in self.swift().get_object(
                self.container, backup, resp_chunk_size=self.chunk_size)[1]:
            yield chunk
@@ -228,7 +217,7 @@ class SwiftStorage(storage.Storage):
        """
        Upload object on the remote swift server
        :type rich_queue: freezer.streaming.RichQueue
        :type backup: SwiftBackup
        :type backup: freezer.storage.base.Backup
        """
        for block_index, message in enumerate(rich_queue.get_messages()):
            segment_package_name = u'{0}/{1}/{2}/{3}'.format(
@@ -16,7 +16,6 @@ limitations under the License.
Freezer general utils functions
"""
import threading
import logging
import Queue


@@ -24,47 +23,6 @@ class Wait(Exception):
    pass


class StorageManager:

    def __init__(self, input_queue, output_queues):
        """
        :type input_queue: streaming.RichQueue
        :param input_queue:
        :type output_queues: collections.Iterable[streaming.RichQueue]
        :param output_queues:
        :return:
        """
        self.input_queue = input_queue
        self.output_queues = output_queues
        self.broken_output_queues = set()

    def send_message(self, message, finish=False):
        for output_queue in self.output_queues:
            if output_queue not in self.broken_output_queues:
                try:
                    if finish:
                        output_queue.finish()
                    else:
                        output_queue.put(message)
                except Exception as e:
                    logging.exception(e)
                    StorageManager.one_fails_all_fail(
                        self.input_queue, self.output_queues)
                    self.broken_output_queues.add(output_queue)

    def transmit(self):
        for message in self.input_queue.get_messages():
            self.send_message(message)
        self.send_message("", True)

    @staticmethod
    def one_fails_all_fail(input_queue, output_queues):
        input_queue.force_stop()
        for output_queue in output_queues:
            output_queue.force_stop()
        raise Exception("All fail")


class RichQueue:
    """
    :type data_queue: Queue.Queue
@@ -145,27 +103,4 @@ class QueuedThread(threading.Thread):
            super(QueuedThread, self).run()
        except Exception as e:
            self.rich_queue.force_stop()
            raise e


def stream(read_function, read_function_kwargs,
           write_function, write_function_kwargs, queue_size=10):
    """
    :param queue_size:
    :type queue_size: int
    :return:
    """
    input_queue = RichQueue(queue_size)
    read_stream = QueuedThread(read_function, input_queue,
                               kwargs=read_function_kwargs)
    output_queue = RichQueue(queue_size)
    write_stream = QueuedThread(write_function, output_queue,
                                kwargs=write_function_kwargs)
    read_stream.daemon = True
    write_stream.daemon = True
    read_stream.start()
    write_stream.start()
    manager = StorageManager(input_queue, [output_queue])
    manager.transmit()
    read_stream.join()
    write_stream.join()
@@ -269,23 +269,6 @@ class FakeSwiftClient:
                    'x-object-meta-name': "name"}, "abc"]


class FakeRe:

    def __init__(self):
        return None

    @classmethod
    def search(self, opt1=True, opt2=True, opt3=True):
        return self

    @classmethod
    def group(self, opt1=True, opt2=True):
        if opt1 == 1:
            return 'testgroup'
        else:
            return '10'


class BackupOpt1:

    def __init__(self):
203 tests/storages/test_base.py Normal file
@@ -0,0 +1,203 @@
# (c) Copyright 2014,2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import unittest
from freezer.storage import base
import mock


class TestBackup(unittest.TestCase):
    def test_backup_parse(self):
        self.assertRaises(ValueError, base.Backup._parse, "asdfasdfasdf")
        backup = base.Backup._parse("test_name_host_1234_0")
        self.assertEqual(backup.level, 0)
        self.assertEqual(backup.timestamp, 1234)
        self.assertEqual(backup.hostname_backup_name, "test_name_host")

    def test_backup_creation(self):
        backup = base.Backup(None, "name", 1234)
        self.assertEqual(backup.hostname_backup_name, "name")
        self.assertEqual(backup.timestamp, 1234)
        self.assertEqual(backup.level, 0)
        self.assertEqual(backup.latest_update.level, 0)
        self.assertEqual(backup.latest_update.timestamp, 1234)
        self.assertEqual(backup.latest_update.hostname_backup_name, "name")
        self.assertEqual(len(backup.increments), 1)

    def test_backup_full_backup(self):
        ok = False
        try:
            base.Backup(None, "name", 1324, 0, "full_backup")
        except ValueError:
            ok = True
        if not ok:
            raise Exception("Should throw ValueError")

    def test_backup_increment(self):
        backup = base.Backup(None, "name", 1234)
        self.assertRaises(ValueError, backup.add_increment, backup)
        increment = base.Backup(None, "name", 4567, 1, backup)
        backup.add_increment(increment)
        self.assertEqual(len(backup.increments), 2)

    def test__find_previous_backup(self):
        backup = base.Backup(None, "name", 1234)
        b = base.Storage._find_previous_backup([backup], False, 2, False, 0)
        assert b == backup

    def test__find_previous_backup_with_max_level(self):
        backup = base.Backup(None, "name", 1234)
        i1 = base.Backup(None, "name", 1234, 1, backup)
        i2 = base.Backup(None, "name", 1234, 2, backup)
        backup.add_increment(i1)
        backup.add_increment(i2)
        b = base.Storage._find_previous_backup([backup], False, 2, False, 0)
        assert not b

    def test__find_previous_backup_with_max_level_not_reached(self):
        backup = base.Backup(None, "name", 1234)
        i1 = base.Backup(None, "name", 1234, 1, backup)
        backup.add_increment(i1)
        b = base.Storage._find_previous_backup([backup], False, 2, False, 0)
        assert b == i1

    def test__find_previous_backup_with_always_level_reached(self):
        backup = base.Backup(None, "name", 1234)
        i1 = base.Backup(None, "name", 1234, 1, backup)
        i2 = base.Backup(None, "name", 1234, 2, backup)
        backup.add_increment(i1)
        backup.add_increment(i2)
        b = base.Storage._find_previous_backup([backup], False, False, 2, 0)
        assert b == i1

    def test__find_previous_backup_with_always_level_reached_2(self):
        backup = base.Backup(None, "name", 1234)
        i1 = base.Backup(None, "name", 1234, 1, backup)
        i2 = base.Backup(None, "name", 1234, 2, backup)
        backup.add_increment(i1)
        backup.add_increment(i2)
        b = base.Storage._find_previous_backup([backup], False, False, 3, 0)
        assert b == i2

    def test_add_increment_raises(self):
        backup = base.Backup(None, "name", 1234, level=3)
        self.assertRaises(ValueError, backup.add_increment, None)

    def test_restore_latest_backup(self):
        t = base.Storage("", skip_prepare=True)
        t.find_all = mock.Mock()
        last = base.Backup(t, "host_backup", 5000)
        t.find_all.return_value = [
            base.Backup(t, "host_backup", 1000),
            base.Backup(t, "host_backup", 2000),
            base.Backup(t, "host_backup", 3000),
            base.Backup(t, "host_backup", 4000),
            base.Backup(t, "host_backup_f", 1000),
            last
        ]
        assert t.find_one("host_backup") == last

    def test_find_latest_backup_respects_increments_timestamp(self):
        test_backup = base.Backup(None, "host_backup", 5500)
        increment = base.Backup(None, "host_backup", 6000, 1, test_backup)
        test_backup.add_increment(increment)
        t = base.Storage(None, skip_prepare=True)
        t.find_all = mock.Mock()
        t.find_all.return_value = [
            test_backup,
            base.Backup(None, "host_backup", 2000),
            base.Backup(None, "host_backup", 3000),
            base.Backup(None, "host_backup", 4000),
            base.Backup(None, "host_backup_f", 1000),
            base.Backup(None, "host_backup", 5000),
        ]
        assert t.find_one("host_backup") == increment

    def test_restore_from_date(self):
        t = base.Storage(None, skip_prepare=True)
        t.find_all = mock.Mock()
        backup_restore = base.Backup(None, "host_backup", 3000)
        t.find_all.return_value = [
            base.Backup(None, "host_backup", 1000),
            base.Backup(None, "host_backup", 2000),
            backup_restore,
            base.Backup(None, "host_backup", 4000),
            base.Backup(None, "host_backup_f", 1000),
            base.Backup(None, "host_backup", 5000),
        ]
        assert t.find_one("host_backup", 3234) == backup_restore

    def test_restore_from_date_increment(self):
        t = base.Storage(None, skip_prepare=True)
        t.find_all = mock.Mock()
        test_backup = base.Backup(None, "host_backup", 1000)
        increment = base.Backup(None, "host_backup", 3200, 1, test_backup)
        test_backup.add_increment(increment)
        t.find_all.return_value = [
            test_backup,
            base.Backup(None, "host_backup", 4000),
            base.Backup(None, "host_backup_f", 1000),
            base.Backup(None, "host_backup", 5000),
        ]
        assert t.find_one("host_backup", 3234) == increment

    def test__get_backups_wrong_name(self):
        result = base.Backup.parse_backups(["hostname"], None)
        assert len(result) == 0
        result = base.Backup.parse_backups(["hostname_100_2"], None)
        assert len(result) == 0

    def test__get_backups_good_name(self):
        result = base.Backup.parse_backups(["host_backup_100_0"], None)
        assert len(result) == 1
        result = result[0]
        assert result.hostname_backup_name == "host_backup"
        assert result.timestamp == 100
        assert result.level == 0

    def test_remove_older_than(self):
        t = base.Storage(None, skip_prepare=True)
        t.find_all = mock.Mock()
        r1 = base.Backup(t, "host_backup", 1000)
        r2 = base.Backup(t, "host_backup", 2000)
        t.find_all.return_value = [
            r1,
            r2,
            base.Backup(t, "host_backup", 3000),
            base.Backup(t, "host_backup", 4000),
            base.Backup(t, "host_backup", 5000),
        ]
        t.remove_backup = mock.Mock()
        t.remove_older_than(3000, "host_backup")
        t.remove_backup.assert_any_call(r1)
        t.remove_backup.assert_any_call(r2)
        print t.remove_backup.call_count
        assert t.remove_backup.call_count == 2

    def test_create_backup(self):
        t = base.Storage(None, skip_prepare=True)
        t.find_all = mock.Mock()
        t.find_all.return_value = []
        t._find_previous_backup = mock.Mock()
        t._find_previous_backup.return_value = \
            base.Backup(None, "host_backup", 3000, tar_meta=True)
        t.create_backup("", True, 12, False, False)

    def test_restart_always_level(self):
        t = base.Storage(None, skip_prepare=True)
        t.find_all = mock.Mock()
        t.find_all.return_value = []
        backup = base.Backup(None, "host_backup", 3000, tar_meta=True)
        t._find_previous_backup([backup], False, None, None, 10)
@@ -14,15 +14,18 @@


from freezer.storage import fslike
from freezer.storage import storage
from freezer.storage import base
import mock
import unittest
import tempfile

class TestFsLikeStorage(object):
    def test_download_meta_file(self, tmpdir):
        t = fslike.FsLikeStorage(tmpdir.strpath, tmpdir.strpath)
        full_backup = storage.Backup("test", 2000)
        increment = storage.Backup("test", 2500, 9, full_backup)
class TestFsLikeStorage(unittest.TestCase):
    def test_download_meta_file(self):
        tmpdir = tempfile.mkdtemp()
        t = fslike.FsLikeStorage(tmpdir, tmpdir, skip_prepare=True)
        full_backup = base.Backup(None, "test", 2000)
        increment = base.Backup(None, "test", 2500, 9, full_backup)
        full_backup.add_increment(increment)
        backup = storage.Backup("test", 3000, 10, full_backup)
        t.get_file = mock.Mock()
        t.download_meta_file(backup)
        backup = base.Backup(None, "test", 3000, 10, full_backup)
        # t.get_file = mock.Mock()
        # t.download_meta_file(backup)
@ -15,12 +15,13 @@
|
||||
|
||||
import tempfile
|
||||
import shutil
|
||||
import unittest
|
||||
|
||||
from freezer.storage import local
|
||||
from freezer import utils
|
||||
|
||||
|
||||
class TestLocalStorage(object):
|
||||
class TestLocalStorage(unittest.TestCase):
|
||||
BACKUP_DIR_PREFIX = "freezer_test_backup_dir"
|
||||
FILES_DIR_PREFIX = "freezer_test_files_dir"
|
||||
WORK_DIR_PREFIX = "freezer_work_dir"
|
||||
@ -32,8 +33,8 @@ class TestLocalStorage(object):
        f.write(text)
        f.close()

    def create_dirs(self, tmpdir):
        tmpdir = tmpdir.strpath
    def create_dirs(self):
        tmpdir = tempfile.mkdtemp()
        if self.temp:
            backup_dir = tempfile.mkdtemp(
                dir=tmpdir, prefix=self.BACKUP_DIR_PREFIX)
@ -60,12 +61,12 @@ class TestLocalStorage(object):
    def remove_storage(self, backup_dir):
        shutil.rmtree(backup_dir)

    def test_prepare(self, tmpdir):
        backup_dir, files_dir, work_dir = self.create_dirs(tmpdir)
    def test_prepare(self):
        backup_dir, files_dir, work_dir = self.create_dirs()
        storage = local.LocalStorage(backup_dir, work_dir)
        storage.prepare()

    def test_info(self, tmpdir):
        backup_dir, files_dir, work_dir = self.create_dirs(tmpdir)
    def test_info(self):
        backup_dir, files_dir, work_dir = self.create_dirs()
        storage = local.LocalStorage(backup_dir, work_dir)
        storage.info()
@ -1,32 +0,0 @@
# (c) Copyright 2014,2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import unittest

from freezer.storage import ssh


class TestSshStorage(unittest.TestCase):

    def test_constructor(self):
        ssh.SshStorage.init = lambda x: x
        ssh.SshStorage(
            "test_dir", "test_work_dir", "test_key", "test_name", "test_ip",
            "test_port")

    def test_info(self):
        ssh.SshStorage.init = lambda x: x
        ssh.SshStorage(
            "test_dir", "test_work_dir", "test_key", "test_name", "test_ip",
            "test_port").info()
@ -1,220 +0,0 @@
# (c) Copyright 2014,2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import unittest
from freezer.storage import storage
import mock


class TestBackup(unittest.TestCase):
    def test_backup_parse(self):
        self.assertRaises(ValueError, storage.Backup._parse, "asdfasdfasdf")
        backup = storage.Backup._parse("test_name_host_1234_0")
        self.assertEqual(backup.level, 0)
        self.assertEqual(backup.timestamp, 1234)
        self.assertEqual(backup.hostname_backup_name, "test_name_host")

    def test_backup_creation(self):
        backup = storage.Backup("name", 1234)
        self.assertEqual(backup.hostname_backup_name, "name")
        self.assertEqual(backup.timestamp, 1234)
        self.assertEqual(backup.level, 0)
        self.assertEqual(backup.latest_update.level, 0)
        self.assertEqual(backup.latest_update.timestamp, 1234)
        self.assertEqual(backup.latest_update.hostname_backup_name, "name")
        self.assertEqual(len(backup.increments), 1)

    def test_backup_full_backup(self):
        ok = False
        try:
            storage.Backup("name", 1324, 0, "full_backup")
        except ValueError:
            ok = True
        if not ok:
            raise Exception("Should throw ValueError")

    def test_backup_increment(self):
        backup = storage.Backup("name", 1234)
        self.assertRaises(ValueError, backup.add_increment, backup)
        increment = storage.Backup("name", 4567, 1, backup)
        backup.add_increment(increment)
        self.assertEqual(len(backup.increments), 2)

    def test__find_previous_backup(self):
        backup = storage.Backup("name", 1234)
        b = storage.Storage._find_previous_backup([backup], False, 2, False, 0)
        assert b == backup

    def test__find_previous_backup_with_max_level(self):
        backup = storage.Backup("name", 1234)
        i1 = storage.Backup("name", 1234, 1, backup)
        i2 = storage.Backup("name", 1234, 2, backup)
        backup.add_increment(i1)
        backup.add_increment(i2)
        b = storage.Storage._find_previous_backup([backup], False, 2, False, 0)
        assert not b

    def test__find_previous_backup_with_max_level_not_reached(self):
        backup = storage.Backup("name", 1234)
        i1 = storage.Backup("name", 1234, 1, backup)
        backup.add_increment(i1)
        b = storage.Storage._find_previous_backup([backup], False, 2, False, 0)
        assert b == i1

    def test__find_previous_backup_with_always_level_reached(self):
        backup = storage.Backup("name", 1234)
        i1 = storage.Backup("name", 1234, 1, backup)
        i2 = storage.Backup("name", 1234, 2, backup)
        backup.add_increment(i1)
        backup.add_increment(i2)
        b = storage.Storage._find_previous_backup([backup], False, False, 2, 0)
        assert b == i1

    def test__find_previous_backup_with_always_level_reached_2(self):
        backup = storage.Backup("name", 1234)
        i1 = storage.Backup("name", 1234, 1, backup)
        i2 = storage.Backup("name", 1234, 2, backup)
        backup.add_increment(i1)
        backup.add_increment(i2)
        b = storage.Storage._find_previous_backup([backup], False, False, 3, 0)
        assert b == i2

    def test_add_increment_raises(self):
        backup = storage.Backup("name", 1234, level=3)
        self.assertRaises(ValueError, backup.add_increment, None)

    def test_find_all(self):
        t = storage.Storage()
        t.get_backups = mock.Mock()
        t.get_backups.return_value = [
            storage.Backup("host_backup", 1000),
            storage.Backup("host_backup", 1000),
            storage.Backup("host_backup", 1000),
            storage.Backup("host_backup", 1000),
            storage.Backup("host_backup_f", 1000),
            storage.Backup("host_backup", 1000),
        ]
        result = t.find_all("host_backup")
        assert len(result) == 5
        for r in result:
            assert r.hostname_backup_name != "host_backup_f"

    def test_restore_latest_backup(self):
        t = storage.Storage()
        t.get_backups = mock.Mock()
        last = storage.Backup("host_backup", 5000)
        t.get_backups.return_value = [
            storage.Backup("host_backup", 1000),
            storage.Backup("host_backup", 2000),
            storage.Backup("host_backup", 3000),
            storage.Backup("host_backup", 4000),
            storage.Backup("host_backup_f", 1000),
            last
        ]
        self.assertRaises(IndexError, t.find_one, "")
        assert t.find_one("host_backup") == last

    def test_find_latest_backup_respects_increments_timestamp(self):
        test_backup = storage.Backup("host_backup", 5500)
        increment = storage.Backup("host_backup", 6000, 1, test_backup)
        test_backup.add_increment(increment)
        t = storage.Storage()
        t.get_backups = mock.Mock()
        t.get_backups.return_value = [
            test_backup,
            storage.Backup("host_backup", 2000),
            storage.Backup("host_backup", 3000),
            storage.Backup("host_backup", 4000),
            storage.Backup("host_backup_f", 1000),
            storage.Backup("host_backup", 5000),
        ]
        assert t.find_one("host_backup") == increment

    def test_restore_from_date(self):
        t = storage.Storage()
        t.get_backups = mock.Mock()
        backup_restore = storage.Backup("host_backup", 3000)
        t.get_backups.return_value = [
            storage.Backup("host_backup", 1000),
            storage.Backup("host_backup", 2000),
            backup_restore,
            storage.Backup("host_backup", 4000),
            storage.Backup("host_backup_f", 1000),
            storage.Backup("host_backup", 5000),
        ]
        assert t.find_one("host_backup", 3234) == backup_restore

    def test_restore_from_date_increment(self):
        t = storage.Storage()
        t.get_backups = mock.Mock()
        test_backup = storage.Backup("host_backup", 1000)
        increment = storage.Backup("host_backup", 3200, 1, test_backup)
        test_backup.add_increment(increment)
        t.get_backups.return_value = [
            test_backup,
            storage.Backup("host_backup", 4000),
            storage.Backup("host_backup_f", 1000),
            storage.Backup("host_backup", 5000),
        ]
        assert t.find_one("host_backup", 3234) == increment

    def test__get_backups_wrong_name(self):
        result = storage.Backup.parse_backups(["hostname"])
        assert len(result) == 0
        result = storage.Backup.parse_backups(["hostname_100_2"])
        assert len(result) == 0

    def test__get_backups_good_name(self):
        result = storage.Backup.parse_backups(["host_backup_100_0"])
        assert len(result) == 1
        result = result[0]
        assert result.hostname_backup_name == "host_backup"
        assert result.timestamp == 100
        assert result.level == 0

    def test_remove_older_than(self):
        t = storage.Storage()
        t.get_backups = mock.Mock()
        r1 = storage.Backup("host_backup", 1000)
        r2 = storage.Backup("host_backup", 2000)
        t.get_backups.return_value = [
            r1,
            r2,
            storage.Backup("host_backup", 3000),
            storage.Backup("host_backup", 4000),
            storage.Backup("host_backup_f", 1000),
            storage.Backup("host_backup", 5000),
        ]
        t.remove_backup = mock.Mock()
        t.remove_older_than(3000, "host_backup")
        t.remove_backup.assert_any_call(r1)
        t.remove_backup.assert_any_call(r2)
        assert t.remove_backup.call_count == 2

    def test_create_backup(self):
        t = storage.Storage()
        t.get_backups = mock.Mock()
        t.get_backups.return_value = []
        t._find_previous_backup = mock.Mock()
        t._find_previous_backup.return_value = \
            storage.Backup("host_backup", 3000, tar_meta=True)
        t.create_backup("", True, 12, False, False)

    def test_restart_always_level(self):
        t = storage.Storage()
        t.get_backups = mock.Mock()
        t.get_backups.return_value = []
        backup = storage.Backup("host_backup", 3000, tar_meta=True)
        t._find_previous_backup([backup], False, None, None, 10)
@ -17,7 +17,7 @@ import unittest
from freezer import osclients
from freezer import utils
from freezer.storage import swift
from freezer.storage import storage
from freezer.storage import base


class TestSwiftStorage(unittest.TestCase):
@ -30,7 +30,7 @@ class TestSwiftStorage(unittest.TestCase):
            ),
            "freezer_ops-aw1ops1-gerrit0001.aw1.hpcloud.net",
            "/tmp/",
            100
            100, skip_prepare=True
        )

        self.files = [
@ -56,44 +56,50 @@ class TestSwiftStorage(unittest.TestCase):
            "hostname_backup_4000_1",
        ]

        self.backup = storage.Backup("hostname_backup", 1000, tar_meta=True)
        self.backup_2 = storage.Backup("hostname_backup", 3000, tar_meta=True)
        self.increment = storage.Backup("hostname_backup", 2000,
                                        full_backup=self.backup,
                                        level=1,
                                        tar_meta=True)
        self.increment_2 = storage.Backup("hostname_backup", 4000,
                                          full_backup=self.backup_2,
                                          level=1,
                                          tar_meta=True)
        self.backup = base.Backup(self.storage,
                                  "hostname_backup", 1000, tar_meta=True)
        self.backup_2 = base.Backup(self.storage,
                                    "hostname_backup", 3000, tar_meta=True)
        self.increment = base.Backup(self.storage,
                                     "hostname_backup", 2000,
                                     full_backup=self.backup,
                                     level=1,
                                     tar_meta=True)
        self.increment_2 = base.Backup(self.storage,
                                       "hostname_backup", 4000,
                                       full_backup=self.backup_2,
                                       level=1,
                                       tar_meta=True)

    def test__get_backups(self):
        backups = storage.Backup.parse_backups(self.files)
        backups = base.Backup.parse_backups(self.files, self.storage)
        self.assertEqual(len(backups), 1)
        backup = backups[0]
        self.assertEqual(backup, self.backup)

    def test__get_backups_with_tar_only(self):
        backups = storage.Backup.parse_backups(
            ["tar_metadata_hostname_backup_1000_0"])
        backups = base.Backup.parse_backups(
            ["tar_metadata_hostname_backup_1000_0"], self.storage)
        self.assertEqual(len(backups), 0)

    def test__get_backups_without_tar(self):
        backups = storage.Backup.parse_backups(["hostname_backup_1000_0"])
        backups = base.Backup.parse_backups(["hostname_backup_1000_0"],
                                            self.storage)
        self.assertEqual(len(backups), 1)
        self.backup.tar_meta = False
        backup = backups[0]
        self.assertEqual(backup, self.backup)

    def test__get_backups_increment(self):
        backups = storage.Backup.parse_backups(self.increments)
        backups = base.Backup.parse_backups(self.increments, self.storage)
        self.assertEqual(len(backups), 1)
        self.backup.add_increment(self.increment)
        backup = backups[0]
        self.assertEqual(backup, self.backup)

    def test__get_backups_increments(self):
        backups = storage.Backup.parse_backups(self.cycles_increments)
        backups = base.Backup.parse_backups(self.cycles_increments,
                                            self.storage)
        self.assertEqual(len(backups), 2)
        self.backup.add_increment(self.increment)
        self.backup_2.add_increment(self.increment_2)
33
tests/test_config.py
Normal file
@ -0,0 +1,33 @@
# (c) Copyright 2014,2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from freezer import config


class TestConfig(unittest.TestCase):
    def test_export(self):
        str = """unset OS_DOMAIN_NAME
export OS_AUTH_URL="http://abracadabra/v3"
export OS_PROJECT_NAME=abracadabra_project
export OS_USERNAME=abracadabra_username
export OS_PASSWORD=abracadabra_password
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_IDENTITY_API_VERSION=3
export OS_AUTH_VERSION=3
export OS_CACERT=/etc/ssl/certs/ca-certificates.crt
export OS_ENDPOINT_TYPE=internalURL"""
        res = config.osrc_parse(str)
        self.assertEqual("http://abracadabra/v3", res["OS_AUTH_URL"])
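The new test fixes the contract of config.osrc_parse: "export KEY=value" lines of an openrc file become dictionary entries, surrounding quotes are stripped, and "unset" lines are ignored. A minimal sketch of a parser that satisfies the assertion above, assuming only what the test shows (everything beyond the function name is an assumption about the real implementation):

    def osrc_parse(text):
        """Parse the "export KEY=value" lines of an openrc file into a dict."""
        result = {}
        for line in text.splitlines():
            line = line.strip()
            if not line.startswith("export "):
                continue  # skip "unset" lines, comments and blanks
            key, _, value = line[len("export "):].partition("=")
            result[key.strip()] = value.strip().strip('"').strip("'")
        return result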
@ -16,35 +16,33 @@
from freezer import exec_cmd
from mock import patch, Mock
import subprocess
import unittest

from __builtin__ import True
class TestExec(unittest.TestCase):
    def test_exec_cmd(self):
        cmd = "echo test > test.txt"
        popen = patch('freezer.exec_cmd.subprocess.Popen')
        mock_popen = popen.start()
        mock_popen.return_value = Mock()
        mock_popen.return_value.communicate = Mock()
        mock_popen.return_value.communicate.return_value = ['some stderr']
        mock_popen.return_value.returncode = 0
        exec_cmd.execute(cmd)
        assert (mock_popen.call_count == 1)
        mock_popen.assert_called_with(['echo', 'test', '>', 'test.txt'],
                                      shell=False,
                                      stderr=subprocess.PIPE,
                                      stdout=subprocess.PIPE)
        popen.stop()


def test_exec_cmd(monkeypatch):
    cmd = "echo test > test.txt"
    popen = patch('freezer.exec_cmd.subprocess.Popen')
    mock_popen = popen.start()
    mock_popen.return_value = Mock()
    mock_popen.return_value.communicate = Mock()
    mock_popen.return_value.communicate.return_value = ['some stderr']
    mock_popen.return_value.returncode = 0
    exec_cmd.execute(cmd)
    assert (mock_popen.call_count == 1)
    mock_popen.assert_called_with(['echo', 'test', '>', 'test.txt'],
                                  shell=False,
                                  stderr=subprocess.PIPE,
                                  stdout=subprocess.PIPE)
    popen.stop()


def test__exec_cmd_with_pipe(monkeypatch):
    cmd = "echo test|wc -l"
    popen = patch('freezer.exec_cmd.subprocess.Popen')
    mock_popen = popen.start()
    mock_popen.return_value = Mock()
    mock_popen.return_value.communicate = Mock()
    mock_popen.return_value.communicate.return_value = ['some stderr']
    mock_popen.return_value.returncode = 0
    exec_cmd.execute(cmd)
    assert (mock_popen.call_count == 2)
    popen.stop()
    def test__exec_cmd_with_pipe(self):
        cmd = "echo test|wc -l"
        popen = patch('freezer.exec_cmd.subprocess.Popen')
        mock_popen = popen.start()
        mock_popen.return_value = Mock()
        mock_popen.return_value.communicate = Mock()
        mock_popen.return_value.communicate.return_value = ['some stderr']
        mock_popen.return_value.returncode = 0
        exec_cmd.execute(cmd)
        assert (mock_popen.call_count == 2)
        popen.stop()
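Both variants of the pipe test assert mock_popen.call_count == 2, which captures the contract of exec_cmd.execute: a plain command spawns one subprocess, while each "|"-separated segment of a pipeline gets its own process. A rough sketch of an execute() that would satisfy these tests, assuming the segments are chained through pipes (the real freezer.exec_cmd may differ in details such as error handling):

    import subprocess

    def execute(cmd):
        """Run cmd, spawning one chained process per "|"-separated segment."""
        process = None
        for segment in cmd.split("|"):
            kwargs = {"shell": False,
                      "stdout": subprocess.PIPE,
                      "stderr": subprocess.PIPE}
            if process is not None:
                kwargs["stdin"] = process.stdout  # previous segment's output
            process = subprocess.Popen(segment.split(), **kwargs)
        output = process.communicate()
        return process.returncode, output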
@ -16,8 +16,8 @@
from freezer import utils
import datetime
from commons import *

import unittest
import mock

class TestUtils(unittest.TestCase):
@ -26,24 +26,20 @@ class TestUtils(unittest.TestCase):
        dir1 = '/tmp'
        dir2 = '/tmp/testnoexistent1234'
        dir3 = '~'
        fakeos = Os()

        assert utils.create_dir(dir1) is None
        assert utils.create_dir(dir2) is None
        os.rmdir(dir2)
        assert utils.create_dir(dir3) is None
        os.makedirs = fakeos.makedirs2
        self.assertRaises(Exception, utils.create_dir, dir2)

    # @mock.patch("os.path")
    # @mock.patch("re.search")
    # def test_get_vol_fs_type(self, exists_mock, re_mock):
    #     self.assertRaises(Exception, utils.get_vol_fs_type, "test")
    #     exists_mock.exists.return_value = True
    # def test_get_vol_fs_type(self):
    #     self.assertRaises(Exception, utils.get_vol_fs_type, "test")
    #
    #     re_mock.return_value = FakeRe()
    #     fakeos = Os()
    #     os.path.exists = fakeos.exists
    #     self.assertRaises(Exception, utils.get_vol_fs_type, "test")
    #
    #     fakere = FakeRe()
    #     re.search = fakere.search
    #     assert type(utils.get_vol_fs_type("test")) is str

    def test_get_mount_from_path(self):
@ -53,6 +49,8 @@ class TestUtils(unittest.TestCase):
        assert type(utils.get_mount_from_path(dir1)[1]) is str
        self.assertRaises(Exception, utils.get_mount_from_path, dir2)

        # pytest.raises(Exception, utils.get_mount_from_path, dir2)

    def test_human2bytes(self):
        assert utils.human2bytes('0 B') == 0
        assert utils.human2bytes('1 K') == 1024
@ -93,8 +91,8 @@ class TestUtils(unittest.TestCase):
    def test_date_to_timestamp(self):
        # ensure that the timestamp is checked with the appropriate
        # timezone offset
        assert (1417649003 + time.timezone) == utils.date_to_timestamp(
            "2014-12-03T23:23:23")
        assert (1417649003+time.timezone) == \
            utils.date_to_timestamp("2014-12-03T23:23:23")

    def prepare_env(self):
        os.environ["HTTP_PROXY"] = 'http://proxy.original.domain:8080'
@ -116,8 +114,8 @@ class TestUtils(unittest.TestCase):
        assert os.environ["HTTPS_PROXY"] == test_proxy


class TestDateTime(unittest.TestCase):
    def setUp(self):
class TestDateTime:
    def setup(self):
        d = datetime.datetime(2015, 3, 7, 17, 47, 44, 716799)
        self.datetime = utils.DateTime(d)