Pluggable storage architecture and local storage implementation

Implements: blueprint local-storage
Change-Id: Id968933f636f01ca20e82cba40291c9202e281b5
commit 253ad8128a (parent 59431a891f)

README.rst (45 changed lines)
@@ -240,6 +240,12 @@ Execute a mysql backup with nova::

All the freezerc activities are logged into /var/log/freezer.log.

Local storage backup execution:

    $ sudo freezerc --file-to-backup /data/dir/to/backup
    --container /my_backup_path/ --backup-name my-backup-name
    --storage local

Restore
-------

@@ -314,6 +320,15 @@ Execute a nova restore::

    $ freezerc --action restore --nova-inst-id 3ad7a62f-217a-48cd-a861-43ec0a04a78b

Local storage restore execution:

    $ sudo freezerc --action restore --container /local_backup_storage/
    --backup-name adminui.git
    --restore-from-host git-HP-DL380-host-001 --restore-abs-path
    /home/git/repositories/adminui.git/
    --restore-from-date "2014-05-23T23:23:23"
    --storage local

Architecture
============

@@ -323,7 +338,7 @@ Freezer architecture is simple. The components are:
- freezer client running on the node you want to execute the backups or
  restore

Frezeer use GNU Tar under the hood to execute incremental backup and
Freezer use GNU Tar under the hood to execute incremental backup and
restore. When a key is provided, it uses OpenSSL to encrypt data
(AES-256-CFB)

@@ -374,26 +389,6 @@ following basic logic happens when Freezer execute:
important as the Manifest file contains the information of the
previous Freezer execution.

The following is what the Swift Manifest looks like::

    {
    'X-Object-Meta-Encrypt-Data': 'Yes',
    'X-Object-Meta-Segments-Size-Bytes': '134217728',
    'X-Object-Meta-Backup-Created-Timestamp': '1395734461',
    'X-Object-Meta-Remove-Backup-Older-Than-Days': '',
    'X-Object-Meta-Src-File-To-Backup': '/var/lib/snapshot-backup/mongod_dev-mongo-s1',
    'X-Object-Meta-Maximum-Backup-level': '0',
    'X-Object-Meta-Always-Backup-Level': '',
    'X-Object-Manifest': u'socorro-backup-dev_segments/dev-mongo-s1-r1_mongod_dev-mongo-s1_1395734461_0',
    'X-Object-Meta-Providers-List': 'HP',
    'X-Object-Meta-Backup-Current-Level': '0',
    'X-Object-Meta-Abs-File-Path': '',
    'X-Object-Meta-Backup-Name': 'mongod_dev-mongo-s1',
    'X-Object-Meta-Tar-Meta-Obj-Name': 'tar_metadata_dev-mongo-s1-r1_mongod_dev-mongo-s1_1395734461_0',
    'X-Object-Meta-Hostname': 'dev-mongo-s1-r1',
    'X-Object-Meta-Container-Segments': 'socorro-backup-dev_segments'
    }

3) The most relevant data taken in consideration for incremental are:

- 'X-Object-Meta-Maximum-Backup-level': '7'

@@ -426,20 +421,12 @@ Through this meta data, we can identify the exact Manifest name of the
provided backup name. The syntax is:
container\_name/hostname\_backup\_name\_timestamp\_initiallevel

- 'X-Object-Meta-Providers-List': 'HP'

This option is NOT implemented yet The idea of Freezer is to support
every Cloud provider that provide Object Storage service using OpenStack
Swift. The meta data allows you to specify multiple provider and
therefore store your data in different Geographic location.

- 'X-Object-Meta-Backup-Current-Level': '0'

Record the current backup level. This is important as the value is
incremented by 1 in the next freezer execution.

- 'X-Object-Meta-Backup-Name': 'mongod\_dev-mongo-s1'

Value set by the option: -N BACKUP\_NAME, --backup-name BACKUP\_NAME The
option is used to identify the backup. It is a mandatory option and
fundamental to execute incremental backup. 'Meta-Backup-Name' and
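The object naming convention described above, container\_name/hostname\_backup\_name\_timestamp\_initiallevel, is exactly what the new pluggable storage layer parses when it rebuilds the backup catalogue. A minimal sketch of that parsing, reusing the Backup.PATTERN regular expression introduced in freezer/storage.py later in this change::

    import re

    # Same pattern as freezer.storage.Backup.PATTERN:
    #   <hostname>_<backup_name>_<timestamp>_<level>
    PATTERN = r'(.*)_(\d+)_(\d+?)$'

    def parse_backup_name(value):
        match = re.search(PATTERN, value, re.I)
        if not match:
            raise ValueError("Cannot parse backup from string: " + value)
        return match.group(1), int(match.group(2)), int(match.group(3))

    # Object name taken from the Swift manifest example above
    print(parse_backup_name('dev-mongo-s1-r1_mongod_dev-mongo-s1_1395734461_0'))
    # ('dev-mongo-s1-r1_mongod_dev-mongo-s1', 1395734461, 0)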
@@ -64,7 +64,8 @@ DEFAULT_PARAMS = {
    'restore_abs_path': False, 'log_file': None,
    'upload': True, 'mode': 'fs', 'action': 'backup',
    'vssadmin': True, 'shadow': '', 'shadow_path': '',
    'windows_volume': '', 'command': None, 'metadata_out': False
    'windows_volume': '', 'command': None, 'metadata_out': False,
    'storage': 'swift'
}

@@ -153,7 +154,8 @@ def backup_arguments(args_dict={}):
        default='fs')
    arg_parser.add_argument(
        '-C', '--container', action='store',
        help="The Swift container used to upload files to",
        help="The Swift container (or path to local storage) "
             "used to upload files to",
        dest='container', default='freezer_backups')
    arg_parser.add_argument(
        '-L', '--list-containers', action='store_true',
@@ -330,7 +332,7 @@ def backup_arguments(args_dict={}):
        help='''Set the absolute path where you want your data restored.
        Please provide datetime in format "YYYY-MM-DDThh:mm:ss"
        i.e. "1979-10-03T23:23:23". Make sure the "T" is between date and time
        Default False.''', dest='restore_from_date', default=False)
        Default None.''', dest='restore_from_date', default=None)
    arg_parser.add_argument(
        '--max-priority', action='store_true',
        help='''Set the cpu process to the highest priority (i.e. -20 on Linux)
@@ -409,6 +411,14 @@ def backup_arguments(args_dict={}):
        help='Command executed by exec action',
        dest='command', default=None)

    arg_parser.add_argument(
        '--storage', action='store',
        choices=['local', 'swift'],
        help="Storage for backups. Can be Swift or Local now. Swift is default "
             "storage now. Local stores backups on the same defined path and "
             "swift will store files in container.",
        dest='storage', default='swift')

    arg_parser.set_defaults(**defaults)
    backup_args = arg_parser.parse_args()

@@ -431,7 +441,8 @@ def backup_arguments(args_dict={}):
    # have the prefix, it is automatically added also to the container
    # segments name. This is done to quickly identify the containers
    # that contain freezer generated backups
    if not backup_args.container.startswith('freezer_'):
    if not backup_args.container.startswith('freezer_') and \
            backup_args.storage != 'local':
        backup_args.container = 'freezer_{0}'.format(
            backup_args.container)

@@ -452,6 +463,9 @@ def backup_arguments(args_dict={}):
    if distspawn.find_executable('gtar'):
        backup_args.__dict__['tar_path'] = \
            distspawn.find_executable('gtar')
    elif distspawn.find_executable('gnutar'):
        backup_args.__dict__['tar_path'] = \
            distspawn.find_executable('gnutar')
    else:
        raise Exception('Please install gnu tar (gtar) as it is a '
                        'mandatory requirement to use freezer.')
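The new --storage option added above is a plain argparse choice between the two back ends, with Swift remaining the default. A small standalone sketch (not the full freezer parser) of how the option and the container prefix rule interact::

    import argparse

    parser = argparse.ArgumentParser()
    # Mirrors the options added above: two pluggable back ends, Swift by default.
    parser.add_argument('--storage', action='store', choices=['local', 'swift'],
                        dest='storage', default='swift')
    parser.add_argument('-C', '--container', action='store',
                        dest='container', default='freezer_backups')

    args = parser.parse_args(['--storage', 'local',
                              '--container', '/my_backup_path/'])

    # The prefix rule changed above: 'freezer_' is only prepended when the
    # selected storage is not 'local', so a local path stays a plain path.
    if not args.container.startswith('freezer_') and args.storage != 'local':
        args.container = 'freezer_{0}'.format(args.container)

    print(args.container)  # /my_backup_path/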
@ -20,27 +20,25 @@ Hudson (tjh@cryptsoft.com).
|
||||
|
||||
Freezer Backup modes related functions
|
||||
"""
|
||||
import multiprocessing
|
||||
import logging
|
||||
import os
|
||||
from os.path import expanduser
|
||||
import time
|
||||
|
||||
from freezer import utils
|
||||
from freezer.lvm import lvm_snap, lvm_snap_remove, get_lvm_info
|
||||
from freezer.tar import tar_backup, TarCommandBuilder
|
||||
from freezer.utils import gen_manifest_meta, create_dir
|
||||
from freezer.tar import TarCommandBuilder
|
||||
from freezer.vss import vss_create_shadow_copy
|
||||
from freezer.vss import vss_delete_shadow_copy
|
||||
from freezer.winutils import start_sql_server
|
||||
from freezer.winutils import stop_sql_server
|
||||
from freezer.winutils import use_shadow
|
||||
from freezer.winutils import is_windows
|
||||
from freezer import swift
|
||||
|
||||
home = expanduser("~")
|
||||
|
||||
|
||||
def backup_mode_sql_server(backup_opt_dict, time_stamp, manifest_meta_dict):
|
||||
def backup_mode_sql_server(backup_opt_dict):
|
||||
"""
|
||||
Execute a SQL Server DB backup. Currently only backups with shadow
|
||||
copy are supported. This mean, as soon as the shadow copy is created
|
||||
@ -59,7 +57,7 @@ def backup_mode_sql_server(backup_opt_dict, time_stamp, manifest_meta_dict):
|
||||
|
||||
try:
|
||||
stop_sql_server(backup_opt_dict)
|
||||
backup_mode_fs(backup_opt_dict, time_stamp, manifest_meta_dict)
|
||||
backup(backup_opt_dict, backup_opt_dict.storage)
|
||||
finally:
|
||||
if not backup_opt_dict.vssadmin:
|
||||
# if vssadmin is false, wait until the backup is complete
|
||||
@ -67,7 +65,7 @@ def backup_mode_sql_server(backup_opt_dict, time_stamp, manifest_meta_dict):
|
||||
start_sql_server(backup_opt_dict)
|
||||
|
||||
|
||||
def backup_mode_mysql(backup_opt_dict, time_stamp, manifest_meta_dict):
|
||||
def backup_mode_mysql(backup_opt_dict):
|
||||
"""
|
||||
Execute a MySQL DB backup. currently only backup with lvm snapshots
|
||||
are supported. This mean, just before the lvm snap vol is created,
|
||||
@ -114,10 +112,10 @@ def backup_mode_mysql(backup_opt_dict, time_stamp, manifest_meta_dict):
|
||||
raise Exception('[*] MySQL: {0}'.format(error))
|
||||
|
||||
# Execute backup
|
||||
backup(backup_opt_dict, time_stamp, manifest_meta_dict)
|
||||
backup(backup_opt_dict, backup_opt_dict.storage)
|
||||
|
||||
|
||||
def backup_mode_mongo(backup_opt_dict, time_stamp, manifest_meta_dict):
|
||||
def backup_mode_mongo(backup_opt_dict):
|
||||
"""
|
||||
Execute the necessary tasks for file system backup mode
|
||||
"""
|
||||
@ -138,7 +136,7 @@ def backup_mode_mongo(backup_opt_dict, time_stamp, manifest_meta_dict):
|
||||
mongo_primary = master_dict['primary']
|
||||
|
||||
if mongo_me == mongo_primary:
|
||||
backup(backup_opt_dict, time_stamp, manifest_meta_dict)
|
||||
backup(backup_opt_dict, backup_opt_dict.storage)
|
||||
else:
|
||||
logging.warning('[*] localhost {0} is not Master/Primary,\
|
||||
exiting...'.format(local_hostname))
|
||||
@ -147,15 +145,23 @@ def backup_mode_mongo(backup_opt_dict, time_stamp, manifest_meta_dict):
|
||||
|
||||
class BackupOs:
|
||||
|
||||
def __init__(self, client_manager, container):
|
||||
def __init__(self, client_manager, container, storage):
|
||||
"""
|
||||
|
||||
:param client_manager:
|
||||
:param container:
|
||||
:param storage:
|
||||
:type storage: freezer.swift.SwiftStorage
|
||||
:return:
|
||||
"""
|
||||
self.client_manager = client_manager
|
||||
self.container = container
|
||||
self.storage = storage
|
||||
|
||||
def backup_nova(self, instance_id, time_stamp):
|
||||
def backup_nova(self, instance_id):
|
||||
"""
|
||||
Implement nova backup
|
||||
:param instance_id: Id of the instance for backup
|
||||
:param time_stamp: timestamp of backup
|
||||
:return:
|
||||
"""
|
||||
instance_id = instance_id
|
||||
@ -175,23 +181,21 @@ class BackupOs:
|
||||
image = glance.images.get(image)
|
||||
|
||||
stream = client_manager.download_image(image)
|
||||
package = "{0}/{1}".format(instance_id, time_stamp)
|
||||
package = "{0}/{1}".format(instance_id, utils.DateTime.now().timestamp)
|
||||
logging.info("[*] Uploading image to swift")
|
||||
headers = {"x-object-meta-name": instance._info['name'],
|
||||
"x-object-meta-tenant_id": instance._info['tenant_id']}
|
||||
swift.add_stream(client_manager,
|
||||
self.container, stream, package, headers)
|
||||
self.storage.add_stream(stream, package, headers)
|
||||
logging.info("[*] Deleting temporary image")
|
||||
glance.images.delete(image)
|
||||
|
||||
def backup_cinder_by_glance(self, volume_id, time_stamp):
|
||||
def backup_cinder_by_glance(self, volume_id):
|
||||
"""
|
||||
Implements cinder backup:
|
||||
1) Gets a stream of the image from glance
|
||||
2) Stores resulted image to the swift as multipart object
|
||||
|
||||
:param volume_id: id of volume for backup
|
||||
:param time_stamp: timestamp of snapshot
|
||||
"""
|
||||
client_manager = self.client_manager
|
||||
cinder = client_manager.get_cinder()
|
||||
@ -205,11 +209,10 @@ class BackupOs:
|
||||
logging.info("[*] Creation temporary glance image")
|
||||
image = client_manager.make_glance_image("name", copied_volume)
|
||||
stream = client_manager.download_image(image)
|
||||
package = "{0}/{1}".format(volume_id, time_stamp)
|
||||
package = "{0}/{1}".format(volume_id, utils.DateTime.now().timestamp)
|
||||
logging.info("[*] Uploading image to swift")
|
||||
headers = {}
|
||||
swift.add_stream(self.client_manager,
|
||||
self.container, stream, package, headers=headers)
|
||||
self.storage.add_stream(stream, package, headers=headers)
|
||||
logging.info("[*] Deleting temporary snapshot")
|
||||
client_manager.clean_snapshot(snapshot)
|
||||
logging.info("[*] Deleting temporary volume")
|
||||
@ -223,194 +226,101 @@ class BackupOs:
|
||||
cinder.backups.create(volume_id, self.container, name, description)
|
||||
|
||||
|
||||
def backup(backup_opt_dict, time_stamp, manifest_meta_dict):
|
||||
def snapshot_create(backup_opt_dict):
|
||||
if is_windows():
|
||||
if backup_opt_dict.vssadmin:
|
||||
# Create a shadow copy.
|
||||
backup_opt_dict.shadow_path, backup_opt_dict.shadow = \
|
||||
vss_create_shadow_copy(backup_opt_dict.windows_volume)
|
||||
|
||||
# execute this after the snapshot creation
|
||||
if backup_opt_dict.mode == 'sqlserver':
|
||||
start_sql_server(backup_opt_dict)
|
||||
|
||||
else:
|
||||
# If lvm_auto_snap is true, the volume group and volume name will
|
||||
# be extracted automatically
|
||||
if backup_opt_dict.lvm_auto_snap:
|
||||
backup_opt_dict = get_lvm_info(backup_opt_dict)
|
||||
|
||||
# Generate the lvm_snap if lvm arguments are available
|
||||
lvm_snap(backup_opt_dict)
|
||||
|
||||
if is_windows() and backup_opt_dict.vssadmin:
|
||||
backup_opt_dict.path_to_backup = use_shadow(
|
||||
backup_opt_dict.path_to_backup,
|
||||
backup_opt_dict.windows_volume)
|
||||
return backup_opt_dict
|
||||
|
||||
|
||||
def snapshot_remove(backup_opt_dict, shadow, windows_volume):
|
||||
if is_windows():
|
||||
# Delete the shadow copy after the backup
|
||||
vss_delete_shadow_copy(shadow, windows_volume)
|
||||
else:
|
||||
# Unmount and remove lvm snapshot volume
|
||||
lvm_snap_remove(backup_opt_dict)
|
||||
|
||||
|
||||
def backup(backup_opt_dict, storage):
|
||||
"""
|
||||
|
||||
:param backup_opt_dict:
|
||||
:param storage:
|
||||
:type storage: freezer.storage.Storage
|
||||
:return:
|
||||
"""
|
||||
backup_media = backup_opt_dict.backup_media
|
||||
backup_os = BackupOs(backup_opt_dict.client_manager,
|
||||
backup_opt_dict.container)
|
||||
|
||||
if backup_media == 'fs':
|
||||
backup_mode_fs(backup_opt_dict, time_stamp, manifest_meta_dict)
|
||||
elif backup_media == 'nova':
|
||||
|
||||
try:
|
||||
backup_opt_dict = snapshot_create(backup_opt_dict)
|
||||
filepath = '.'
|
||||
chdir_path = os.path.expanduser(
|
||||
os.path.normpath(backup_opt_dict.path_to_backup.strip()))
|
||||
if not os.path.isdir(chdir_path):
|
||||
filepath = os.path.basename(chdir_path)
|
||||
chdir_path = os.path.dirname(chdir_path)
|
||||
os.chdir(chdir_path)
|
||||
builder = TarCommandBuilder(backup_opt_dict.tar_path,
|
||||
filepath=filepath)
|
||||
builder.set_dereference(backup_opt_dict.dereference_symlink)
|
||||
|
||||
if backup_opt_dict.exclude:
|
||||
builder.set_exclude(backup_opt_dict.exclude)
|
||||
if backup_opt_dict.encrypt_pass_file:
|
||||
builder.set_encryption(
|
||||
backup_opt_dict.openssl_path,
|
||||
backup_opt_dict.encrypt_pass_file)
|
||||
hostname_backup_name = backup_opt_dict.hostname_backup_name
|
||||
if not storage.is_ready():
|
||||
storage.prepare()
|
||||
incremental_backup = storage.find_previous_backup(
|
||||
hostname_backup_name,
|
||||
backup_opt_dict.no_incremental,
|
||||
backup_opt_dict.max_level,
|
||||
backup_opt_dict.always_level,
|
||||
backup_opt_dict.restart_always_level)
|
||||
storage.backup(backup_opt_dict.path_to_backup,
|
||||
hostname_backup_name, builder, incremental_backup)
|
||||
finally:
|
||||
snapshot_remove(backup_opt_dict, backup_opt_dict.shadow,
|
||||
backup_opt_dict.windows_volume)
|
||||
return
|
||||
|
||||
backup_os = BackupOs(backup_opt_dict.client_manager,
|
||||
backup_opt_dict.container,
|
||||
storage)
|
||||
|
||||
if backup_media == 'nova':
|
||||
logging.info('[*] Executing nova backup')
|
||||
backup_os.backup_nova(backup_opt_dict.nova_inst_id, time_stamp)
|
||||
backup_os.backup_nova(backup_opt_dict.nova_inst_id)
|
||||
elif backup_media == 'cindernative':
|
||||
logging.info('[*] Executing cinder backup')
|
||||
backup_os.backup_cinder(backup_opt_dict.cindernative_vol_id)
|
||||
elif backup_media == 'cinder':
|
||||
logging.info('[*] Executing cinder snapshot')
|
||||
backup_os.backup_cinder_by_glance(backup_opt_dict.cindernative_vol_id,
|
||||
time_stamp)
|
||||
backup_os.backup_cinder_by_glance(backup_opt_dict.cindernative_vol_id)
|
||||
else:
|
||||
raise Exception('unknown parameter backup_media %s' % backup_media)
|
||||
|
||||
|
||||
def backup_mode_fs(backup_opt_dict, time_stamp, manifest_meta_dict):
|
||||
"""
|
||||
Execute the necessary tasks for file system backup mode
|
||||
"""
|
||||
|
||||
logging.info('[*] File System backup is being executed...')
|
||||
try:
|
||||
|
||||
if is_windows():
|
||||
if backup_opt_dict.vssadmin:
|
||||
# Create a shadow copy.
|
||||
backup_opt_dict.shadow_path, backup_opt_dict.shadow = \
|
||||
vss_create_shadow_copy(backup_opt_dict.windows_volume)
|
||||
|
||||
# execute this after the snapshot creation
|
||||
if backup_opt_dict.mode == 'sqlserver':
|
||||
start_sql_server(backup_opt_dict)
|
||||
|
||||
else:
|
||||
# If lvm_auto_snap is true, the volume group and volume name will
|
||||
# be extracted automatically
|
||||
if backup_opt_dict.lvm_auto_snap:
|
||||
backup_opt_dict = get_lvm_info(backup_opt_dict)
|
||||
|
||||
# Generate the lvm_snap if lvm arguments are available
|
||||
lvm_snap(backup_opt_dict)
|
||||
|
||||
file_name_f = u'{0}_{1}_{2}_{3}'.format(
|
||||
backup_opt_dict.hostname,
|
||||
backup_opt_dict.backup_name,
|
||||
time_stamp,
|
||||
backup_opt_dict.curr_backup_level)
|
||||
meta_data_backup_file = u'tar_metadata_{0}'.format(file_name_f)
|
||||
|
||||
# Initialize a Queue for a maximum of 2 items
|
||||
tar_backup_queue = multiprocessing.Queue(maxsize=2)
|
||||
|
||||
if is_windows():
|
||||
backup_opt_dict.absolute_path = backup_opt_dict.path_to_backup
|
||||
if backup_opt_dict.vssadmin:
|
||||
backup_opt_dict.path_to_backup = use_shadow(
|
||||
backup_opt_dict.path_to_backup,
|
||||
backup_opt_dict.windows_volume)
|
||||
|
||||
filepath = '.'
|
||||
chdir_path = os.path.expanduser(
|
||||
os.path.normpath(backup_opt_dict.path_to_backup.strip()))
|
||||
if not os.path.isdir(chdir_path):
|
||||
filepath = os.path.basename(chdir_path)
|
||||
chdir_path = os.path.dirname(chdir_path)
|
||||
os.chdir(chdir_path)
|
||||
|
||||
# Change che current working directory to op_dict.path_to_backup
|
||||
|
||||
logging.info('[*] Changing current working directory to: {0} \
|
||||
'.format(chdir_path))
|
||||
logging.info('[*] Backup started for: {0}'.format(
|
||||
backup_opt_dict.path_to_backup))
|
||||
|
||||
builder = TarCommandBuilder(backup_opt_dict.tar_path)
|
||||
builder.set_dereference(backup_opt_dict.dereference_symlink)
|
||||
builder.set_filepath(filepath)
|
||||
curr_backup_level = manifest_meta_dict.get(
|
||||
'x-object-meta-backup-current-level', '0')
|
||||
tar_meta = manifest_meta_dict.get('x-object-meta-tar-meta-obj-name')
|
||||
|
||||
if not backup_opt_dict.no_incremental:
|
||||
builder.set_level(curr_backup_level)
|
||||
builder.set_work_dir(backup_opt_dict.work_dir)
|
||||
if tar_meta:
|
||||
builder.set_listed_incremental(tar_meta)
|
||||
else:
|
||||
builder.set_listed_incremental(meta_data_backup_file)
|
||||
if backup_opt_dict.exclude:
|
||||
builder.set_exclude(backup_opt_dict.exclude)
|
||||
|
||||
# Incremental backup section
|
||||
if not backup_opt_dict.no_incremental:
|
||||
|
||||
if not os.path.exists(backup_opt_dict.path_to_backup):
|
||||
raise Exception('Error: path-to-backup does not exist')
|
||||
# Write the tar meta data file in ~/.freezer. It will be
|
||||
# removed later on. If ~/.freezer does not exists it will
|
||||
# be created'.
|
||||
create_dir(backup_opt_dict.work_dir)
|
||||
|
||||
if tar_meta:
|
||||
sw_connector = backup_opt_dict.client_manager.get_swift()
|
||||
tar_meta_abs = "{0}/{1}".format(backup_opt_dict.work_dir,
|
||||
tar_meta)
|
||||
|
||||
file_name = tar_meta_abs.split('/')[-1]
|
||||
logging.info('[*] Downloading object {0} on {1}'.format(
|
||||
file_name, tar_meta_abs))
|
||||
|
||||
if os.path.exists(tar_meta_abs):
|
||||
os.remove(tar_meta_abs)
|
||||
|
||||
with open(tar_meta_abs, 'ab') as obj_fd:
|
||||
for obj_chunk in sw_connector.get_object(
|
||||
backup_opt_dict.container, file_name,
|
||||
resp_chunk_size=16000000)[1]:
|
||||
obj_fd.write(obj_chunk)
|
||||
|
||||
# Encrypt data if passfile is provided
|
||||
if backup_opt_dict.encrypt_pass_file:
|
||||
builder.set_encryption(
|
||||
backup_opt_dict.openssl_path,
|
||||
backup_opt_dict.encrypt_pass_file)
|
||||
|
||||
tar_backup_stream = multiprocessing.Process(
|
||||
target=tar_backup, args=(
|
||||
backup_opt_dict, builder.build(), tar_backup_queue,))
|
||||
|
||||
tar_backup_stream.daemon = True
|
||||
tar_backup_stream.start()
|
||||
|
||||
add_object = backup_opt_dict.storage.add_object
|
||||
|
||||
add_object_stream = multiprocessing.Process(
|
||||
target=add_object, args=(
|
||||
backup_opt_dict.max_segment_size, tar_backup_queue,
|
||||
file_name_f, time_stamp))
|
||||
add_object_stream.daemon = True
|
||||
add_object_stream.start()
|
||||
|
||||
tar_backup_stream.join()
|
||||
tar_backup_queue.put(({False: False}))
|
||||
tar_backup_queue.close()
|
||||
add_object_stream.join()
|
||||
|
||||
if add_object_stream.exitcode:
|
||||
raise Exception('failed to upload object to swift server')
|
||||
|
||||
(backup_opt_dict, manifest_meta_dict, tar_meta_to_upload,
|
||||
tar_meta_prev) = gen_manifest_meta(
|
||||
backup_opt_dict, manifest_meta_dict, meta_data_backup_file)
|
||||
|
||||
meta_data_abs_path = os.path.join(backup_opt_dict.work_dir,
|
||||
tar_meta_prev)
|
||||
|
||||
client_manager = backup_opt_dict.client_manager
|
||||
# Upload swift manifest for segments
|
||||
if backup_opt_dict.upload:
|
||||
# Request a new auth client in case the current token
|
||||
# is expired before uploading tar meta data or the swift manifest
|
||||
client_manager.create_swift()
|
||||
|
||||
if not backup_opt_dict.no_incremental:
|
||||
# Upload tar incremental meta data file and remove it
|
||||
logging.info('[*] Uploading tar meta data file: {0}'.format(
|
||||
tar_meta_to_upload))
|
||||
with open(meta_data_abs_path, 'r') as meta_fd:
|
||||
client_manager.get_swift().put_object(
|
||||
backup_opt_dict.container, tar_meta_to_upload, meta_fd)
|
||||
# Removing tar meta data file, so we have only one
|
||||
# authoritative version on swift
|
||||
logging.info('[*] Removing tar meta data file: {0}'.format(
|
||||
meta_data_abs_path))
|
||||
os.remove(meta_data_abs_path)
|
||||
backup_opt_dict.storage.upload_manifest(file_name_f,
|
||||
manifest_meta_dict)
|
||||
|
||||
finally:
|
||||
if is_windows():
|
||||
# Delete the shadow copy after the backup
|
||||
vss_delete_shadow_copy(backup_opt_dict.shadow,
|
||||
backup_opt_dict.windows_volume)
|
||||
else:
|
||||
# Unmount and remove lvm snapshot volume
|
||||
lvm_snap_remove(backup_opt_dict)
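Taken together, the refactor above reduces the filesystem branch of backup() to a storage-driven sequence. The following condensed sketch restates that flow with the names used in the diff; it is an illustration, not the literal function body::

    from freezer.backup import snapshot_create, snapshot_remove

    def run_fs_backup(backup_opt_dict, storage, builder):
        """Condensed restatement of the new backup.backup() filesystem path."""
        # Take the LVM or VSS snapshot first, exactly as snapshot_create() does.
        backup_opt_dict = snapshot_create(backup_opt_dict)
        try:
            if not storage.is_ready():
                storage.prepare()
            # Ask the storage for the backup this run should increment upon.
            previous = storage.find_previous_backup(
                backup_opt_dict.hostname_backup_name,
                backup_opt_dict.no_incremental,
                backup_opt_dict.max_level,
                backup_opt_dict.always_level,
                backup_opt_dict.restart_always_level)
            # The storage decides how the tar stream is persisted: Swift uploads
            # segments, LocalStorage writes files under the container path.
            storage.backup(backup_opt_dict.path_to_backup,
                           backup_opt_dict.hostname_backup_name,
                           builder, previous)
        finally:
            snapshot_remove(backup_opt_dict, backup_opt_dict.shadow,
                            backup_opt_dict.windows_volume)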
freezer/job.py (114 changed lines)

@@ -21,18 +21,25 @@ Hudson (tjh@cryptsoft.com).
|
||||
|
||||
import sys
|
||||
|
||||
from freezer import swift
|
||||
from freezer import utils
|
||||
from freezer import backup
|
||||
from freezer import restore
|
||||
from freezer import exec_cmd
|
||||
from freezer import restore
|
||||
from freezer import tar
|
||||
from freezer import winutils
|
||||
import os
|
||||
|
||||
import logging
|
||||
from freezer.restore import RestoreOs
|
||||
|
||||
|
||||
class Job:
|
||||
"""
|
||||
:type storage: freezer.storage.Storage
|
||||
"""
|
||||
|
||||
def __init__(self, conf_dict):
|
||||
self.conf = conf_dict
|
||||
self.storage = conf_dict.storage
|
||||
|
||||
def execute(self):
|
||||
logging.info('[*] Action not implemented')
|
||||
@ -44,16 +51,9 @@ class Job:
|
||||
def executemethod(func):
|
||||
def wrapper(self):
|
||||
self.start_time = utils.DateTime.now()
|
||||
self.conf.time_stamp = self.start_time.timestamp
|
||||
logging.info('[*] Job execution Started at: {0}'.
|
||||
format(self.start_time))
|
||||
|
||||
try:
|
||||
sw_connector = self.conf.client_manager.get_swift()
|
||||
self.conf.containers_list = sw_connector.get_account()[1]
|
||||
except Exception as error:
|
||||
raise Exception('Get containers list error: {0}'.format(error))
|
||||
|
||||
retval = func(self)
|
||||
|
||||
end_time = utils.DateTime.now()
|
||||
@ -68,20 +68,7 @@ class Job:
|
||||
class InfoJob(Job):
|
||||
@Job.executemethod
|
||||
def execute(self):
|
||||
if self.conf.list_containers:
|
||||
swift.show_containers(self.conf.containers_list)
|
||||
elif self.conf.list_objects:
|
||||
if not self.conf.storage.ready():
|
||||
logging.critical(
|
||||
'[*] Container {0} not available'.format(
|
||||
self.conf.container))
|
||||
return False
|
||||
swift.show_objects(self.conf)
|
||||
else:
|
||||
logging.warning(
|
||||
'[*] No retrieving info options were set. Exiting.')
|
||||
return False
|
||||
return True
|
||||
self.storage.info()
|
||||
|
||||
|
||||
class BackupJob(Job):
|
||||
@ -94,35 +81,15 @@ class BackupJob(Job):
|
||||
except Exception as error:
|
||||
logging.error('Error while sync exec: {0}'.format(error))
|
||||
self.conf.storage.prepare()
|
||||
if self.conf.no_incremental:
|
||||
if self.conf.max_level or \
|
||||
self.conf.always_level:
|
||||
raise Exception(
|
||||
'no-incremental option is not compatible '
|
||||
'with backup level options')
|
||||
manifest_meta_dict = {}
|
||||
else:
|
||||
# Check if a backup exist in swift with same name.
|
||||
# If not, set backup level to 0
|
||||
manifest_meta_dict =\
|
||||
swift.check_backup_and_tar_meta_existence(self.conf)
|
||||
|
||||
(self.conf, manifest_meta_dict) = swift.set_backup_level(
|
||||
self.conf, manifest_meta_dict)
|
||||
|
||||
self.conf.manifest_meta_dict = manifest_meta_dict
|
||||
if self.conf.mode == 'fs':
|
||||
backup.backup(
|
||||
self.conf, self.start_time.timestamp, manifest_meta_dict)
|
||||
backup.backup(self.conf, self.storage)
|
||||
elif self.conf.mode == 'mongo':
|
||||
backup.backup_mode_mongo(
|
||||
self.conf, self.start_time.timestamp, manifest_meta_dict)
|
||||
backup.backup_mode_mongo(self.conf)
|
||||
elif self.conf.mode == 'mysql':
|
||||
backup.backup_mode_mysql(
|
||||
self.conf, self.start_time.timestamp, manifest_meta_dict)
|
||||
backup.backup_mode_mysql(self.conf)
|
||||
elif self.conf.mode == 'sqlserver':
|
||||
backup.backup_mode_sql_server(
|
||||
self.conf, self.time_stamp, manifest_meta_dict)
|
||||
backup.backup_mode_sql_server(self.conf)
|
||||
else:
|
||||
raise ValueError('Please provide a valid backup mode')
|
||||
|
||||
@ -149,34 +116,47 @@ class BackupJob(Job):
|
||||
class RestoreJob(Job):
|
||||
@Job.executemethod
|
||||
def execute(self):
|
||||
conf = self.conf
|
||||
logging.info('[*] Executing FS restore...')
|
||||
restore_timestamp = None
|
||||
if conf.restore_from_date:
|
||||
restore_timestamp = utils.date_to_timestamp(conf.restore_from_date)
|
||||
restore_abs_path = conf.restore_abs_path
|
||||
if conf.backup_media == 'fs':
|
||||
builder = tar.TarCommandRestoreBuilder(conf.tar_path,
|
||||
restore_abs_path)
|
||||
if conf.dry_run:
|
||||
builder.set_dry_run()
|
||||
if winutils.is_windows():
|
||||
builder.set_windows()
|
||||
os.chdir(conf.restore_abs_path)
|
||||
if conf.encrypt_pass_file:
|
||||
builder.set_encryption(conf.openssl_path,
|
||||
conf.encrypt_pass_file)
|
||||
|
||||
if not self.conf.storage.ready():
|
||||
raise ValueError('Container: {0} not found. Please provide an '
|
||||
'existing container.'
|
||||
.format(self.conf.container))
|
||||
conf.storage.restore_from_date(conf.hostname_backup_name,
|
||||
restore_abs_path,
|
||||
builder,
|
||||
restore_timestamp)
|
||||
return
|
||||
|
||||
# Get the object list of the remote containers and store it in the
|
||||
# same dict passes as argument under the dict.remote_obj_list namespace
|
||||
res = RestoreOs(self.conf.client_manager, self.conf.container)
|
||||
restore_from_date = self.conf.restore_from_date
|
||||
backup_media = self.conf.backup_media
|
||||
if backup_media == 'fs':
|
||||
restore.restore_fs(self.conf)
|
||||
elif backup_media == 'nova':
|
||||
res.restore_nova(restore_from_date, self.conf.nova_inst_id)
|
||||
elif backup_media == 'cinder':
|
||||
res.restore_cinder_by_glance(restore_from_date, self.conf.cinder)
|
||||
elif backup_media == 'cindernative':
|
||||
res.restore_cinder(restore_from_date, self.conf.cinder_vol_id)
|
||||
res = restore.RestoreOs(conf.client_manager, conf.container)
|
||||
if conf.backup_media == 'nova':
|
||||
res.restore_nova(conf.nova_inst_id, restore_timestamp)
|
||||
elif conf.backup_media == 'cinder':
|
||||
res.restore_cinder_by_glance(conf.cinder, restore_timestamp)
|
||||
elif conf.backup_media == 'cindernative':
|
||||
res.restore_cinder(conf.cinder_vol_id, restore_timestamp)
|
||||
else:
|
||||
raise Exception("unknown backup type: %s" % backup_media)
|
||||
raise Exception("unknown backup type: %s" % conf.backup_media)
|
||||
|
||||
|
||||
class AdminJob(Job):
|
||||
@Job.executemethod
|
||||
def execute(self):
|
||||
swift.remove_obj_older_than(self.conf)
|
||||
timestamp = utils.date_to_timestamp(self.conf.remove_from_date)
|
||||
self.storage.remove_older_than(timestamp,
|
||||
self.conf.hostname_backup_name)
|
||||
|
||||
|
||||
class ExecJob(Job):
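main.py (further down in this change) obtains the job through job.create_job(backup_args). The action-to-class mapping itself is outside the lines shown here, so the factory below is only a plausible sketch using the Job subclasses defined above::

    from freezer.job import InfoJob, BackupJob, RestoreJob, AdminJob, ExecJob

    def create_job(conf):
        # Hypothetical action-to-class mapping; the real create_job() lives in
        # freezer/job.py outside the hunks shown above.
        jobs = {
            'info': InfoJob,
            'backup': BackupJob,
            'restore': RestoreJob,
            'admin': AdminJob,
            'exec': ExecJob,
        }
        return jobs.get(conf.action, InfoJob)(conf)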
freezer/local.py (new file, 133 lines)

@@ -0,0 +1,133 @@
"""
Copyright 2014 Hewlett-Packard

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

This product includes cryptographic software written by Eric Young
(eay@cryptsoft.com). This product includes software written by Tim
Hudson (tjh@cryptsoft.com).
"""

import subprocess
import logging
import os
import shutil

from freezer import storage
from freezer import utils


class LocalStorage(storage.Storage):

    def prepare(self):
        utils.create_dir(self.storage_directory)

    def get_backups(self):
        backup_names = os.listdir(self.storage_directory)
        backups = []
        for backup_name in backup_names:
            backup_dir = self.storage_directory + "/" + backup_name
            timestamps = os.listdir(backup_dir)
            for timestamp in timestamps:
                increments = os.listdir(backup_dir + "/" + timestamp)
                backups.extend(self._get_backups(increments))
        return backups

    def __init__(self, storage_directory, work_dir):
        """
        :param storage_directory: directory of storage
        :type storage_directory: str
        :return:
        """
        self.storage_directory = storage_directory
        self.work_dir = work_dir

    def info(self):
        pass

    def _backup_dir(self, backup):
        """
        :param backup:
        :type backup: freezer.storage.Backup
        :return:
        """
        return "{0}/{1}".format(self.storage_directory,
                                backup.hostname_backup_name)

    def _zero_backup_dir(self, backup):
        """
        :param backup:
        :type backup: freezer.storage.Backup
        :return:
        """
        return "{0}/{1}".format(self._backup_dir(backup), backup.timestamp)

    def backup(self, path, hostname_backup_name, tar_builder,
               parent_backup=None):
        """
        Backup path
        storage_dir/backup_name/timestamp/backup_name_timestamps_level
        :param path:
        :param hostname_backup_name:
        :param tar_builder:
        :type tar_builder: freezer.tar.TarCommandBuilder
        :param parent_backup:
        :type parent_backup: freezer.storage.Backup
        :return:
        """
        new_backup = self._create_backup(hostname_backup_name, parent_backup)

        host_backups = self._backup_dir(new_backup)
        utils.create_dir(host_backups)

        if parent_backup:
            zero_backup = self._zero_backup_dir(parent_backup.parent)
        else:
            zero_backup = self._zero_backup_dir(new_backup)
        utils.create_dir(zero_backup)
        tar_builder.set_output_file("{0}/{1}".format(zero_backup,
                                                     new_backup.repr()))

        tar_incremental = "{0}/{1}".format(zero_backup, new_backup.tar())
        if parent_backup:
            shutil.copyfile("{0}/{1}".format(
                zero_backup, parent_backup.tar()), tar_incremental)

        tar_builder.set_listed_incremental(tar_incremental)

        logging.info('[*] Changing current working directory to: {0}'
                     .format(path))
        logging.info('[*] Backup started for: {0}'.format(path))

        subprocess.check_output(tar_builder.build(), shell=True)

    def is_ready(self):
        return os.path.isdir(self.storage_directory)

    def remove_older_than(self, remove_older_timestamp, hostname_backup_name):
        pass

    def restore(self, backup, path, tar_builder, level):
        """
        :param backup:
        :param path:
        :param tar_builder:
        :type tar_builder: freezer.tar.TarCommandRestoreBuilder
        :param level:
        :return:
        """
        zero_dir = self._zero_backup_dir(backup)
        for level in range(0, level + 1):
            c_backup = backup.increments[level]
            tar_builder.set_archive(zero_dir + "/" + c_backup.repr())
            subprocess.check_output(tar_builder.build(), shell=True)
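LocalStorage.backup() lays data out as storage_dir/backup_name/timestamp/backup_name_timestamp_level, with the GNU Tar listed-incremental metadata file stored beside the archive. A hedged usage sketch follows; the builder configuration is abbreviated and the paths reuse the README example::

    from freezer.local import LocalStorage
    from freezer.tar import TarCommandBuilder

    storage = LocalStorage('/local_backup_storage', work_dir='/root/.freezer')
    if not storage.is_ready():
        storage.prepare()

    # backup.backup() chdirs into the directory being backed up before this
    # point, so the tar command only sees the relative filepath.
    builder = TarCommandBuilder('tar', filepath='.')

    previous = storage.find_previous_backup(
        'git-HP-DL380-host-001_adminui.git',
        no_incremental=False, max_level=None,
        always_level=None, restart_always_level=None)

    storage.backup('/home/git/repositories/adminui.git/',
                   'git-HP-DL380-host-001_adminui.git', builder, previous)

    # Resulting layout after a level 0 run (illustrative):
    #   /local_backup_storage/git-HP-DL380-host-001_adminui.git/<timestamp>/
    #       git-HP-DL380-host-001_adminui.git_<timestamp>_0
    #       tar_metadata_git-HP-DL380-host-001_adminui.git_<timestamp>_0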
@@ -22,7 +22,7 @@ Freezer LVM related functions
"""

from freezer.utils import (
    create_dir, get_vol_fs_type, validate_all_args, get_mount_from_path)
    create_dir, get_vol_fs_type, get_mount_from_path)

import re
import os
@@ -40,14 +40,17 @@ def lvm_eval(backup_opt_dict):
    must be set accordingly
    """

    required_list = [
        backup_opt_dict.lvm_volgroup,
        backup_opt_dict.lvm_srcvol,
        backup_opt_dict.lvm_dirmount]

    if not validate_all_args(required_list):
        logging.warning('[*] Required lvm options not set. The backup will \
        execute without lvm snapshot.')
    if not backup_opt_dict.lvm_volgroup:
        logging.warning('[*] Required lvm_volgroup not set. The backup will '
                        'execute without lvm snapshot.')
        return False
    if not backup_opt_dict.lvm_srcvol:
        logging.warning('[*] Required lvm_srcvol not set. The backup will '
                        'execute without lvm snapshot.')
        return False
    if not backup_opt_dict.lvm_dirmount:
        logging.warning('[*] Required lvm_dirmount not set. The backup will '
                        'execute without lvm snapshot.')
        return False

    # Create lvm_dirmount dir if it doesn't exists and write action in logs
@@ -109,7 +112,6 @@ def lvm_snap(backup_opt_dict):
    Implement checks on lvm volumes availability. According to these checks
    we might create an lvm snapshot and mount it or use an existing one
    """

    if lvm_eval(backup_opt_dict) is not True:
        return True
    # Setting lvm snapsize to 5G is not set
@@ -233,6 +235,7 @@ def get_lvm_info(backup_opt_dict):
            backup_opt_dict.__dict__['lvm_srcvol'] = \
                u'/dev/{0}/{1}'.format(
                    backup_opt_dict.lvm_volgroup, lvm_srcvol)
            break
    return backup_opt_dict

    return backup_opt_dict
    raise Exception("Cannot find {0} in {1}".format(
        mount_point_path, mount_points))
@@ -21,11 +21,11 @@ Hudson (tjh@cryptsoft.com).
Freezer main execution function
"""
from freezer.bandwidth import monkeypatch_socket_bandwidth

from freezer import job
from freezer.arguments import backup_arguments
from freezer.osclients import ClientManager
from freezer.storages.swiftstorage import SwiftStorage
from freezer.swift import SwiftStorage
from freezer.local import LocalStorage
from freezer.utils import create_dir
import os
import subprocess
@@ -33,6 +33,8 @@ import logging
import sys
import json
# Initialize backup options
from freezer.validator import Validator

(backup_args, arg_parse) = backup_arguments()

@@ -113,14 +115,29 @@ def freezer_main(args={}):

    monkeypatch_socket_bandwidth(backup_args)

    backup_args.__dict__['hostname_backup_name'] = "{0}_{1}".format(
        backup_args.hostname, backup_args.backup_name)

    backup_args.__dict__['client_manager'] = ClientManager(
        backup_args.options,
        backup_args.insecure,
        backup_args.os_auth_ver,
        backup_args.dry_run)

    backup_args.__dict__['storage'] = SwiftStorage(backup_args.client_manager,
                                                   backup_args.container)
    if backup_args.storage == "swift":
        backup_args.__dict__['storage'] = SwiftStorage(
            backup_args.client_manager,
            backup_args.container,
            backup_args.work_dir,
            backup_args.max_segment_size)
    elif backup_args.storage == "local":
        backup_args.__dict__['storage'] = LocalStorage(
            backup_args.container,
            backup_args.work_dir)
    else:
        raise Exception("No storage found for name " + backup_args.storage)

    Validator.validate(backup_args)

    freezer_job = job.create_job(backup_args)
    freezer_job.execute()
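The if/elif chain above is the whole pluggability seam: each back end only needs to satisfy the freezer.storage.Storage interface. An equivalent, purely illustrative way to express the same selection is a small registry; this is not what the patch does, just a sketch of the design choice::

    from freezer.local import LocalStorage
    from freezer.swift import SwiftStorage

    # Hypothetical registry equivalent to the if/elif chain above.
    STORAGE_FACTORIES = {
        'swift': lambda args: SwiftStorage(args.client_manager, args.container,
                                           args.work_dir, args.max_segment_size),
        'local': lambda args: LocalStorage(args.container, args.work_dir),
    }

    def build_storage(backup_args):
        try:
            return STORAGE_FACTORIES[backup_args.storage](backup_args)
        except KeyError:
            raise Exception("No storage found for name " + backup_args.storage)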
@ -21,142 +21,8 @@ Hudson (tjh@cryptsoft.com).
|
||||
Freezer restore modes related functions
|
||||
"""
|
||||
|
||||
import multiprocessing
|
||||
import os
|
||||
import logging
|
||||
import re
|
||||
import datetime
|
||||
|
||||
from freezer.tar import tar_restore
|
||||
from freezer import swift
|
||||
from freezer.utils import (validate_all_args, sort_backup_list,
|
||||
date_to_timestamp, ReSizeStream)
|
||||
|
||||
|
||||
def restore_fs(backup_opt_dict):
|
||||
"""
|
||||
Restore data from swift server to your local node. Data will be restored
|
||||
in the directory specified in backup_opt_dict.restore_abs_path. The
|
||||
object specified with the --get-object option will be downloaded from
|
||||
the Swift server and will be downloaded inside the parent directory of
|
||||
backup_opt_dict.restore_abs_path. If the object was compressed during
|
||||
backup time, then it is decrypted, decompressed and de-archived to
|
||||
backup_opt_dict.restore_abs_path. Before download the file, the size of
|
||||
the local volume/disk/partition will be computed. If there is enough space
|
||||
the full restore will be executed. Please remember to stop any service
|
||||
that require access to the data before to start the restore execution
|
||||
and to start the service at the end of the restore execution
|
||||
"""
|
||||
|
||||
# List of mandatory values
|
||||
required_list = [
|
||||
os.path.exists(backup_opt_dict.restore_abs_path),
|
||||
backup_opt_dict.container,
|
||||
backup_opt_dict.backup_name
|
||||
]
|
||||
|
||||
# Arguments validation. Raise ValueError is all the arguments are not True
|
||||
if not validate_all_args(required_list):
|
||||
raise ValueError('[*] Error: please provide ALL the following '
|
||||
'arguments: a valid --restore-abs-path '
|
||||
'--container --backup-name')
|
||||
|
||||
if not backup_opt_dict.restore_from_date:
|
||||
logging.warning(('[*] Restore date time not available. Setting to '
|
||||
'current datetime'))
|
||||
backup_opt_dict.restore_from_date = \
|
||||
re.sub(
|
||||
r'^(\S+?) (.+?:\d{,2})\.\d+?$', r'\1T\2',
|
||||
str(datetime.datetime.now()))
|
||||
|
||||
# If restore_from_host is set to local hostname is not set in
|
||||
# backup_opt_dict.restore_from_host
|
||||
if backup_opt_dict.restore_from_host:
|
||||
backup_opt_dict.hostname = backup_opt_dict.restore_from_host
|
||||
|
||||
# Check if there's a backup matching. If not raise Exception
|
||||
remote_obj_list = swift.get_container_content(
|
||||
backup_opt_dict.client_manager,
|
||||
backup_opt_dict.container)
|
||||
|
||||
backup_opt_dict.remote_match_backup = \
|
||||
swift.get_match_backup(backup_opt_dict.backup_name,
|
||||
backup_opt_dict.hostname,
|
||||
remote_obj_list)
|
||||
restore_fs_sort_obj(backup_opt_dict)
|
||||
|
||||
|
||||
def restore_fs_sort_obj(backup_opt_dict):
|
||||
"""
|
||||
Take options dict as argument and sort/remove duplicate elements from
|
||||
backup_opt_dict.remote_match_backup and find the closes backup to the
|
||||
provided from backup_opt_dict.restore_from_date. Once the objects are
|
||||
looped backwards and the level 0 backup is found, along with the other
|
||||
level 1,2,n, is download the object from swift and untar them locally
|
||||
starting from level 0 to level N.
|
||||
"""
|
||||
|
||||
# Convert backup_opt_dict.restore_from_date to timestamp
|
||||
opt_backup_timestamp = date_to_timestamp(backup_opt_dict.restore_from_date)
|
||||
|
||||
# Sort remote backup list using timestamp in reverse order,
|
||||
# that is from the newest to the oldest executed backup
|
||||
sorted_backups_list = sort_backup_list(backup_opt_dict.remote_match_backup)
|
||||
# Get the closest earlier backup to date set in
|
||||
# backup_opt_dict.restore_from_date
|
||||
closest_backup_list = []
|
||||
for backup_obj in sorted_backups_list:
|
||||
if backup_obj.startswith('tar_metadata'):
|
||||
continue
|
||||
obj_name_match = re.search(
|
||||
r'\S+?_{0}_(\d+)_(\d+?)$'.format(backup_opt_dict.backup_name),
|
||||
backup_obj, re.I)
|
||||
if not obj_name_match:
|
||||
continue
|
||||
# Ensure provided timestamp is bigger then object timestamp
|
||||
if opt_backup_timestamp >= int(obj_name_match.group(1)):
|
||||
closest_backup_list.append(backup_obj)
|
||||
# If level 0 is reached, break the loop as level 0 is the first
|
||||
# backup we want to restore
|
||||
if int(obj_name_match.group(2)) == 0:
|
||||
break
|
||||
|
||||
if not closest_backup_list:
|
||||
raise ValueError('No matching backup name {0} found in '
|
||||
'container {1} for hostname {2}'
|
||||
.format(backup_opt_dict.backup_name,
|
||||
backup_opt_dict.container,
|
||||
backup_opt_dict.hostname))
|
||||
|
||||
# Backups are looped from the last element of the list going
|
||||
# backwards, as we want to restore starting from the oldest object
|
||||
for backup in closest_backup_list[::-1]:
|
||||
write_pipe, read_pipe = multiprocessing.Pipe()
|
||||
process_stream = multiprocessing.Process(
|
||||
target=swift.object_to_stream, args=(
|
||||
backup_opt_dict.container, backup_opt_dict.client_manager,
|
||||
write_pipe, read_pipe, backup,))
|
||||
process_stream.daemon = True
|
||||
process_stream.start()
|
||||
|
||||
write_pipe.close()
|
||||
# Start the tar pipe consumer process
|
||||
tar_stream = multiprocessing.Process(
|
||||
target=tar_restore, args=(backup_opt_dict, read_pipe))
|
||||
tar_stream.daemon = True
|
||||
tar_stream.start()
|
||||
read_pipe.close()
|
||||
process_stream.join()
|
||||
tar_stream.join()
|
||||
|
||||
if tar_stream.exitcode:
|
||||
raise Exception('failed to restore file')
|
||||
|
||||
logging.info(
|
||||
'[*] Restore execution successfully executed for backup name {0},\
|
||||
from container {1}, into directory {2}'.format(
|
||||
backup_opt_dict.backup_name, backup_opt_dict.container,
|
||||
backup_opt_dict.restore_abs_path))
|
||||
from freezer import utils
|
||||
|
||||
|
||||
class RestoreOs:
|
||||
@ -164,13 +30,19 @@ class RestoreOs:
|
||||
self.client_manager = client_manager
|
||||
self.container = container
|
||||
|
||||
def _get_backups(self, path, restore_from_date):
|
||||
timestamp = date_to_timestamp(restore_from_date)
|
||||
def _get_backups(self, path, restore_from_timestamp):
|
||||
"""
|
||||
:param path:
|
||||
:type path: str
|
||||
:param restore_from_timestamp:
|
||||
:type restore_from_timestamp: int
|
||||
:return:
|
||||
"""
|
||||
swift = self.client_manager.get_swift()
|
||||
info, backups = swift.get_container(self.container, path=path)
|
||||
backups = sorted(map(lambda x: int(x["name"].rsplit("/", 1)[-1]),
|
||||
backups))
|
||||
backups = filter(lambda x: x >= timestamp, backups)
|
||||
backups = filter(lambda x: x >= restore_from_timestamp, backups)
|
||||
|
||||
if not backups:
|
||||
msg = "Cannot find backups for path: %s" % path
|
||||
@ -178,16 +50,22 @@ class RestoreOs:
|
||||
raise BaseException(msg)
|
||||
return backups[-1]
|
||||
|
||||
def _create_image(self, path, restore_from_date):
|
||||
def _create_image(self, path, restore_from_timestamp):
|
||||
"""
|
||||
:param path:
|
||||
:param restore_from_timestamp:
|
||||
:type restore_from_timestamp: int
|
||||
:return:
|
||||
"""
|
||||
swift = self.client_manager.get_swift()
|
||||
glance = self.client_manager.get_glance()
|
||||
backup = self._get_backups(path, restore_from_date)
|
||||
backup = self._get_backups(path, restore_from_timestamp)
|
||||
stream = swift.get_object(
|
||||
self.container, "%s/%s" % (path, backup), resp_chunk_size=10000000)
|
||||
length = int(stream[0]["x-object-meta-length"])
|
||||
logging.info("[*] Creation glance image")
|
||||
image = glance.images.create(
|
||||
data=ReSizeStream(stream[1], length, 1),
|
||||
data=utils.ReSizeStream(stream[1], length, 1),
|
||||
container_format="bare",
|
||||
disk_format="raw")
|
||||
return stream[0], image
|
||||
@ -208,16 +86,17 @@ class RestoreOs:
|
||||
backup = min(backups, key=lambda x: x.created_at)
|
||||
cinder.restores.restore(backup_id=backup.id)
|
||||
|
||||
def restore_cinder_by_glance(self, restore_from_date, volume_id):
|
||||
def restore_cinder_by_glance(self, restore_from_timestamp, volume_id):
|
||||
"""
|
||||
1) Define swift directory
|
||||
2) Download and upload to glance
|
||||
3) Create volume from glance
|
||||
4) Delete
|
||||
:param restore_from_date - date in format '%Y-%m-%dT%H:%M:%S'
|
||||
:param restore_from_timestamp:
|
||||
:type restore_from_timestamp: int
|
||||
:param volume_id - id of attached cinder volume
|
||||
"""
|
||||
(info, image) = self._create_image(volume_id, restore_from_date)
|
||||
(info, image) = self._create_image(volume_id, restore_from_timestamp)
|
||||
length = int(info["x-object-meta-length"])
|
||||
gb = 1073741824
|
||||
size = length / gb
|
||||
@ -229,13 +108,14 @@ class RestoreOs:
|
||||
logging.info("[*] Deleting temporary image")
|
||||
self.client_manager.get_glance().images.delete(image)
|
||||
|
||||
def restore_nova(self, restore_from_date, instance_id):
|
||||
def restore_nova(self, restore_from_timestamp, instance_id):
|
||||
"""
|
||||
:param restore_from_date: date in format '%Y-%m-%dT%H:%M:%S'
|
||||
:param restore_from_timestamp:
|
||||
:type restore_from_timestamp: int
|
||||
:param instance_id: id of attached nova instance
|
||||
:return:
|
||||
"""
|
||||
(info, image) = self._create_image(instance_id, restore_from_date)
|
||||
(info, image) = self._create_image(instance_id, restore_from_timestamp)
|
||||
nova = self.client_manager.get_nova()
|
||||
flavor = nova.flavors.get(info['x-object-meta-tenant-id'])
|
||||
logging.info("[*] Creation an instance")
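With the date-string handling moved up into RestoreJob, RestoreOs only deals with integer timestamps: _get_backups() above reduces the object names stored as <id>/<timestamp> to the newest snapshot taken at or after the requested time. A small self-contained sketch of that selection rule::

    def pick_backup(object_names, restore_from_timestamp):
        """Reduce '<id>/<timestamp>' object names to one snapshot timestamp."""
        timestamps = sorted(int(name.rsplit("/", 1)[-1]) for name in object_names)
        candidates = [ts for ts in timestamps if ts >= restore_from_timestamp]
        if not candidates:
            raise BaseException("Cannot find backups for path")
        return candidates[-1]

    # Three nova snapshots of the same instance; the newest candidate wins:
    names = ['3ad7a62f/1395734461', '3ad7a62f/1395820861', '3ad7a62f/1395907261']
    print(pick_backup(names, 1395800000))  # 1395907261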
freezer/storage.py (new file, 337 lines)

@@ -0,0 +1,337 @@
|
||||
"""
|
||||
Copyright 2014 Hewlett-Packard
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
This product includes cryptographic software written by Eric Young
|
||||
(eay@cryptsoft.com). This product includes software written by Tim
|
||||
Hudson (tjh@cryptsoft.com).
|
||||
"""
|
||||
|
||||
import re
|
||||
import utils
|
||||
import logging
|
||||
|
||||
|
||||
class Storage(object):
|
||||
"""
|
||||
Any freezer storage implementation should be inherited from this abstract
|
||||
class.
|
||||
"""
|
||||
|
||||
def is_ready(self):
|
||||
"""
|
||||
|
||||
:rtype: bool
|
||||
:return:
|
||||
"""
|
||||
raise NotImplementedError("Should have implemented this")
|
||||
|
||||
def prepare(self):
|
||||
"""
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
raise NotImplementedError("Should have implemented this")
|
||||
|
||||
def find(self, hostname_backup_name):
|
||||
"""
|
||||
Gets backups my backup_name and hostname
|
||||
:param hostname_backup_name:
|
||||
:rtype: list[Backup]
|
||||
:return: List of matched backups
|
||||
"""
|
||||
return [b for b in self.get_backups()
|
||||
if b.hostname_backup_name == hostname_backup_name]
|
||||
|
||||
def get_backups(self):
|
||||
raise NotImplementedError("Should have implemented this")
|
||||
|
||||
def backup(self, path, hostname_backup_name, tar_builder,
|
||||
parent_backup=None):
|
||||
"""
|
||||
Implements backup of path directory.
|
||||
|
||||
:type path: str
|
||||
:type hostname_backup_name: str
|
||||
:type tar_builder: freezer.tar.TarCommandBuilder
|
||||
:param parent_backup: Can be None.
|
||||
Previous backup for incremental update.
|
||||
:type parent_backup: Backup
|
||||
"""
|
||||
raise NotImplementedError("Should have implemented this")
|
||||
|
||||
def restore(self, backup, path, tar_builder, level):
|
||||
"""
|
||||
|
||||
:param backup:
|
||||
:param path:
|
||||
:param tar_builder:
|
||||
:type tar_builder: freezer.tar.TarCommandRestoreBuilder
|
||||
:param level:
|
||||
:return:
|
||||
"""
|
||||
raise NotImplementedError("Should have implemented this")
|
||||
|
||||
def restore_from_date(self, hostname_backup_name,
|
||||
path, tar_builder, restore_timestamp=None):
|
||||
"""
|
||||
:param hostname_backup_name:
|
||||
:type hostname_backup_name: str
|
||||
:param restore_timestamp:
|
||||
:type restore_timestamp: int
|
||||
:param path:
|
||||
:type path: str
|
||||
:param tar_builder:
|
||||
:type tar_builder: freezer.tar.TarCommandRestoreBuilder
|
||||
:return:
|
||||
"""
|
||||
backups = self.find(hostname_backup_name)
|
||||
if not backups:
|
||||
raise Exception("[*] No backups found")
|
||||
level = 0
|
||||
if restore_timestamp:
|
||||
backups = [b for b in backups
|
||||
if b.timestamp <= restore_timestamp and b.tar_meta]
|
||||
backup = min(backups, key=lambda b: b.timestamp)
|
||||
if not backup:
|
||||
raise ValueError('No matching backup name {0} found'
|
||||
.format(hostname_backup_name))
|
||||
while (level in backup.increments
|
||||
and backup.increments[level].timestamp
|
||||
<= restore_timestamp):
|
||||
level += 1
|
||||
|
||||
if level in backup.increments \
|
||||
and backup.increments[level].timestamp > restore_timestamp:
|
||||
level -= 1
|
||||
else:
|
||||
backup = max(backups, key=lambda b: b.timestamp)
|
||||
if not backup:
|
||||
raise ValueError('No matching backup name {0} found'
|
||||
.format(hostname_backup_name))
|
||||
level = backup.latest_update.level
|
||||
|
||||
self.restore(backup, path, tar_builder, level)
|
||||
|
||||
def remove_older_than(self, remove_older_timestamp, hostname_backup_name):
|
||||
raise NotImplementedError("Should have implemented this")
|
||||
|
||||
def info(self):
|
||||
raise NotImplementedError("Should have implemented this")
|
||||
|
||||
@staticmethod
|
||||
def _create_backup(name, backup=None):
|
||||
"""
|
||||
:param name:
|
||||
:type name: str
|
||||
:param backup:
|
||||
:type backup: Backup
|
||||
:rtype: Backup
|
||||
:return:
|
||||
"""
|
||||
return Backup(name, utils.DateTime.now().timestamp,
|
||||
backup.latest_update.level + 1 if backup else 0)
|
||||
|
||||
@staticmethod
|
||||
def _get_backups(names):
|
||||
"""
|
||||
No side effect version of get_backups
|
||||
:param names:
|
||||
:type names: list[str]
|
||||
:rtype: list[Backup]
|
||||
:return: list of zero level backups
|
||||
"""
|
||||
prefix = 'tar_metadata_'
|
||||
tar_names = set([x[len(prefix):]
|
||||
for x in names if x.startswith(prefix)])
|
||||
backup_names = [x for x in names if not x.startswith(prefix)]
|
||||
backups = []
|
||||
""":type: list[Backup]"""
|
||||
for name in backup_names:
|
||||
try:
|
||||
backup = Backup.parse(name)
|
||||
backup.tar_meta = name in tar_names
|
||||
backups.append(backup)
|
||||
except Exception as e:
|
||||
logging.exception(e)
|
||||
logging.error("cannot parse swift backup name: {0}"
|
||||
.format(name))
|
||||
backups.sort(key=lambda x: (x.timestamp, x.level))
|
||||
zero_backups = []
|
||||
last_backup = None
|
||||
|
||||
""":type last_backup: freezer.storage.Backup"""
|
||||
for backup in backups:
|
||||
if backup.level == 0:
|
||||
zero_backups.append(backup)
|
||||
last_backup = backup
|
||||
else:
|
||||
last_backup.add_increment(backup)
|
||||
|
||||
return zero_backups
|
||||
|
||||
def find_previous_backup(self, hostname_backup_name, no_incremental,
|
||||
max_level, always_level, restart_always_level):
|
||||
backups = self.find(hostname_backup_name)
|
||||
return self._find_previous_backup(backups, no_incremental, max_level,
|
||||
always_level, restart_always_level)
|
||||
|
||||
@staticmethod
|
||||
def _find_previous_backup(backups, no_incremental, max_level, always_level,
|
||||
restart_always_level):
|
||||
"""
|
||||
|
||||
:param backups:
|
||||
:type backups: list[Backup]
|
||||
:param no_incremental:
|
||||
:param max_level:
|
||||
:param always_level:
|
||||
:param restart_always_level:
|
||||
:return:
|
||||
"""
|
||||
if no_incremental or not backups:
|
||||
return None
|
||||
incremental_backup = max(backups, key=lambda x: x.timestamp)
|
||||
""":type : freezer.storage.Backup"""
|
||||
latest_update = incremental_backup.latest_update
|
||||
if max_level and max_level <= latest_update.level:
|
||||
latest_update = None
|
||||
elif always_level and latest_update.level >= always_level:
|
||||
if latest_update.level > 0:
|
||||
latest_update = \
|
||||
incremental_backup.increments[latest_update.level - 1]
|
||||
else:
|
||||
latest_update = None
|
||||
elif restart_always_level and utils.DateTime.now().timestamp > \
|
||||
latest_update.timestamp + restart_always_level * 86400:
|
||||
latest_update = None
|
||||
return latest_update
|
||||
|
||||
|
||||
class Backup:
    """
    Internal freezer representation of backup.
    Includes:
        name (hostname_backup_name) of backup
        timestamp of backup (when it was executed)
        level of backup (freezer supports incremental backup)
            A completed full backup has level 0 and can be restored without
            any additional information.
            Levels 1, 2, ... mean that the backup is incremental and contains
            only the portion of information that actually changed
            since the last backup.
        tar_meta - boolean value that is true when meta information is
        available.
        Please check for additional information about tar_meta:
            http://www.gnu.org/software/tar/manual/html_node/Incremental-Dumps.html
    """
    PATTERN = r'(.*)_(\d+)_(\d+?)$'

    def __init__(self, hostname_backup_name, timestamp, level, tar_meta=False):
        """

        :param hostname_backup_name: name (hostname_backup_name) of backup
        :type hostname_backup_name: str
        :param timestamp: timestamp of backup (when it was executed)
        :type timestamp: int
        :param level: level of backup (freezer supports incremental backup)
            A completed full backup has level 0 and can be restored without
            any additional information.
            Levels 1, 2, ... mean that the backup is incremental and contains
            only the portion of information that actually changed
            since the last backup.
        :type level: int
        :param tar_meta: whether the backup has an attached tar meta
        file in storage. Default = False
        :type tar_meta: bool
        :return:
        """
        if not isinstance(level, int):
            raise ValueError("Level should have type int")
        self.hostname_backup_name = hostname_backup_name
        self._timestamp = timestamp
        self.tar_meta = tar_meta
        self._level = level
        self._increments = {0: self}
        self._latest_update = self
        self._parent = self

    @property
    def parent(self):
        return self._parent

    @property
    def timestamp(self):
        return self._timestamp

    @property
    def level(self):
        return self._level

    @property
    def increments(self):
        return self._increments

    @property
    def latest_update(self):
        return self._latest_update

    def tar(self):
        return "tar_metadata_{0}".format(self.repr())

    def add_increment(self, increment):
        """

        :param increment:
        :type increment: Backup
        :return:
        """
        if self.level != 0:
            raise ValueError("Can not add increment to increment")
        if increment.level == 0:
            raise ValueError("Can not add increment with level 0")
        increment._parent = self
        if (increment.level not in self._increments or
                increment.timestamp >
                self._increments[increment.level].timestamp):
            self._increments[increment.level] = increment
        if self.latest_update.level <= increment.level:
            self._latest_update = increment

    def repr(self):
        return '_'.join([self.hostname_backup_name,
                         repr(self._timestamp), repr(self._level)])

    @staticmethod
    def parse(value):
        """

        :param value:
        :type value: str
        :return:
        """
        match = re.search(Backup.PATTERN, value, re.I)
        if not match:
            raise ValueError("Cannot parse backup from string: " + value)
        return Backup(match.group(1), int(match.group(2)), int(match.group(3)))

    def __eq__(self, other):
        if self is other:
            return True
        return type(other) is type(self) and \
            self.hostname_backup_name == other.hostname_backup_name and \
            self._timestamp == other.timestamp and \
            self.tar_meta == other.tar_meta and \
            self._level == other.level and \
            len(self.increments) == len(other.increments)

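The ``PATTERN`` above encodes the object naming scheme ``<hostname>_<backup_name>_<timestamp>_<level>``; a small, hypothetical round-trip (names and timestamps invented for illustration)::

    from freezer.storage import Backup

    b = Backup.parse("git-host-001_adminui.git_1428425720_0")
    print b.hostname_backup_name   # git-host-001_adminui.git
    print b.timestamp, b.level     # 1428425720 0
    print b.repr()                 # git-host-001_adminui.git_1428425720_0
    print b.tar()                  # tar_metadata_git-host-001_adminui.git_1428425720_0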
@ -1,34 +0,0 @@
class AbstractStorage(object):

    def upload_manifest(self, name, meta_dict):
        """
        Manifest can be different for different types of storage.

        Each storage should know how to handle its own manifest data.

        For example:
            Swift can create an empty file carrying the meta information, or
            a file containing json (for example when the amount of
            information exceeds 256 bytes, the metadata limit in Swift).

            A filesystem storage can create a file with information about
            descriptions, authors and so on.

            Amazon S3 can keep this information in its own manner.

        :param name: Name of manifest file
        :type name: str
        :param meta_dict: Dict with meta information
        :type meta_dict: dict
        :return:
        """
        raise NotImplementedError("Should have implemented this")

    def upload_chunk(self, content, path):
        raise NotImplementedError("Should have implemented this")

    def prepare(self):
        raise NotImplementedError("Should have implemented this")

    def ready(self):
        raise NotImplementedError("Should have implemented this")
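To show how the pluggable interface sketched here is meant to be extended, below is a hypothetical minimal backend (invented for illustration, not part of this commit) that satisfies the same contract by writing everything under a local directory::

    import json
    import os


    class DummyDirStorage(AbstractStorage):
        """Hypothetical example backend storing chunks and manifests on disk."""

        def __init__(self, root):
            self.root = root

        def prepare(self):
            # Create the destination directory if it is missing.
            if not os.path.isdir(self.root):
                os.makedirs(self.root)

        def ready(self):
            return os.path.isdir(self.root)

        def upload_chunk(self, content, path):
            # Flatten the segment path into a single file name.
            with open(os.path.join(self.root, path.replace('/', '_')), 'wb') as f:
                f.write(content)

        def upload_manifest(self, name, meta_dict):
            with open(os.path.join(self.root, name + '.manifest'), 'w') as f:
                json.dump(meta_dict, f)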
@ -1,117 +0,0 @@
import logging
import time
from copy import deepcopy
from freezer.storages.storage import AbstractStorage
from freezer.utils import segments_name


class SwiftStorage(AbstractStorage):
    """
    :type client_manager: freezer.osclients.ClientManager
    """

    def __init__(self, client_manager, container):
        """
        :type client_manager: freezer.osclients.ClientManager
        :type container: str
        """
        self.client_manager = client_manager
        self.container = container
        self.segments = segments_name(container)

    def upload_chunk(self, content, path):
        """
        """
        # If for some reason the swift client object is not available anymore
        # an exception is generated and a new client object is initialized.
        # If the exception happens for 10 consecutive times for a total of
        # 1 hour, then the program will exit with an Exception.

        count = 0
        success = False
        while not success:
            try:
                logging.info(
                    '[*] Uploading file chunk index: {0}'.format(path))
                self.client_manager.get_swift().put_object(
                    self.segments, path, content,
                    content_type='application/octet-stream',
                    content_length=len(content))
                logging.info('[*] Data successfully uploaded!')
                success = True
            except Exception as error:
                logging.info(
                    '[*] Retrying to upload file chunk index: {0}'.format(
                        path))
                time.sleep(60)
                self.client_manager.create_swift()
                count += 1
                if count == 10:
                    logging.critical('[*] Error: add_object: {0}'
                                     .format(error))
                    raise Exception("cannot add object to storage")

    def upload_manifest(self, name, manifest_meta_dict):
        """
        Upload Manifest to manage segments in Swift

        :param name: Name of manifest file
        :type name: str
        :param manifest_meta_dict: Dict with meta information
        :type manifest_meta_dict: dict
        """

        if not manifest_meta_dict:
            raise Exception('Manifest Meta dictionary not available')

        sw = self.client_manager.get_swift()
        self.client_manager.get_nova()
        tmp_manifest_meta = dict()
        for key, value in manifest_meta_dict.items():
            if key.startswith('x-object-meta'):
                tmp_manifest_meta[key] = value
        manifest_meta_dict = deepcopy(tmp_manifest_meta)
        header = manifest_meta_dict
        manifest_meta_dict['x-object-manifest'] = u'{0}/{1}'.format(
            self.segments, name.strip())
        logging.info('[*] Uploading Swift Manifest: {0}'.format(header))
        sw.put_object(container=self.container, obj=name,
                      contents=u'', headers=header)
        logging.info('[*] Manifest successfully uploaded!')

    def ready(self):
        return self.check_container_existence()[0]

    def prepare(self):
        containers = self.check_container_existence()
        if not containers[0]:
            self.client_manager.get_swift().put_container(self.container)
        if not containers[1]:
            self.client_manager.get_swift().put_container(
                segments_name(self.container))

    def check_container_existence(self):
        """
        Check if the provided container is already available on Swift.
        The verification is done by exact matching between the provided
        container name and the whole list of containers available for the
        swift account.
        """
        sw_connector = self.client_manager.get_swift()
        containers_list = [c['name'] for c in sw_connector.get_account()[1]]
        return (self.container in containers_list,
                segments_name(self.container) in containers_list)

    def add_object(self, max_segment_size, backup_queue, absolute_file_path,
                   time_stamp):
        """
        Upload object on the remote swift server
        """
        file_chunk_index, file_chunk = backup_queue.get().popitem()
        package_name = absolute_file_path.split('/')[-1]
        while file_chunk_index or file_chunk:
            package_name_segment = u'{0}/{1}/{2}/{3}'.format(
                package_name, time_stamp,
                max_segment_size, file_chunk_index)
            self.upload_chunk(file_chunk, package_name_segment)
            file_chunk_index, file_chunk = backup_queue.get().popitem()
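For reference, the segment path assembled above follows the pattern ``<package_name>/<timestamp>/<max_segment_size>/<chunk_index>``; with hypothetical values::

    # Hypothetical values: chunk 18 of a file named mongod.dump, 64 MB segments.
    print u'{0}/{1}/{2}/{3}'.format('mongod.dump', 1428425720, 67108864, '00000018')
    # mongod.dump/1428425720/67108864/00000018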
759
freezer/swift.py
@ -16,449 +16,402 @@ limitations under the License.
This product includes cryptographic software written by Eric Young
(eay@cryptsoft.com). This product includes software written by Tim
Hudson (tjh@cryptsoft.com).
========================================================================

Freezer functions to interact with OpenStack Swift client and server
"""
from freezer.storages.swiftstorage import SwiftStorage

from freezer.utils import (sort_backup_list, DateTime, segments_name)
from copy import deepcopy
import multiprocessing

from freezer import utils
from freezer import tar
import json
import re
import time
import logging

RESP_CHUNK_SIZE = 65536
import os
from freezer import storage

def show_containers(containers_list):
|
||||
class SwiftStorage(storage.Storage):
|
||||
"""
|
||||
Print remote containers in sorted order
|
||||
:type client_manager: freezer.osclients.ClientManager
|
||||
"""
|
||||
|
||||
ordered_container = {}
|
||||
for container in containers_list:
|
||||
ordered_container['container_name'] = container['name']
|
||||
size = '{0}'.format((int(container['bytes']) / 1024) / 1024)
|
||||
if size == '0':
|
||||
size = '1'
|
||||
ordered_container['size'] = '{0}MB'.format(size)
|
||||
ordered_container['objects_count'] = container['count']
|
||||
print json.dumps(
|
||||
ordered_container, indent=4,
|
||||
separators=(',', ': '), sort_keys=True)
|
||||
def __init__(self, client_manager, container, work_dir, max_segment_size):
|
||||
"""
|
||||
:type client_manager: freezer.osclients.ClientManager
|
||||
:type container: str
|
||||
"""
|
||||
self.client_manager = client_manager
|
||||
self.container = container
|
||||
self.segments = utils.segments_name(container)
|
||||
self.work_dir = work_dir
|
||||
self.max_segment_size = max_segment_size
|
||||
|
||||
def swift(self):
|
||||
"""
|
||||
:rtype: swiftclient.Connection
|
||||
:return:
|
||||
"""
|
||||
return self.client_manager.get_swift()
|
||||
|
||||
def show_objects(backup_opt_dict):
|
||||
"""
|
||||
Retrieve the list of backups from backup_opt_dict for the specified \
|
||||
container and print them nicely to std out.
|
||||
"""
|
||||
def upload_chunk(self, content, path):
|
||||
"""
|
||||
"""
|
||||
# If for some reason the swift client object is not available anymore
|
||||
# an exception is generated and a new client object is initialized/
|
||||
# If the exception happens for 10 consecutive times for a total of
|
||||
# 1 hour, then the program will exit with an Exception.
|
||||
|
||||
if not backup_opt_dict.list_objects:
|
||||
return False
|
||||
count = 0
|
||||
success = False
|
||||
while not success:
|
||||
try:
|
||||
logging.info(
|
||||
'[*] Uploading file chunk index: {0}'.format(path))
|
||||
self.swift().put_object(
|
||||
self.segments, path, content,
|
||||
content_type='application/octet-stream',
|
||||
content_length=len(content))
|
||||
logging.info('[*] Data successfully uploaded!')
|
||||
success = True
|
||||
except Exception as error:
|
||||
logging.info(
|
||||
'[*] Retrying to upload file chunk index: {0}'.format(
|
||||
path))
|
||||
time.sleep(60)
|
||||
self.client_manager.create_swift()
|
||||
count += 1
|
||||
if count == 10:
|
||||
logging.critical('[*] Error: add_object: {0}'
|
||||
.format(error))
|
||||
raise Exception("cannot add object to storage")
|
||||
|
||||
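The upload loop above retries up to 10 times, sleeping 60 seconds and re-creating the Swift client between attempts. A stripped-down, standalone sketch of that retry pattern (helper names invented, not part of this commit)::

    import logging
    import time


    def retry_upload(put_object, recreate_client, attempts=10, delay=60):
        # Hypothetical helper mirroring the retry logic used above.
        for attempt in range(1, attempts + 1):
            try:
                return put_object()
            except Exception as error:
                logging.info('[*] Upload failed, attempt %d: %s', attempt, error)
                if attempt == attempts:
                    raise Exception("cannot add object to storage")
                time.sleep(delay)
                recreate_client()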
ordered_objects = {}
|
||||
remote_obj = get_container_content(
|
||||
backup_opt_dict.client_manager,
|
||||
backup_opt_dict.container)
|
||||
def upload_manifest(self, name, headers=None):
|
||||
"""
|
||||
Upload Manifest to manage segments in Swift
|
||||
|
||||
for obj in remote_obj:
|
||||
ordered_objects['object_name'] = obj['name']
|
||||
ordered_objects['upload_date'] = obj['last_modified']
|
||||
print json.dumps(
|
||||
ordered_objects, indent=4,
|
||||
separators=(',', ': '), sort_keys=True)
|
||||
:param name: Name of manifest file
|
||||
:type name: str
|
||||
"""
|
||||
|
||||
return True
|
||||
self.client_manager.create_swift()
|
||||
headers = deepcopy(headers) or dict()
|
||||
headers['x-object-manifest'] = u'{0}/{1}'.format(self.segments,
|
||||
name.strip())
|
||||
logging.info('[*] Uploading Swift Manifest: {0}'.format(name))
|
||||
self.swift().put_object(container=self.container, obj=name,
|
||||
contents=u'', headers=headers)
|
||||
logging.info('[*] Manifest successfully uploaded!')
|
||||
|
||||
def is_ready(self):
|
||||
return self.check_container_existence()[0]
|
||||
|
||||
def _remove_object(sw_connector, container, obj):
|
||||
logging.info('[*] Removing backup object: {0}'.format(obj))
|
||||
sleep_time = 120
|
||||
retry_max_count = 60
|
||||
curr_count = 0
|
||||
while True:
|
||||
try:
|
||||
sw_connector.delete_object(container, obj)
|
||||
logging.info(
|
||||
'[*] Remote object {0} removed'.format(obj))
|
||||
break
|
||||
except Exception as error:
|
||||
curr_count += 1
|
||||
time.sleep(sleep_time)
|
||||
err_msg = (
|
||||
'[*] Remote Object {0} failed to be removed.'
|
||||
' Retrying intent {1} out of {2} totals'.format(
|
||||
obj, curr_count, retry_max_count))
|
||||
if curr_count >= retry_max_count:
|
||||
error_message = \
|
||||
'[*] Error: {0}: {1}'.format(err_msg, error)
|
||||
raise Exception(error_message)
|
||||
else:
|
||||
logging.warning(err_msg)
|
||||
def restore(self, backup, path, tar_builder, level):
|
||||
"""
|
||||
Restore data from swift server to your local node. Data will be
|
||||
restored in the directory specified in
|
||||
backup_opt_dict.restore_abs_path. The
|
||||
object specified with the --get-object option will be downloaded from
|
||||
the Swift server and will be downloaded inside the parent directory of
|
||||
backup_opt_dict.restore_abs_path. If the object was compressed during
|
||||
backup time, then it is decrypted, decompressed and de-archived to
|
||||
backup_opt_dict.restore_abs_path. Before downloading the file, the size of
the local volume/disk/partition will be computed. If there is enough space,
the full restore will be executed. Please remember to stop any service
that requires access to the data before starting the restore execution
and to restart the service at the end of the restore execution.

Take options dict as argument and sort/remove duplicate elements from
backup_opt_dict.remote_match_backup and find the closest backup to the
date provided in backup_opt_dict.restore_from_date. Once the objects are
looped backwards and the level 0 backup is found, along with the other
level 1,2,...,n backups, the objects are downloaded from swift and
untarred locally, starting from level 0 up to level n.
|
||||
:type tar_builder: freezer.tar.TarCommandRestoreBuilder
|
||||
"""
|
||||
|
||||
def remove_object(sw_connector, container, obj):
|
||||
head_info = sw_connector.head_object(container, obj)
|
||||
manifest = head_info.get('x-object-manifest', None)
|
||||
_remove_object(sw_connector, container, obj)
|
||||
if not manifest:
|
||||
return
|
||||
segments_container, segments_match = manifest.split('/')
|
||||
logging.info("Removing segments of object {0} from container {1}".
|
||||
format(obj, segments_container))
|
||||
segment_list = sw_connector.get_container(segments_container)[1]
|
||||
for segment in segment_list:
|
||||
if segment['name'].startswith(segments_match):
|
||||
_remove_object(sw_connector, segments_container, segment['name'])
|
||||
for level in range(0, level + 1):
|
||||
self._restore(backup.increments[level], path, tar_builder)
|
||||
|
||||
def _restore(self, backup, path, tar_builder):
|
||||
"""
|
||||
:type backup: freezer.storage.Backup
|
||||
:param backup:
|
||||
:type path: str
|
||||
:type tar_builder: freezer.tar.TarCommandRestoreBuilder
|
||||
:return:
|
||||
"""
|
||||
write_pipe, read_pipe = multiprocessing.Pipe()
|
||||
process_stream = multiprocessing.Process(
|
||||
target=self.object_to_stream,
|
||||
args=(write_pipe, read_pipe, backup.repr(),))
|
||||
process_stream.daemon = True
|
||||
process_stream.start()
|
||||
|
||||
def remove_obj_older_than(backup_opt_dict):
|
||||
"""
|
||||
Remove object in remote swift server which are
|
||||
older than the specified days or timestamp
|
||||
"""
|
||||
write_pipe.close()
|
||||
# Start the tar pipe consumer process
|
||||
tar_stream = multiprocessing.Process(
|
||||
target=tar.tar_restore, args=(path, tar_builder.build(),
|
||||
read_pipe))
|
||||
tar_stream.daemon = True
|
||||
tar_stream.start()
|
||||
read_pipe.close()
|
||||
process_stream.join()
|
||||
tar_stream.join()
|
||||
|
||||
remote_obj_list = get_container_content(
|
||||
backup_opt_dict.client_manager,
|
||||
backup_opt_dict.container)
|
||||
if tar_stream.exitcode:
|
||||
raise Exception('failed to restore file')
|
||||
|
||||
if not remote_obj_list:
|
||||
logging.warning('[*] No remote objects will be removed')
|
||||
return
|
||||
|
||||
if backup_opt_dict.remove_older_than is not None:
|
||||
if backup_opt_dict.remove_from_date:
|
||||
raise Exception("Please specify remove date unambiguously")
|
||||
current_timestamp = backup_opt_dict.time_stamp
|
||||
max_age = int(backup_opt_dict.remove_older_than * 86400)
|
||||
remove_from = DateTime(current_timestamp - max_age)
|
||||
else:
|
||||
if not backup_opt_dict.remove_from_date:
|
||||
raise Exception("Remove date/age not specified")
|
||||
remove_from = DateTime(backup_opt_dict.remove_from_date)
|
||||
|
||||
logging.info('[*] Removing objects older than {0} ({1})'.format(
|
||||
remove_from, remove_from.timestamp))
|
||||
|
||||
backup_name = backup_opt_dict.backup_name
|
||||
hostname = backup_opt_dict.hostname
|
||||
backup_opt_dict.remote_match_backup = \
|
||||
get_match_backup(backup_opt_dict.backup_name,
|
||||
backup_opt_dict.hostname,
|
||||
remote_obj_list)
|
||||
sorted_remote_list = sort_backup_list(backup_opt_dict.remote_match_backup)
|
||||
|
||||
tar_meta_incremental_dep_flag = False
|
||||
incremental_dep_flag = False
|
||||
|
||||
for match_object in sorted_remote_list:
|
||||
obj_name_match = re.search(r'{0}_({1})_(\d+)_(\d+?)$'.format(
|
||||
hostname, backup_name), match_object, re.I)
|
||||
|
||||
if obj_name_match:
|
||||
remote_obj_timestamp = int(obj_name_match.group(2))
|
||||
|
||||
if remote_obj_timestamp >= remove_from.timestamp:
|
||||
if match_object.startswith('tar_meta'):
|
||||
tar_meta_incremental_dep_flag = \
|
||||
(obj_name_match.group(3) != '0')
|
||||
else:
|
||||
incremental_dep_flag = \
|
||||
(obj_name_match.group(3) != '0')
|
||||
|
||||
else:
|
||||
sw_connector = backup_opt_dict.client_manager.get_swift()
|
||||
if match_object.startswith('tar_meta'):
|
||||
if not tar_meta_incremental_dep_flag:
|
||||
remove_object(sw_connector,
|
||||
backup_opt_dict.container, match_object)
|
||||
else:
|
||||
if obj_name_match.group(3) == '0':
|
||||
tar_meta_incremental_dep_flag = False
|
||||
else:
|
||||
if not incremental_dep_flag:
|
||||
remove_object(sw_connector,
|
||||
backup_opt_dict.container, match_object)
|
||||
else:
|
||||
if obj_name_match.group(3) == '0':
|
||||
incremental_dep_flag = False
|
||||
|
||||
|
||||
def get_container_content(client_manager, container):
|
||||
"""
|
||||
Download the list of object of the provided container
|
||||
and print them out as container meta-data and container object list
|
||||
"""
|
||||
|
||||
sw_connector = client_manager.get_swift()
|
||||
try:
|
||||
return sw_connector.get_container(container)[1]
|
||||
except Exception as error:
|
||||
raise Exception('[*] Error: get_object_list: {0}'.format(error))
|
||||
|
||||
|
||||
def add_stream(client_manager, container, stream,
|
||||
package_name, headers=None):
|
||||
i = 0
|
||||
container_segments = segments_name(container)
|
||||
swift_storage = SwiftStorage(client_manager, container)
|
||||
|
||||
for el in stream:
|
||||
swift_storage.upload_chunk("{0}/{1}".format(package_name, "%08d" % i),
|
||||
el)
|
||||
i += 1
|
||||
if not headers:
|
||||
headers = {}
|
||||
headers['X-Object-Manifest'] = u'{0}/{1}/'.format(
|
||||
container_segments, package_name)
|
||||
headers['x-object-meta-length'] = len(stream)
|
||||
|
||||
swift = client_manager.get_swift()
|
||||
swift.put_object(container, package_name, "", headers=headers)
|
||||
|
||||
|
||||
def object_to_stream(container, client_manager, write_pipe, read_pipe,
|
||||
obj_name):
|
||||
"""
|
||||
Take a payload downloaded from Swift
|
||||
and generate a stream to be consumed from other processes
|
||||
"""
|
||||
sw_connector = client_manager.get_swift()
|
||||
logging.info('[*] Downloading data stream...')
|
||||
|
||||
# Close the read pipe in this child as it is unneeded
|
||||
# and download the objects from swift in chunks. The
|
||||
# Chunk size is set by RESP_CHUNK_SIZE and sent to the write
|
||||
# pipe
|
||||
read_pipe.close()
|
||||
for obj_chunk in sw_connector.get_object(
|
||||
container, obj_name, resp_chunk_size=RESP_CHUNK_SIZE)[1]:
|
||||
write_pipe.send_bytes(obj_chunk)
|
||||
|
||||
# Closing the pipe after checking no data
|
||||
# is still available in the pipe.
|
||||
while True:
|
||||
if not write_pipe.poll():
|
||||
write_pipe.close()
|
||||
break
|
||||
time.sleep(1)
|
||||
|
||||
|
||||
def get_match_backup(backup_name, hostname, remote_obj_list):
|
||||
"""
|
||||
Return a dictionary containing a list of remote matching backups from
|
||||
backup_opt_dict.remote_obj_list.
|
||||
Backups have to exactly match against backup name and hostname of the
|
||||
node where freezer is executed. The matching objects are stored and
|
||||
available in backup_opt_dict.remote_match_backup
|
||||
"""
|
||||
|
||||
backup_name = backup_name.lower()
|
||||
remote_match_backup = []
|
||||
|
||||
for container_object in remote_obj_list:
|
||||
object_name = container_object.get('name', None)
|
||||
if object_name:
|
||||
obj_name_match = re.search(r'{0}_({1})_\d+?_\d+?$'.format(
|
||||
hostname, backup_name), object_name.lower(), re.I)
|
||||
if obj_name_match:
|
||||
remote_match_backup.append(object_name)
|
||||
|
||||
return remote_match_backup
|
||||
|
||||
|
||||
def get_rel_oldest_backup(hostname, backup_name, remote_obj_list):
|
||||
"""
|
||||
Return from swift, the relative oldest backup matching the provided
|
||||
backup name and hostname of the node where freezer is executed.
|
||||
The relative oldest backup corresponds to the oldest backup from the
|
||||
last level 0 backup.
|
||||
"""
|
||||
first_backup_name = ''
|
||||
first_backup_ts = 0
|
||||
for container_object in remote_obj_list:
|
||||
object_name = container_object.get('name', None)
|
||||
if not object_name:
|
||||
continue
|
||||
obj_name_match = re.search(r'{0}_({1})_(\d+)_(\d+?)$'.format(
|
||||
hostname, backup_name), object_name, re.I)
|
||||
if not obj_name_match:
|
||||
continue
|
||||
remote_obj_timestamp = int(obj_name_match.group(2))
|
||||
remote_obj_level = int(obj_name_match.group(3))
|
||||
if remote_obj_level == 0 and (remote_obj_timestamp > first_backup_ts):
|
||||
first_backup_name = object_name
|
||||
first_backup_ts = remote_obj_timestamp
|
||||
|
||||
return first_backup_name
|
||||
|
||||
|
||||
def eval_restart_backup(backup_opt_dict):
|
||||
"""
|
||||
Restart the backup level if the first backup executed with always_level
is older than restart_always_level
|
||||
"""
|
||||
|
||||
if not backup_opt_dict.restart_always_level:
|
||||
logging.info('[*] No need to set Backup {0} to level 0.'.format(
|
||||
backup_opt_dict.backup_name))
|
||||
return False
|
||||
|
||||
logging.info('[*] Checking always backup level timestamp...')
|
||||
# Compute the amount of seconds to be compared with
|
||||
# the remote backup timestamp
|
||||
max_time = int(float(backup_opt_dict.restart_always_level) * 86400)
|
||||
current_timestamp = backup_opt_dict.time_stamp
|
||||
backup_name = backup_opt_dict.backup_name
|
||||
hostname = backup_opt_dict.hostname
|
||||
# Get relative oldest backup by calling get_rel_oldest_backup()
|
||||
|
||||
remote_obj_list = get_container_content(
|
||||
backup_opt_dict.client_manager,
|
||||
backup_opt_dict.container)
|
||||
backup_opt_dict.remote_rel_oldest =\
|
||||
get_rel_oldest_backup(hostname, backup_name, remote_obj_list)
|
||||
if not backup_opt_dict.remote_rel_oldest:
|
||||
logging.info('[*] Relative oldest backup for backup name {0} on \
|
||||
host {1} not available. The backup level is NOT restarted'.format(
|
||||
backup_name, hostname))
|
||||
return False
|
||||
|
||||
obj_name_match = re.search(r'{0}_({1})_(\d+)_(\d+?)$'.format(
|
||||
hostname, backup_name), backup_opt_dict.remote_rel_oldest, re.I)
|
||||
if not obj_name_match:
|
||||
err = ('[*] No backup match available for backup {0} '
|
||||
'and host {1}'.format(backup_name, hostname))
|
||||
logging.info(err)
|
||||
return Exception(err)
|
||||
|
||||
first_backup_ts = int(obj_name_match.group(2))
|
||||
if (current_timestamp - first_backup_ts) > max_time:
|
||||
logging.info(
|
||||
'[*] Backup {0} older than {1} days. Backup level set to 0'.format(
|
||||
backup_name, backup_opt_dict.restart_always_level))
|
||||
'[*] Restore execution successfully executed \
|
||||
for backup name {0}'.format(backup.repr()))
|
||||
|
||||
return True
|
||||
else:
|
||||
logging.info('[*] No need to set level 0 for Backup {0}.'.format(
|
||||
backup_name))
|
||||
def prepare(self):
|
||||
containers = self.check_container_existence()
|
||||
if not containers[0]:
|
||||
self.swift().put_container(self.container)
|
||||
if not containers[1]:
|
||||
self.swift().put_container(self.segments)
|
||||
|
||||
return False
|
||||
def check_container_existence(self):
|
||||
"""
|
||||
Check if the provided container is already available on Swift.
|
||||
The verification is done by exact matching between the provided
|
||||
container name and the whole list of container available for the swift
|
||||
account.
|
||||
"""
|
||||
containers_list = [c['name'] for c in self.swift().get_account()[1]]
|
||||
return (self.container in containers_list,
|
||||
self.segments in containers_list)
|
||||
|
||||
def add_object(self, backup_queue, current_backup):
|
||||
"""
|
||||
Upload object on the remote swift server
|
||||
:type current_backup: SwiftBackup
|
||||
"""
|
||||
file_chunk_index, file_chunk = backup_queue.get().popitem()
|
||||
while file_chunk_index or file_chunk:
|
||||
segment_package_name = u'{0}/{1}/{2}/{3}'.format(
|
||||
current_backup.repr(), current_backup.timestamp,
|
||||
self.max_segment_size, file_chunk_index)
|
||||
self.upload_chunk(file_chunk, segment_package_name)
|
||||
file_chunk_index, file_chunk = backup_queue.get().popitem()
|
||||
|
||||
def set_backup_level(backup_opt_dict, manifest_meta_dict):
|
||||
"""
|
||||
Set the backup level params in backup_opt_dict and the swift
|
||||
manifest. This is a fundamental part of the incremental backup
|
||||
"""
|
||||
RESP_CHUNK_SIZE = 65536
|
||||
|
||||
if manifest_meta_dict.get('x-object-meta-backup-name'):
|
||||
backup_opt_dict.curr_backup_level = int(
|
||||
manifest_meta_dict.get('x-object-meta-backup-current-level'))
|
||||
max_level = manifest_meta_dict.get(
|
||||
'x-object-meta-maximum-backup-level')
|
||||
always_level = manifest_meta_dict.get(
|
||||
'x-object-meta-always-backup-level')
|
||||
restart_always_level = manifest_meta_dict.get(
|
||||
'x-object-meta-restart-always-backup')
|
||||
if max_level:
|
||||
max_level = int(max_level)
|
||||
if backup_opt_dict.curr_backup_level < max_level:
|
||||
backup_opt_dict.curr_backup_level += 1
|
||||
manifest_meta_dict['x-object-meta-backup-current-level'] = \
|
||||
str(backup_opt_dict.curr_backup_level)
|
||||
else:
|
||||
manifest_meta_dict['x-object-meta-backup-current-level'] = \
|
||||
backup_opt_dict.curr_backup_level = '0'
|
||||
elif always_level:
|
||||
always_level = int(always_level)
|
||||
if backup_opt_dict.curr_backup_level < always_level:
|
||||
backup_opt_dict.curr_backup_level += 1
|
||||
manifest_meta_dict['x-object-meta-backup-current-level'] = \
|
||||
str(backup_opt_dict.curr_backup_level)
|
||||
# If restart_always_level is set, the backup_age will be computed
|
||||
# and if the backup age in days is >= restart_always_level, then
|
||||
# backup-current-level will be set to 0
|
||||
if restart_always_level:
|
||||
backup_opt_dict.restart_always_level = restart_always_level
|
||||
if eval_restart_backup(backup_opt_dict):
|
||||
backup_opt_dict.curr_backup_level = '0'
|
||||
manifest_meta_dict['x-object-meta-backup-current-level'] \
|
||||
= '0'
|
||||
else:
|
||||
backup_opt_dict.curr_backup_level = \
|
||||
manifest_meta_dict['x-object-meta-backup-current-level'] = '0'
|
||||
def info(self):
|
||||
ordered_container = {}
|
||||
containers = self.swift().get_account()[1]
|
||||
for container in containers:
|
||||
print container
|
||||
ordered_container['container_name'] = container['name']
|
||||
size = '{0}'.format((int(container['bytes']) / 1024) / 1024)
|
||||
if size == '0':
|
||||
size = '1'
|
||||
ordered_container['size'] = '{0}MB'.format(size)
|
||||
ordered_container['objects_count'] = container['count']
|
||||
print json.dumps(
|
||||
ordered_container, indent=4,
|
||||
separators=(',', ': '), sort_keys=True)
|
||||
|
||||
return backup_opt_dict, manifest_meta_dict
|
||||
def remove_older_than(self, remove_older_timestamp, hostname_backup_name):
|
||||
"""
|
||||
Remove object in remote swift server which are
|
||||
older than the specified days or timestamp
|
||||
"""
|
||||
|
||||
backups = self.find(hostname_backup_name)
|
||||
backups = [b for b in backups if b.timestamp < remove_older_timestamp]
|
||||
for b in backups:
|
||||
self.remove_backup(b)
|
||||
|
||||
def check_backup_and_tar_meta_existence(backup_opt_dict):
|
||||
"""
|
||||
Check if any backup is already available on Swift.
|
||||
The verification is done by backup_name, which needs to be unique
|
||||
in Swift. This function will return an empty dict if no backup are
|
||||
found or the Manifest metadata if the backup_name is available
|
||||
"""
|
||||
def remove_backup(self, backup):
|
||||
"""
|
||||
Removes backup, all increments, tar_meta and segments
|
||||
:param backup:
|
||||
:return:
|
||||
"""
|
||||
for i in range(backup.latest_update.level, -1, -1):
|
||||
if i in backup.increments:
|
||||
# remove segment
|
||||
for segment in self.swift().get_container(
|
||||
self.segments,
|
||||
prefix=backup.increments[i].repr())[1]:
|
||||
self.swift().delete_object(self.segments, segment['name'])
|
||||
|
||||
if not backup_opt_dict.backup_name or not backup_opt_dict.container:
|
||||
logging.warning(
|
||||
('[*] A valid Swift container, or backup name or container '
|
||||
'content not available. Level 0 backup is being executed '))
|
||||
return dict()
|
||||
# remove tar
|
||||
for segment in self.swift().get_container(
|
||||
self.container,
|
||||
prefix=backup.increments[i].tar())[1]:
|
||||
self.swift().delete_object(self.container, segment['name'])
|
||||
|
||||
logging.info("[*] Retrieving backup name {0} on container \
|
||||
{1}".format(
|
||||
backup_opt_dict.backup_name.lower(), backup_opt_dict.container))
|
||||
# remove manifest
|
||||
for segment in self.swift().get_container(
|
||||
self.container,
|
||||
prefix=backup.increments[i].repr())[1]:
|
||||
self.swift().delete_object(self.container, segment['name'])
|
||||
|
||||
remote_obj_list = get_container_content(
|
||||
backup_opt_dict.client_manager,
|
||||
backup_opt_dict.container)
|
||||
remote_match_backup = \
|
||||
get_match_backup(backup_opt_dict.backup_name,
|
||||
backup_opt_dict.hostname,
|
||||
remote_obj_list)
|
||||
try:
|
||||
remote_newest_backup = get_newest_backup(backup_opt_dict.hostname,
|
||||
backup_opt_dict.backup_name,
|
||||
remote_match_backup)
|
||||
swift = backup_opt_dict.client_manager.get_swift()
|
||||
logging.info("[*] Backup {0} found!".format(
|
||||
backup_opt_dict.backup_name))
|
||||
backup_match = swift.head_object(
|
||||
backup_opt_dict.container, remote_newest_backup)
|
||||
def add_stream(self, stream, package_name, headers=None):
|
||||
i = 0
|
||||
|
||||
return backup_match
|
||||
except Exception:
|
||||
logging.warning("[*] No such backup {0} available... Executing \
|
||||
level 0 backup".format(backup_opt_dict.backup_name))
|
||||
return dict()
|
||||
for el in stream:
|
||||
self.upload_chunk("{0}/{1}".format(package_name, "%08d" % i), el)
|
||||
i += 1
|
||||
if not headers:
|
||||
headers = {}
|
||||
headers['X-Object-Manifest'] = u'{0}/{1}/'.format(
|
||||
self.segments, package_name)
|
||||
headers['x-object-meta-length'] = len(stream)
|
||||
|
||||
self.swift().put_object(self.container, package_name, "",
|
||||
headers=headers)
|
||||
|
||||
def get_newest_backup(hostname, backup_name, remote_match_backup):
|
||||
"""
|
||||
Return from backup_opt_dict.remote_match_backup, the newest backup
|
||||
matching the provided backup name and hostname of the node where
|
||||
freezer is executed. It corresponds to the previous backup executed.
|
||||
NOTE: If backup has no tar_metadata, no newest backup is returned.
|
||||
"""
|
||||
def object_to_stream(self, write_pipe, read_pipe, obj_name):
|
||||
"""
|
||||
Take a payload downloaded from Swift
|
||||
and generate a stream to be consumed from other processes
|
||||
"""
|
||||
logging.info('[*] Downloading data stream...')
|
||||
|
||||
# Sort remote backup list using timestamp in reverse order,
|
||||
# that is from the newest to the oldest executed backup
|
||||
# Close the read pipe in this child as it is unneeded
|
||||
# and download the objects from swift in chunks. The
|
||||
# Chunk size is set by RESP_CHUNK_SIZE and sent to the write
|
||||
# pipe
|
||||
read_pipe.close()
|
||||
for obj_chunk in self.swift().get_object(
|
||||
self.container, obj_name,
|
||||
resp_chunk_size=self.RESP_CHUNK_SIZE)[1]:
|
||||
write_pipe.send_bytes(obj_chunk)
|
||||
|
||||
if not remote_match_backup:
|
||||
raise Exception("remote match backups are empty")
|
||||
sorted_backups_list = sort_backup_list(remote_match_backup)
|
||||
# Closing the pipe after checking no data
|
||||
# is still available in the pipe.
|
||||
while True:
|
||||
if not write_pipe.poll():
|
||||
write_pipe.close()
|
||||
break
|
||||
time.sleep(1)
|
||||
|
||||
for remote_obj in sorted_backups_list:
|
||||
obj_name_match = re.search(r'^{0}_({1})_(\d+)_\d+?$'.format(
|
||||
hostname, backup_name), remote_obj, re.I)
|
||||
if not obj_name_match:
|
||||
continue
|
||||
tar_metadata_obj = 'tar_metadata_{0}'.format(remote_obj)
|
||||
if tar_metadata_obj in sorted_backups_list:
|
||||
return remote_obj
|
||||
raise Exception("no tar file")
|
||||
def get_backups(self):
|
||||
"""
|
||||
:rtype: list[SwiftBackup]
|
||||
:return: list of zero level backups
|
||||
"""
|
||||
try:
|
||||
files = self.swift().get_container(self.container)[1]
|
||||
names = [x['name'] for x in files if 'name' in x]
|
||||
return self._get_backups(names)
|
||||
except Exception as error:
|
||||
raise Exception('[*] Error: get_object_list: {0}'.format(error))
|
||||
|
||||
raise Exception('not backup found')
|
||||
def get_last_backup(self, hostname_backup_name):
|
||||
"""
|
||||
|
||||
:param hostname_backup_name:
|
||||
:return: last backup or throws exception
|
||||
:rtype: freezer.swift.backup.SwiftBackup
|
||||
"""
|
||||
return max(self.find(hostname_backup_name), key=lambda b: b.timestamp)
|
||||
|
||||
def _download_tar_meta(self, backup):
|
||||
"""
|
||||
Downloads meta_data to work_dir of previous backup.
|
||||
|
||||
:param backup: A backup or increment. Current backup is incremental,
|
||||
that means we should download tar_meta for detecting new files and
|
||||
changes. If backup.tar_meta is false, raise Exception
|
||||
:type backup: SwiftBackup
|
||||
:return:
|
||||
"""
|
||||
if not backup.tar_meta:
|
||||
raise ValueError('Latest update have no tar_meta')
|
||||
|
||||
utils.create_dir(self.work_dir)
|
||||
tar_meta = backup.tar()
|
||||
tar_meta_abs = "{0}/{1}".format(self.work_dir, tar_meta)
|
||||
|
||||
logging.info('[*] Downloading object {0} {1}'.format(
|
||||
tar_meta, tar_meta_abs))
|
||||
|
||||
if os.path.exists(tar_meta_abs):
|
||||
os.remove(tar_meta_abs)
|
||||
|
||||
with open(tar_meta_abs, 'ab') as obj_fd:
|
||||
iterator = self.swift().get_object(
|
||||
self.container, tar_meta, resp_chunk_size=16000000)[1]
|
||||
for obj_chunk in iterator:
|
||||
obj_fd.write(obj_chunk)
|
||||
|
||||
def _execute_tar_and_upload(self, path_to_backup, current_backup,
|
||||
tar_command):
|
||||
"""
|
||||
|
||||
:param path_to_backup:
|
||||
:type path_to_backup: str
|
||||
:param current_backup:
|
||||
:type current_backup: freezer.storage.Backup
|
||||
:param tar_command:
|
||||
:type tar_command: str
|
||||
:return:
|
||||
"""
|
||||
# Initialize a Queue for a maximum of 2 items
|
||||
tar_backup_queue = multiprocessing.Queue(maxsize=2)
|
||||
|
||||
logging.info('[*] Changing current working directory to: {0} \
|
||||
'.format(path_to_backup))
|
||||
logging.info('[*] Backup started for: {0}'.format(path_to_backup))
|
||||
|
||||
tar_backup_stream = multiprocessing.Process(
|
||||
target=tar.tar_backup, args=(path_to_backup,
|
||||
self.max_segment_size,
|
||||
tar_command,
|
||||
tar_backup_queue))
|
||||
|
||||
tar_backup_stream.daemon = True
|
||||
tar_backup_stream.start()
|
||||
|
||||
add_object_stream = multiprocessing.Process(
|
||||
target=self.add_object, args=(tar_backup_queue, current_backup))
|
||||
add_object_stream.daemon = True
|
||||
add_object_stream.start()
|
||||
|
||||
tar_backup_stream.join()
|
||||
tar_backup_queue.put(({False: False}))
|
||||
tar_backup_queue.close()
|
||||
add_object_stream.join()
|
||||
|
||||
if add_object_stream.exitcode:
|
||||
raise Exception('failed to upload object to swift server')
|
||||
|
||||
def _upload_tar_meta(self, new_backup, old_backup):
|
||||
meta_data_abs_path = os.path.join(self.work_dir, old_backup.tar())
|
||||
|
||||
# Upload swift manifest for segments
|
||||
# Request a new auth client in case the current token
|
||||
# is expired before uploading tar meta data or the swift manifest
|
||||
self.client_manager.create_swift()
|
||||
|
||||
# Upload tar incremental meta data file and remove it
|
||||
logging.info('[*] Uploading tar meta data file: {0}'.format(
|
||||
new_backup.tar()))
|
||||
with open(meta_data_abs_path, 'r') as meta_fd:
|
||||
self.swift().put_object(
|
||||
self.container, new_backup.tar(), meta_fd)
|
||||
# Removing tar meta data file, so we have only one
|
||||
# authoritative version on swift
|
||||
logging.info('[*] Removing tar meta data file: {0}'.format(
|
||||
meta_data_abs_path))
|
||||
os.remove(meta_data_abs_path)
|
||||
|
||||
def backup(self, path, hostname_backup_name, tar_builder,
|
||||
parent_backup=None):
|
||||
new_backup = self._create_backup(hostname_backup_name, parent_backup)
|
||||
|
||||
if parent_backup:
|
||||
self._download_tar_meta(parent_backup)
|
||||
tar_builder.set_listed_incremental(
|
||||
"{0}/{1}".format(self.work_dir,
|
||||
(parent_backup or new_backup).tar()))
|
||||
|
||||
self._execute_tar_and_upload(path, new_backup, tar_builder.build())
|
||||
self._upload_tar_meta(new_backup, parent_backup or new_backup)
|
||||
self.upload_manifest(new_backup.repr())
|
||||
|
165
freezer/tar.py
@ -35,35 +35,34 @@ class TarCommandBuilder:
|
||||
Builds a tar command. To build the command, invoke the build() method.
|
||||
"""
|
||||
|
||||
def __init__(self, path):
|
||||
self.dereference = ''
|
||||
self.path = path
|
||||
self.filepath = '.'
|
||||
self.level = 0
|
||||
COMMAND_TEMPLATE = "{gnutar_path} --create -z --warning=none " \
|
||||
"--no-check-device --one-file-system --preserve-permissions " \
|
||||
"--same-owner --seek --ignore-failed-read"
|
||||
|
||||
LISTED_TEMPLATE = "{tar_command} --listed-incremental={listed_incremental}"
|
||||
|
||||
DEREFERENCE_MODE = {'soft': '--dereference',
|
||||
'hard': '--hard-dereference',
|
||||
'all': '--hard-dereference --dereference',
|
||||
'none': ''}
|
||||
|
||||
def __init__(self, gnutar_path, filepath):
|
||||
self.dereference = 'none'
|
||||
self.gnutar_path = gnutar_path
|
||||
self.exclude = None
|
||||
self.dereference_mode = {
|
||||
'soft': '--dereference',
|
||||
'hard': '--hard-dereference',
|
||||
'all': '--hard-dereference --dereference',
|
||||
'none': ''
|
||||
}
|
||||
self.dereference = ''
|
||||
self.listed_incremental = None
|
||||
self.work_dir = None
|
||||
self.exclude = ''
|
||||
self.openssl_path = None
|
||||
self.encrypt_pass_file = None
|
||||
self.output_file = None
|
||||
self.filepath = filepath
|
||||
|
||||
def set_filepath(self, path):
|
||||
self.filepath = path
|
||||
def set_output_file(self, output_file):
|
||||
self.output_file = output_file
|
||||
|
||||
def set_level(self, level):
|
||||
self.level = level
|
||||
|
||||
def set_work_dir(self, work_dir):
|
||||
self.work_dir = work_dir
|
||||
|
||||
def set_listed_incremental(self, path):
|
||||
self.listed_incremental = path
|
||||
def set_listed_incremental(self, absolute_path):
|
||||
self.listed_incremental = absolute_path
|
||||
|
||||
def set_exclude(self, exclude):
|
||||
self.exclude = exclude
|
||||
@ -76,40 +75,90 @@ class TarCommandBuilder:
|
||||
'all' dereference both.
|
||||
Default 'none'.
|
||||
"""
|
||||
if mode not in self.dereference_mode:
|
||||
raise Exception("unknown dereference mode: %s" % mode)
|
||||
self.dereference = mode
|
||||
self.dereference = self.DEREFERENCE_MODE[mode]
|
||||
|
||||
def set_encryption(self, openssl_path, encrypt_pass_file):
|
||||
self.openssl_path = openssl_path
|
||||
self.encrypt_pass_file = encrypt_pass_file
|
||||
|
||||
def build(self):
|
||||
tar_command = ' {path} --create -z --warning=none --no-check-device \
|
||||
--one-file-system --preserve-permissions --same-owner --seek \
|
||||
--ignore-failed-read {dereference}'.format(
|
||||
path=self.path,
|
||||
dereference=self.dereference_mode[self.dereference])
|
||||
tar_command = self.COMMAND_TEMPLATE.format(
|
||||
gnutar_path=self.gnutar_path, dereference=self.dereference)
|
||||
if self.dereference:
|
||||
"{0} {1}".format(tar_command, self.dereference)
|
||||
if self.listed_incremental:
|
||||
tar_command = '{tar_command} --level={level} \
|
||||
--listed-incremental={work_dir}/{listed_incremental}'.format(
|
||||
tar_command = self.LISTED_TEMPLATE.format(
|
||||
tar_command=tar_command,
|
||||
level=self.level,
|
||||
work_dir=self.work_dir,
|
||||
listed_incremental=self.listed_incremental)
|
||||
|
||||
if self.output_file:
|
||||
tar_command = "{0} --file={1}".format(tar_command,
|
||||
self.output_file)
|
||||
|
||||
if self.exclude:
|
||||
tar_command = ' {tar_command} --exclude="{exclude}" '.format(
|
||||
tar_command=tar_command,
|
||||
exclude=self.exclude)
|
||||
tar_command = '{tar_command} --exclude="{exclude}"'.format(
|
||||
tar_command=tar_command, exclude=self.exclude)
|
||||
|
||||
tar_command = '{0} {1}'.format(tar_command, self.filepath)
|
||||
|
||||
if self.encrypt_pass_file:
|
||||
openssl_cmd = "{openssl_path} enc -aes-256-cfb -pass file:{file}"\
|
||||
.format(openssl_path=self.openssl_path,
|
||||
file=self.encrypt_pass_file)
|
||||
tar_command = '{0} | {1} '.format(tar_command, openssl_cmd)
|
||||
tar_command = '{0} | {1}'.format(tar_command, openssl_cmd)
|
||||
|
||||
return ' {0} {1} '.format(tar_command, self.filepath)
|
||||
return tar_command
|
||||
|
||||
|
||||
class TarCommandRestoreBuilder:
    WINDOWS_TEMPLATE = '{0} -x -z --incremental --unlink-first ' \
                       '--ignore-zeros -f - '
    DRY_RUN_TEMPLATE = '{0} -z --incremental --list ' \
                       '--ignore-zeros --warning=none'
    NORMAL_TEMPLATE = '{0} -z --incremental --extract --unlink-first ' \
                      '--ignore-zeros --warning=none --overwrite --directory {1}'

    def __init__(self, tar_path, restore_path):
        self.dry_run = False
        self.is_windows = False
        self.openssl_path = None
        self.encrypt_pass_file = None
        self.tar_path = tar_path
        self.restore_path = restore_path
        self.archive = None

    def set_dry_run(self):
        self.dry_run = True

    def set_windows(self):
        self.is_windows = True

    def set_encryption(self, openssl_path, encrypt_pass_file):
        self.openssl_path = openssl_path
        self.encrypt_pass_file = encrypt_pass_file

    def set_archive(self, archive):
        self.archive = archive

    def build(self):
        if self.is_windows:
            tar_command = self.NORMAL_TEMPLATE.format(self.tar_path)
        elif self.dry_run:
            tar_command = self.DRY_RUN_TEMPLATE.format(self.tar_path)
        else:
            tar_command = self.NORMAL_TEMPLATE.format(self.tar_path,
                                                      self.restore_path)

        if self.archive:
            tar_command = tar_command + " --file " + self.archive
        # Check if encryption file is provided and set the openssl decrypt
        # command accordingly
        if self.encrypt_pass_file:
            openssl_cmd = "{openssl_path} enc -aes-256-cfb -pass file:{file}"\
                .format(openssl_path=self.openssl_path,
                        file=self.encrypt_pass_file)
            tar_command = '{0} | {1} '.format(openssl_cmd, tar_command)
        return tar_command

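A hypothetical usage sketch of the restore builder above (paths invented); the comment shows the command exactly as ``build()`` assembles it, assuming the class is importable from ``freezer.tar``::

    from freezer.tar import TarCommandRestoreBuilder

    builder = TarCommandRestoreBuilder('gnutar', '/home/git/repositories')
    builder.set_encryption('openssl', '/root/.freezer_pass')
    print builder.build()
    # openssl enc -aes-256-cfb -pass file:/root/.freezer_pass | gnutar -z
    #   --incremental --extract --unlink-first --ignore-zeros --warning=none
    #   --overwrite --directory /home/git/repositories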
def tar_restore_args_valid(backup_opt_dict):
|
||||
@ -126,41 +175,18 @@ def tar_restore_args_valid(backup_opt_dict):
|
||||
return valid_args
|
||||
|
||||
|
||||
def tar_restore(backup_opt_dict, read_pipe):
|
||||
def tar_restore(restore_abs_path, tar_command, read_pipe):
|
||||
"""
|
||||
Restore the provided file into backup_opt_dict.restore_abs_path
|
||||
Decrypt the file if backup_opt_dict.encrypt_pass_file key is provided
|
||||
"""
|
||||
|
||||
if not tar_restore_args_valid(backup_opt_dict):
|
||||
sys.exit(1)
|
||||
|
||||
if backup_opt_dict.dry_run:
|
||||
tar_cmd = ' {0} -z --incremental --list \
|
||||
--ignore-zeros --warning=none'.format(
|
||||
backup_opt_dict.tar_path)
|
||||
else:
|
||||
tar_cmd = ' {0} -z --incremental --extract \
|
||||
--unlink-first --ignore-zeros --warning=none --overwrite \
|
||||
--directory {1} '.format(
|
||||
backup_opt_dict.tar_path, backup_opt_dict.restore_abs_path)
|
||||
|
||||
if is_windows():
|
||||
# on windows, chdir to restore path.
|
||||
os.chdir(backup_opt_dict.restore_abs_path)
|
||||
tar_cmd = '{0} -x -z --incremental --unlink-first ' \
|
||||
'--ignore-zeros -f - '.format(backup_opt_dict.tar_path)
|
||||
|
||||
# Check if encryption file is provided and set the openssl decrypt
|
||||
# command accordingly
|
||||
if backup_opt_dict.encrypt_pass_file:
|
||||
openssl_cmd = " {0} enc -d -aes-256-cfb -pass file:{1}".format(
|
||||
backup_opt_dict.openssl_path,
|
||||
backup_opt_dict.encrypt_pass_file)
|
||||
tar_cmd = '{0} | {1} '.format(openssl_cmd, tar_cmd)
|
||||
os.chdir(restore_abs_path)
|
||||
|
||||
tar_cmd_proc = subprocess.Popen(
|
||||
tar_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
|
||||
tar_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE, shell=True)
|
||||
# Start loop reading the pipe and pass the data to the tar std input.
|
||||
# If EOFError exception is raised, the loop end the std err will be
|
||||
@ -179,7 +205,7 @@ def tar_restore(backup_opt_dict, read_pipe):
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def tar_backup(opt_dict, tar_command, backup_queue):
|
||||
def tar_backup(path_to_backup, max_segment_size, tar_command, backup_queue):
|
||||
"""
|
||||
Execute an incremental backup using tar options, specified as
|
||||
function arguments
|
||||
@ -189,8 +215,7 @@ def tar_backup(opt_dict, tar_command, backup_queue):
|
||||
file_chunk_index = 00000000
|
||||
tar_chunk = b''
|
||||
logging.info(
|
||||
'[*] Archiving and compressing files from {0}'.format(
|
||||
opt_dict.path_to_backup))
|
||||
'[*] Archiving and compressing files from {0}'.format(path_to_backup))
|
||||
|
||||
tar_process = subprocess.Popen(
|
||||
tar_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
|
||||
@ -200,7 +225,7 @@ def tar_backup(opt_dict, tar_command, backup_queue):
|
||||
for file_block in tar_process.stdout:
|
||||
tar_chunk += file_block
|
||||
file_read_limit += len(file_block)
|
||||
if file_read_limit >= opt_dict.max_segment_size:
|
||||
if file_read_limit >= max_segment_size:
|
||||
backup_queue.put(
|
||||
({("%08d" % file_chunk_index): tar_chunk}))
|
||||
file_chunk_index += 1
|
||||
@ -208,6 +233,6 @@ def tar_backup(opt_dict, tar_command, backup_queue):
|
||||
file_read_limit = 0
|
||||
|
||||
# Upload segments smaller than opt_dict.max_segment_size
|
||||
if len(tar_chunk) < opt_dict.max_segment_size:
|
||||
if len(tar_chunk) < max_segment_size:
|
||||
backup_queue.put(
|
||||
({("%08d" % file_chunk_index): tar_chunk}))
|
||||
|
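The loop above slices the tar stdout stream into queue items of roughly ``max_segment_size`` bytes, keyed by a zero-padded chunk index. A self-contained sketch of the same chunking idea (fake data instead of a tar process, helper name invented)::

    from multiprocessing import Queue


    def chunk_stream(blocks, max_segment_size, queue):
        # Hypothetical helper mirroring the segmentation logic of tar_backup.
        index, buf, read = 0, b'', 0
        for block in blocks:
            buf += block
            read += len(block)
            if read >= max_segment_size:
                queue.put({"%08d" % index: buf})
                index += 1
                buf, read = b'', 0
        if len(buf) < max_segment_size:
            # Flush the final, smaller-than-segment chunk.
            queue.put({"%08d" % index: buf})

    q = Queue()
    chunk_stream([b'a' * 30, b'b' * 30, b'c' * 10], 50, q)
    print q.get().keys()   # ['00000000']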
114
freezer/utils.py
@ -67,10 +67,10 @@ class OpenstackOptions:
|
||||
def create_from_dict(src_dict):
|
||||
try:
|
||||
return OpenstackOptions(
|
||||
user_name=src_dict['OS_USERNAME'],
|
||||
tenant_name=src_dict['OS_TENANT_NAME'],
|
||||
auth_url=src_dict['OS_AUTH_URL'],
|
||||
password=src_dict['OS_PASSWORD'],
|
||||
user_name=src_dict.get('OS_USERNAME', None),
|
||||
tenant_name=src_dict.get('OS_TENANT_NAME', None),
|
||||
auth_url=src_dict.get('OS_AUTH_URL', None),
|
||||
password=src_dict.get('OS_PASSWORD', None),
|
||||
tenant_id=src_dict.get('OS_TENANT_ID', None),
|
||||
region_name=src_dict.get('OS_REGION_NAME', None),
|
||||
endpoint_type=src_dict.get('OS_ENDPOINT_TYPE', None)
|
||||
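Since the lookups now use ``dict.get``, any unset ``OS_*`` variable is passed through as ``None`` instead of aborting the option parsing; a hypothetical call site (assuming ``create_from_dict`` is exposed as a static method, as its signature suggests)::

    import os

    from freezer.utils import OpenstackOptions

    options = OpenstackOptions.create_from_dict(os.environ)
    print options.user_name, options.region_name   # None for unset variables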
@ -80,92 +80,6 @@ class OpenstackOptions:
|
||||
.format(e))
|
||||
|
||||
|
||||
def gen_manifest_meta(
|
||||
backup_opt_dict, manifest_meta_dict, meta_data_backup_file):
|
||||
''' This function is used to load backup metadata information on Swift.
|
||||
This is used to keep information between consecutive backup
|
||||
executions.
|
||||
If the manifest_meta_dict is available, most probably this is not
|
||||
the first backup run for the provided backup name and host.
|
||||
In this case we remove all the conflictive keys -> values from
|
||||
the dictionary.
|
||||
'''
|
||||
|
||||
if manifest_meta_dict.get('x-object-meta-tar-prev-meta-obj-name'):
|
||||
tar_meta_prev = \
|
||||
manifest_meta_dict['x-object-meta-tar-prev-meta-obj-name']
|
||||
tar_meta_to_upload = \
|
||||
manifest_meta_dict['x-object-meta-tar-meta-obj-name'] = \
|
||||
manifest_meta_dict['x-object-meta-tar-prev-meta-obj-name'] = \
|
||||
meta_data_backup_file
|
||||
else:
|
||||
manifest_meta_dict['x-object-meta-tar-prev-meta-obj-name'] = \
|
||||
meta_data_backup_file
|
||||
manifest_meta_dict['x-object-meta-backup-name'] = \
|
||||
backup_opt_dict.backup_name
|
||||
manifest_meta_dict['x-object-meta-src-file-to-backup'] = \
|
||||
backup_opt_dict.path_to_backup
|
||||
manifest_meta_dict['x-object-meta-abs-file-path'] = ''
|
||||
|
||||
# Set manifest meta if encrypt_pass_file is provided
|
||||
# The file will contain a plain password that will be used
|
||||
# to encrypt and decrypt tasks
|
||||
manifest_meta_dict['x-object-meta-encrypt-data'] = 'Yes'
|
||||
if backup_opt_dict.encrypt_pass_file is False:
|
||||
manifest_meta_dict['x-object-meta-encrypt-data'] = ''
|
||||
manifest_meta_dict['x-object-meta-always-backup-level'] = ''
|
||||
if backup_opt_dict.always_level:
|
||||
manifest_meta_dict['x-object-meta-always-backup-level'] = \
|
||||
backup_opt_dict.always_level
|
||||
|
||||
# Set manifest meta if max_level argument is provided
|
||||
# Once the incremental backup arrive to max_level, it will
|
||||
# restart from level 0
|
||||
manifest_meta_dict['x-object-meta-maximum-backup-level'] = ''
|
||||
if backup_opt_dict.max_level is not False:
|
||||
manifest_meta_dict['x-object-meta-maximum-backup-level'] = \
|
||||
str(backup_opt_dict.max_level)
|
||||
|
||||
# At the end of the execution, checks the objects ages for the
|
||||
# specified swift container. If there are objects older than the
# specified days, they'll be removed.
# Unit is int; e.g. 5 == five days.
|
||||
manifest_meta_dict['x-object-meta-remove-backup-older-than-days'] = ''
|
||||
if backup_opt_dict.remove_older_than is not False:
|
||||
manifest_meta_dict['x-object-meta-remove-backup-older-than-days'] \
|
||||
= '{0}'.format(backup_opt_dict.remove_older_than)
|
||||
manifest_meta_dict['x-object-meta-hostname'] = backup_opt_dict.hostname
|
||||
manifest_meta_dict['x-object-meta-segments-size-bytes'] = \
|
||||
str(backup_opt_dict.max_segment_size)
|
||||
manifest_meta_dict['x-object-meta-backup-created-timestamp'] = \
|
||||
str(backup_opt_dict.time_stamp)
|
||||
manifest_meta_dict['x-object-meta-providers-list'] = 'HP'
|
||||
manifest_meta_dict['x-object-meta-tar-meta-obj-name'] = \
|
||||
meta_data_backup_file
|
||||
tar_meta_to_upload = tar_meta_prev = \
|
||||
manifest_meta_dict['x-object-meta-tar-meta-obj-name'] = \
|
||||
manifest_meta_dict['x-object-meta-tar-prev-meta-obj-name']
|
||||
|
||||
# Need to be processed from the last existing backup file found
|
||||
# in Swift, matching with hostname and backup name
|
||||
# the last existing file can be extracted from the timestamp
|
||||
manifest_meta_dict['x-object-meta-container-segments'] = \
|
||||
segments_name(backup_opt_dict.container)
|
||||
|
||||
# Set the restart_always_level value to n days. According
|
||||
# to the following option, when the always_level is set
|
||||
# the backup will be reset to level 0 if the current backup
|
||||
# timestamp is older than the days in x-object-meta-container-segments
|
||||
manifest_meta_dict['x-object-meta-restart-always-backup'] = ''
|
||||
if backup_opt_dict.restart_always_level is not False:
|
||||
manifest_meta_dict['x-object-meta-restart-always-backup'] = \
|
||||
backup_opt_dict.restart_always_level
|
||||
|
||||
return (
|
||||
backup_opt_dict, manifest_meta_dict,
|
||||
tar_meta_to_upload, tar_meta_prev)
|
||||
|
||||
|
||||
def validate_all_args(required_list):
|
||||
'''
|
||||
Ensure ALL the elements of required_list are True. raise ValueError
|
||||
@ -183,21 +97,6 @@ def validate_all_args(required_list):
|
||||
return True
|
||||
|
||||
|
||||
def sort_backup_list(remote_match_backup):
|
||||
"""
|
||||
Sort the backups by timestamp. The provided list contains strings in the
|
||||
format hostname_backupname_timestamp_level
|
||||
"""
|
||||
|
||||
# Remove duplicates objects
|
||||
backups_list = list(set(remote_match_backup))
|
||||
|
||||
backups_list.sort(
|
||||
key=lambda x: map(lambda y: int(y), x.rsplit('_', 2)[-2:]),
|
||||
reverse=True)
|
||||
return backups_list
|
||||
|
||||
|
||||
def create_dir(directory, do_log=True):
|
||||
'''
|
||||
Creates a directory if it doesn't exist and writes the execution
|
||||
@ -291,13 +190,14 @@ def get_mount_from_path(path):
|
||||
"""
|
||||
|
||||
if not os.path.exists(path):
|
||||
logging.critical('[*] Error: provided path does not exist')
|
||||
logging.critical('[*] Error: provided path does not exist: {0}'
|
||||
.format(path))
|
||||
raise IOError
|
||||
|
||||
mount_point_path = os.path.abspath(path)
|
||||
|
||||
while not os.path.ismount(mount_point_path):
|
||||
mount_point_path = os.path.dirname(mount_point_path)
|
||||
|
||||
return mount_point_path
|
||||
|
||||
|
||||
|
20
freezer/validator.py
Normal file
@ -0,0 +1,20 @@
class Validator:

    @staticmethod
    def validate(conf):
        if conf.no_incremental and (conf.max_level or conf.always_level):
            raise Exception(
                'no-incremental option is not compatible '
                'with backup level options')

        if conf.restore_abs_path and not conf.action == "restore":
            raise Exception('Restore abs path with {0} action'
                            .format(conf.action))
        options = conf.options
        """:type: freezer.utils.OpenstackOptions"""
        if (conf.storage == 'swift' or conf.backup_media != 'fs') and not (
                options.password and options.user_name and options.auth_url and
                options.tenant_id):
            raise Exception("Please set up in your env:"
                            "OS_USERNAME, OS_TENANT_NAME, OS_AUTH_URL,"
                            "OS_PASSWORD")
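A hypothetical illustration of the validation above; the conf and options objects are faked with namedtuples for brevity, so only the attribute names come from this change::

    from collections import namedtuple

    from freezer.validator import Validator

    Options = namedtuple('Options',
                         'user_name tenant_name auth_url password tenant_id')
    Conf = namedtuple('Conf', 'no_incremental max_level always_level '
                              'restore_abs_path action options storage '
                              'backup_media')

    conf = Conf(no_incremental=True, max_level=7, always_level=None,
                restore_abs_path=None, action='backup',
                options=Options('admin', 'admin',
                                'http://keystone:5000/v2.0', 'secret', 'abc'),
                storage='swift', backup_media='fs')
    Validator.validate(conf)   # raises: no-incremental option is not compatible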
@ -89,12 +89,11 @@ def stop_sql_server(backup_opt_dict):
                        ', error {0}'.format(err))


def start_sql_server(backup_opt_dict):
def start_sql_server(sql_server_instance):
    """ Start the SQL Server instance after the backup is completed """

    with DisableFileSystemRedirection():
        cmd = 'net start "SQL Server ({0})"'\
            .format(backup_opt_dict.sql_server_instance)
        cmd = 'net start "SQL Server ({0})"'.format(sql_server_instance)
        (out, err) = create_subprocess(cmd)
        if err != '':
            raise Exception('[*] Error while starting SQL Server'
@ -3,6 +3,7 @@ python-keystoneclient>=0.8.0
python-cinderclient>=1.2.1
python-glanceclient
python-novaclient>=2.21.0
python-openstackclient

docutils>=0.8.1
pymysql
@ -1,7 +1,8 @@
|
||||
#!/usr/bin/env python
|
||||
from mock import MagicMock
|
||||
import sys
|
||||
|
||||
from freezer.backup import backup_mode_mysql, backup_mode_fs, backup_mode_mongo
|
||||
from freezer.backup import backup_mode_mysql, backup_mode_mongo
|
||||
import freezer
|
||||
import swiftclient
|
||||
import multiprocessing
|
||||
@ -13,7 +14,7 @@ import pymongo
|
||||
import re
|
||||
from collections import OrderedDict
|
||||
from glanceclient.common.utils import IterableWithLength
|
||||
from freezer.storages.swiftstorage import SwiftStorage
|
||||
from freezer import swift
|
||||
from freezer.utils import OpenstackOptions
|
||||
|
||||
os.environ['OS_REGION_NAME'] = 'testregion'
|
||||
@ -528,6 +529,7 @@ class FakeCinderClient:
|
||||
self.volumes = FakeCinderClient.Volumes()
|
||||
self.volume_snapshots = FakeCinderClient.VolumeSnapshot
|
||||
self.backups = FakeCinderClient.Backups()
|
||||
self.restores = FakeCinderClient.Restores()
|
||||
|
||||
class Backups:
|
||||
def __init__(self):
|
||||
@ -572,6 +574,14 @@ class FakeCinderClient:
|
||||
def delete(snapshot):
|
||||
pass
|
||||
|
||||
class Restores:
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
def restore(backup_id):
|
||||
pass
|
||||
|
||||
|
||||
class FakeGlanceClient:
|
||||
def __init__(self):
|
||||
@ -643,7 +653,8 @@ class FakeSwiftClient:
|
||||
return [{}, []]
|
||||
|
||||
def get_account(self, *args, **kwargs):
|
||||
return True, [{'name': 'test-container',
|
||||
return [{'count': 0, 'bytes': 0, 'name': '1234'}, {'count': 4, 'bytes': 156095, 'name': 'a1'}], \
|
||||
[{'name': 'test-container',
|
||||
'bytes': 200000,
|
||||
'count': 1000},
|
||||
{'name': 'test-container-segments',
|
||||
@ -756,6 +767,7 @@ class BackupOpt1:
|
||||
self.openssl_path = 'true'
|
||||
self.always_level = '0'
|
||||
self.max_level = '0'
|
||||
self.hostname_backup_name = "hostname_backup_name"
|
||||
self.remove_older_than = '0'
|
||||
self.max_segment_size = '0'
|
||||
self.time_stamp = 123456789
|
||||
@ -790,6 +802,8 @@ class BackupOpt1:
|
||||
self.restore_from_date = '2014-12-03T23:23:23'
|
||||
self.restore_from_host = 'test-hostname'
|
||||
self.action = 'info'
|
||||
self.shadow = ''
|
||||
self.windows_volume = ''
|
||||
self.insecure = True
|
||||
self.os_auth_ver = 2
|
||||
self.dry_run = False
|
||||
@ -807,7 +821,10 @@ class BackupOpt1:
|
||||
self.client_manager.get_swift = Mock(
|
||||
return_value=FakeSwiftClient().client.Connection())
|
||||
self.client_manager.create_swift = self.client_manager.get_swift
|
||||
self.storage = SwiftStorage(self.client_manager, self.container)
|
||||
self.storage = swift.SwiftStorage(self.client_manager,
|
||||
self.container,
|
||||
self.work_dir,
|
||||
self.max_segment_size)
|
||||
self.client_manager.get_glance = Mock(return_value=FakeGlanceClient())
|
||||
self.client_manager.get_cinder = Mock(return_value=FakeCinderClient())
|
||||
nova_client = MagicMock()
|
||||
@ -1171,6 +1188,8 @@ def fake_create_subprocess(cmd):
|
||||
def fake_create_subprocess2(cmd):
|
||||
return True, 'Error'
|
||||
|
||||
def tar_path():
|
||||
return "gnutar" if sys.__dict__['platform'] == 'darwin' else "tar"
|
||||
|
||||
class FakeSys:
|
||||
|
||||
|
@ -1,7 +1,9 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
from freezer.backup import backup_mode_mysql, backup_mode_fs, backup_mode_mongo
|
||||
from freezer.backup import backup_mode_mysql, backup_mode_mongo
|
||||
from freezer.backup import BackupOs
|
||||
from freezer import tar
|
||||
from freezer import local
|
||||
import freezer
|
||||
import swiftclient
|
||||
import multiprocessing
|
||||
@ -17,10 +19,10 @@ from commons import *
|
||||
|
||||
class TestBackUP:
|
||||
|
||||
def test_backup_mode_mysql(self, monkeypatch):
|
||||
def test_backup_mode_mysql(self, monkeypatch, tmpdir):
|
||||
|
||||
test_meta = dict()
|
||||
backup_opt = BackupOpt1()
|
||||
backup_opt.__dict__['storage'] = local.LocalStorage(tmpdir.strpath, tmpdir.strpath)
|
||||
fakemysql = FakeMySQLdb()
|
||||
expanduser = Os()
|
||||
fakere = FakeRe()
|
||||
@ -52,30 +54,28 @@ class TestBackUP:
|
||||
monkeypatch.setattr(os.path, 'expanduser', expanduser.expanduser)
|
||||
monkeypatch.setattr(os.path, 'isdir', expanduser.isdir)
|
||||
monkeypatch.setattr(os, 'makedirs', expanduser.makedirs)
|
||||
monkeypatch.setattr(os, 'chdir', expanduser.makedirs)
|
||||
monkeypatch.setattr(os.path, 'exists', expanduser.exists)
|
||||
monkeypatch.setattr(os, 'chdir', lambda x: x)
|
||||
monkeypatch.setattr(swiftclient, 'client', fakeswiftclient.client)
|
||||
|
||||
mysql_conf = backup_opt.mysql_conf
|
||||
backup_opt.__dict__['mysql_conf'] = None
|
||||
pytest.raises(Exception, backup_mode_mysql, backup_opt, 123456789, test_meta)
|
||||
pytest.raises(Exception, backup_mode_mysql, backup_opt)
|
||||
|
||||
# Generate mysql conf test file
|
||||
backup_opt.__dict__['mysql_conf'] = mysql_conf
|
||||
with open(backup_opt.mysql_conf, 'w') as mysql_conf_fd:
|
||||
mysql_conf_fd.write('host=abcd\nport=1234\nuser=abcd\npassword=abcd\n')
|
||||
assert backup_mode_mysql(
|
||||
backup_opt, 123456789, test_meta) is None
|
||||
assert backup_mode_mysql(backup_opt) is None
|
||||
|
||||
fakemysql2 = FakeMySQLdb2()
|
||||
monkeypatch.setattr(MySQLdb, 'connect', fakemysql2.connect)
|
||||
pytest.raises(Exception, backup_mode_mysql, backup_opt, 123456789, test_meta)
|
||||
pytest.raises(Exception, backup_mode_mysql)
|
||||
os.unlink(backup_opt.mysql_conf)
|
||||
|
||||
def test_backup_mode_fs(self, monkeypatch):
|
||||
def test_backup_mode_fs(self, monkeypatch, tmpdir):
|
||||
|
||||
# Class and other settings initialization
|
||||
test_meta = dict()
|
||||
backup_opt = BackupOpt1()
|
||||
backup_opt.mode = 'fs'
|
||||
expanduser = Os()
|
||||
@ -100,61 +100,27 @@ class TestBackUP:
|
||||
monkeypatch.setattr(re, 'search', fakere.search)
|
||||
monkeypatch.setattr(os.path, 'exists', expanduser.exists)
|
||||
|
||||
assert backup_mode_fs(
|
||||
backup_opt, 123456789, test_meta) is None
|
||||
storage = local.LocalStorage(tmpdir.strpath, tmpdir.strpath)
|
||||
|
||||
assert storage.backup(
|
||||
"/tmp/", "hostname_backup_name",
|
||||
tar.TarCommandBuilder(tar_path(), ".")) is None
|
||||
|
||||
backup_opt.__dict__['no_incremental'] = False
|
||||
with open(
|
||||
'/tmp/tar_metadata_test-hostname_test-backup-name_123456789_0', 'w') as fd:
|
||||
fd.write('testcontent\n')
|
||||
assert backup_mode_fs(
|
||||
backup_opt, 123456789, test_meta) is None
|
||||
assert storage.backup(
|
||||
"/tmp/", "hostname_backup_name",
|
||||
tar.TarCommandBuilder(tar_path(), ".")) is None
|
||||
|
||||
def test_backup_mode_fs_dry_run(self, monkeypatch):
|
||||
def test_backup_mode_mongo(self, monkeypatch, tmpdir):
|
||||
|
||||
# Class and other settings initialization
|
||||
test_meta = dict()
|
||||
backup_opt = BackupOpt1()
|
||||
backup_opt.mode = 'fs'
|
||||
backup_opt.dry_run = True
|
||||
backup_opt.__dict__['storage'] = local.LocalStorage(tmpdir.strpath, tmpdir.strpath)
|
||||
|
||||
expanduser = Os()
|
||||
fakere = FakeRe()
|
||||
fakeswiftclient = FakeSwiftClient()
|
||||
fakelvm = Lvm()
|
||||
fakemultiprocessing = FakeMultiProcessing()
|
||||
fakemultiprocessingqueue = fakemultiprocessing.Queue()
|
||||
fakemultiprocessingpipe = fakemultiprocessing.Pipe()
|
||||
fakemultiprocessinginit = fakemultiprocessing.__init__()
|
||||
|
||||
# Monkey patch
|
||||
monkeypatch.setattr(
|
||||
multiprocessing, 'Queue', fakemultiprocessingqueue)
|
||||
monkeypatch.setattr(multiprocessing, 'Pipe', fakemultiprocessingpipe)
|
||||
monkeypatch.setattr(
|
||||
multiprocessing, 'Process', fakemultiprocessing.Process)
|
||||
monkeypatch.setattr(
|
||||
multiprocessing, '__init__', fakemultiprocessinginit)
|
||||
monkeypatch.setattr(freezer.lvm, 'lvm_eval', fakelvm.lvm_eval)
|
||||
monkeypatch.setattr(swiftclient, 'client', fakeswiftclient.client)
|
||||
monkeypatch.setattr(re, 'search', fakere.search)
|
||||
monkeypatch.setattr(os.path, 'exists', expanduser.exists)
|
||||
|
||||
assert backup_mode_fs(
|
||||
backup_opt, 123456789, test_meta) is None
|
||||
|
||||
backup_opt.__dict__['no_incremental'] = False
|
||||
with open(
|
||||
'/tmp/tar_metadata_test-hostname_test-backup-name_123456789_0', 'w') as fd:
|
||||
fd.write('testcontent\n')
|
||||
assert backup_mode_fs(
|
||||
backup_opt, 123456789, test_meta) is None
|
||||
|
||||
def test_backup_mode_mongo(self, monkeypatch):
|
||||
|
||||
# Class and other settings initialization
|
||||
test_meta = dict()
|
||||
backup_opt = BackupOpt1()
|
||||
fakemongo = FakeMongoDB()
|
||||
backup_opt.mode = 'mongo'
|
||||
fakeos = Os()
|
||||
@ -182,24 +148,19 @@ class TestBackUP:
|
||||
monkeypatch.setattr(swiftclient, 'client', fakeswiftclient.client)
|
||||
#monkeypatch.setattr(__builtin__, 'open', fakeopen.open)
|
||||
|
||||
assert backup_mode_mongo(
|
||||
backup_opt, 123456789, test_meta) is None
|
||||
assert backup_mode_mongo(backup_opt) is None
|
||||
|
||||
fakemongo2 = FakeMongoDB2()
|
||||
monkeypatch.setattr(pymongo, 'MongoClient', fakemongo2)
|
||||
assert backup_mode_mongo(
|
||||
backup_opt, 123456789, test_meta) is True
|
||||
assert backup_mode_mongo(backup_opt) is True
|
||||
|
||||
def test_backup_cinder_by_glance(self):
|
||||
backup_opt = BackupOpt1()
|
||||
backup_opt.volume_id = 34
|
||||
BackupOs(backup_opt.client_manager,
|
||||
backup_opt.container).backup_cinder_by_glance(
|
||||
backup_opt, 1417649003)
|
||||
backup_opt.container,
|
||||
backup_opt.storage).backup_cinder_by_glance(34)
|
||||
|
||||
def test_backup_cinder(self):
|
||||
backup_opt = BackupOpt1()
|
||||
backup_opt.volume_id = 34
|
||||
BackupOs(backup_opt.client_manager,
|
||||
backup_opt.container).backup_cinder(
|
||||
backup_opt, 1417649003)
|
||||
backup_opt.container, backup_opt.storage).backup_cinder(34)
|
||||
|
@ -22,10 +22,12 @@ Hudson (tjh@cryptsoft.com).
|
||||
"""
|
||||
|
||||
from commons import *
|
||||
from freezer import (
|
||||
swift, restore, backup, exec_cmd)
|
||||
from freezer import (restore, backup, exec_cmd)
|
||||
from freezer.job import (
|
||||
Job, InfoJob, AdminJob, BackupJob, RestoreJob, ExecJob, create_job)
|
||||
from freezer import (restore, backup)
|
||||
|
||||
from freezer.job import Job, InfoJob, AdminJob, BackupJob, RestoreJob, create_job
|
||||
import logging
|
||||
from mock import patch, Mock
|
||||
import pytest
|
||||
@ -47,7 +49,7 @@ class TestJob:
|
||||
|
||||
def test_execute(self, monkeypatch):
|
||||
self.do_monkeypatch(monkeypatch)
|
||||
job = Job({})
|
||||
job = Job(BackupOpt1())
|
||||
assert job.execute() is None
|
||||
|
||||
|
||||
@ -57,37 +59,18 @@ class TestInfoJob(TestJob):
|
||||
self.do_monkeypatch(monkeypatch)
|
||||
backup_opt = BackupOpt1()
|
||||
job = InfoJob(backup_opt)
|
||||
assert job.execute() is False
|
||||
job.execute()
|
||||
|
||||
def test_execute_list_containers(self, monkeypatch):
|
||||
self.do_monkeypatch(monkeypatch)
|
||||
backup_opt = BackupOpt1()
|
||||
backup_opt.list_containers = True
|
||||
job = InfoJob(backup_opt)
|
||||
assert job.execute() is True
|
||||
|
||||
def test_execute_list_objects(self, monkeypatch):
|
||||
self.do_monkeypatch(monkeypatch)
|
||||
monkeypatch.setattr(swift, 'show_containers', self.fakeswift.fake_show_containers)
|
||||
monkeypatch.setattr(swift, 'show_objects', self.fakeswift.fake_show_objects)
|
||||
backup_opt = BackupOpt1()
|
||||
backup_opt.list_objects = True
|
||||
job = InfoJob(backup_opt)
|
||||
assert job.execute() is True
|
||||
job.execute()
|
||||
|
||||
|
||||
class TestBackupJob(TestJob):
|
||||
|
||||
def test_execute_backup_fs_incremental(self, monkeypatch):
|
||||
self.do_monkeypatch(monkeypatch)
|
||||
monkeypatch.setattr(swift, 'set_backup_level', self.fakeutils.fake_set_backup_level)
|
||||
monkeypatch.setattr(backup, 'backup_mode_fs', self.fakebackup.fake_backup_mode_fs)
|
||||
backup_opt = BackupOpt1()
|
||||
backup_opt.mode = 'fs'
|
||||
backup_opt.no_incremental = False
|
||||
job = BackupJob(backup_opt)
|
||||
assert job.execute() is None
|
||||
|
||||
def test_execute_backup_fs_no_incremental_and_backup_level_raise(self, monkeypatch):
|
||||
self.do_monkeypatch(monkeypatch)
|
||||
backup_opt = BackupOpt1()
|
||||
@ -98,7 +81,6 @@ class TestBackupJob(TestJob):
|
||||
|
||||
def test_execute_backup_mongo(self, monkeypatch):
|
||||
self.do_monkeypatch(monkeypatch)
|
||||
monkeypatch.setattr(swift, 'set_backup_level', self.fakeutils.fake_set_backup_level)
|
||||
monkeypatch.setattr(backup, 'backup_mode_mongo', self.fakebackup.fake_backup_mode_mongo)
|
||||
backup_opt = BackupOpt1()
|
||||
backup_opt.no_incremental = False
|
||||
@ -108,7 +90,6 @@ class TestBackupJob(TestJob):
|
||||
|
||||
def test_execute_backup_mysql(self, monkeypatch):
|
||||
self.do_monkeypatch(monkeypatch)
|
||||
monkeypatch.setattr(swift, 'set_backup_level', self.fakeutils.fake_set_backup_level)
|
||||
monkeypatch.setattr(backup, 'backup_mode_mysql', self.fakebackup.fake_backup_mode_mysql)
|
||||
backup_opt = BackupOpt1()
|
||||
backup_opt.no_incremental = False
|
||||
@ -118,7 +99,6 @@ class TestBackupJob(TestJob):
|
||||
|
||||
def test_execute_raise(self, monkeypatch):
|
||||
self.do_monkeypatch(monkeypatch)
|
||||
monkeypatch.setattr(swift, 'set_backup_level', self.fakeutils.fake_set_backup_level)
|
||||
backup_opt = BackupOpt1()
|
||||
backup_opt.no_incremental = False
|
||||
backup_opt.mode = None
|
||||
@ -126,38 +106,9 @@ class TestBackupJob(TestJob):
|
||||
pytest.raises(ValueError, job.execute)
|
||||
|
||||
|
||||
class TestRestoreJob(TestJob):
|
||||
|
||||
def test_execute(self, monkeypatch):
|
||||
self.do_monkeypatch(monkeypatch)
|
||||
fakerestore = FakeRestore()
|
||||
monkeypatch.setattr(restore, 'restore_fs', fakerestore.fake_restore_fs)
|
||||
backup_opt = BackupOpt1()
|
||||
job = RestoreJob(backup_opt)
|
||||
assert job.execute() is None
|
||||
|
||||
def test_execute_backup_with_sync_failed(self, monkeypatch):
|
||||
self.do_monkeypatch(monkeypatch)
|
||||
monkeypatch.setattr(swift, 'set_backup_level', self.fakeutils.fake_set_backup_level)
|
||||
monkeypatch.setattr(backup, 'backup_mode_fs', self.fakebackup.fake_backup_mode_fs)
|
||||
backup_opt = BackupOpt1()
|
||||
backup_opt.mode = 'fs'
|
||||
backup_opt.no_incremental = False
|
||||
job = BackupJob(backup_opt)
|
||||
fakeutils = FakeUtils()
|
||||
monkeypatch.setattr(freezer.utils, 'create_subprocess',
|
||||
fakeutils.fake_create_subprocess_err)
|
||||
assert job.execute() is None
|
||||
|
||||
monkeypatch.setattr(freezer.utils, 'create_subprocess',
|
||||
fakeutils.fake_create_subprocess_raise)
|
||||
assert job.execute() is None
|
||||
|
||||
|
||||
class TestAdminJob(TestJob):
|
||||
def test_execute(self, monkeypatch):
|
||||
self.do_monkeypatch(monkeypatch)
|
||||
monkeypatch.setattr(swift, 'remove_obj_older_than', self.fakeswift.remove_obj_older_than)
|
||||
backup_opt = BackupOpt1()
|
||||
job = AdminJob(backup_opt)
|
||||
assert job.execute() is None
|
||||
|
131
tests/test_local.py
Normal file
@ -0,0 +1,131 @@
import tempfile
import shutil
import pytest

from freezer import local
from freezer import tar
from freezer import utils
import commons
import os


@pytest.mark.incremental
class TestLocalStorage(object):
    WORK_DIR_PREFIX = "freezer_test_work_dir"
    BACKUP_DIR_PREFIX = "freezer_test_backup_dir"
    FILES_DIR_PREFIX = "freezer_test_files_dir"
    HELLO = "Hello World!\n"
    temp = True

    def create_content(self, files_dir, file_name="file_1", text=HELLO):
        f = open(files_dir + "/" + file_name, 'w')
        f.write(text)
        f.close()

    def create_dirs(self, tmpdir):
        tmpdir = tmpdir.strpath
        if self.temp:
            work_dir = tempfile.mkdtemp(
                dir=tmpdir, prefix=self.WORK_DIR_PREFIX)
            backup_dir = tempfile.mkdtemp(
                dir=tmpdir, prefix=self.BACKUP_DIR_PREFIX)
            files_dir = tempfile.mkdtemp(
                dir=tmpdir, prefix=self.FILES_DIR_PREFIX)
        else:
            work_dir = tmpdir + self.WORK_DIR_PREFIX
            backup_dir = tmpdir + self.BACKUP_DIR_PREFIX
            files_dir = tmpdir + self.FILES_DIR_PREFIX
            utils.create_dir(work_dir)
            utils.create_dir(backup_dir)
            utils.create_dir(files_dir)
        self.create_content(files_dir)
        return work_dir, backup_dir, files_dir

    def remove_dirs(self, work_dir, files_dir, backup_dir):
        if self.temp:
            shutil.rmtree(work_dir)
            shutil.rmtree(files_dir)
            shutil.rmtree(backup_dir, ignore_errors=True)

    def remove_storage(self, backup_dir):
        shutil.rmtree(backup_dir)

    def test(self, tmpdir):
        work_dir, backup_dir, files_dir = self.create_dirs(tmpdir)
        storage = local.LocalStorage(backup_dir, work_dir)
        builder = tar.TarCommandBuilder(commons.tar_path(), ".")
        storage.backup(files_dir, "file_backup", builder)
        storage.get_backups()

    def test_is_ready(self, tmpdir):
        work_dir, backup_dir, files_dir = self.create_dirs(tmpdir)
        storage = local.LocalStorage(backup_dir, work_dir)
        assert storage.is_ready()

    def test_prepare(self, tmpdir):
        work_dir, backup_dir, files_dir = self.create_dirs(tmpdir)
        storage = local.LocalStorage(backup_dir, work_dir)
        assert storage.is_ready()
        self.remove_storage(backup_dir)
        assert not storage.is_ready()
        storage.prepare()
        assert storage.is_ready()

    def test_get_backups(self, tmpdir):
        work_dir, backup_dir, files_dir = self.create_dirs(tmpdir)
        storage = local.LocalStorage(backup_dir, work_dir)
        builder = tar.TarCommandBuilder(commons.tar_path(), ".")
        storage.backup(files_dir, "file_backup", builder)
        backups = storage.get_backups()
        assert len(backups) == 1

    def test_incremental_backup(self, tmpdir):
        work_dir, backup_dir, files_dir = self.create_dirs(tmpdir)
        storage = local.LocalStorage(backup_dir, work_dir)
        builder = tar.TarCommandBuilder(commons.tar_path(), ".")
        storage.backup(files_dir, "file_backup", builder)
        backups = storage.get_backups()
        assert len(backups) == 1
        backup = backups[0]
        self.create_content(files_dir, "file_2", "foo\n")
        storage.backup(files_dir, "file_backup", builder, backup)

    def test_incremental_restore(self, tmpdir):
        work_dir, backup_dir, files_dir = self.create_dirs(tmpdir)
        storage = local.LocalStorage(backup_dir, work_dir)
        builder = tar.TarCommandBuilder(commons.tar_path(), ".")
        os.chdir(files_dir)
        storage.backup(files_dir, "file_backup", builder)
        backups = storage.get_backups()
        assert len(backups) == 1
        backup = backups[0]
        self.create_content(files_dir, "file_2", "foo\n")
        storage.backup(files_dir, "file_backup", builder, backup)
        for path in os.listdir(files_dir):
            os.remove(files_dir + "/" + path)
        assert not os.listdir(files_dir)
        utils.create_dir(files_dir)
        backup = storage.get_backups()[0]
        builder = tar.TarCommandRestoreBuilder(commons.tar_path(), files_dir)
        storage.restore(backup, files_dir, builder, 1)
        files = os.listdir(files_dir)
        assert len(files) == 2
        with open(files_dir + "/file_1", "r") as file_1:
            assert self.HELLO == file_1.read()
        with open(files_dir + "/file_2", "r") as file_2:
            assert "foo\n" == file_2.read()

    def test_backup_file(self, tmpdir):
        work_dir, backup_dir, files_dir = self.create_dirs(tmpdir)
        storage = local.LocalStorage(backup_dir, work_dir)
        builder = tar.TarCommandBuilder(commons.tar_path(), "file_1")
        os.chdir(files_dir)
        storage.backup(files_dir + "/file_1", "file_backup", builder)
        for path in os.listdir(files_dir):
            os.remove(files_dir + "/" + path)
        assert not os.listdir(files_dir)
        utils.create_dir(files_dir)
        backup = storage.get_backups()[0]
        builder = tar.TarCommandRestoreBuilder(commons.tar_path(), files_dir)
        storage.restore(backup, files_dir, builder, 0)
        files = os.listdir(files_dir)
        assert len(files) == 1
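Outside the test suite, the flow these tests exercise would look roughly as follows; this is a sketch based only on the LocalStorage(backup_dir, work_dir) and builder signatures used above, with made-up paths::

    from freezer import local, tar

    storage = local.LocalStorage('/my_backup_path', '/tmp/freezer_work_dir')
    if not storage.is_ready():
        storage.prepare()

    # Level 0 backup of a directory, then an incremental one on top of it.
    builder = tar.TarCommandBuilder('tar', '.')
    storage.backup('/data/dir/to/backup', 'my-backup-name', builder)
    previous = storage.get_backups()[0]
    storage.backup('/data/dir/to/backup', 'my-backup-name', builder, previous)

    # Restore the newest increment (level 1) into a target directory.
    restore_builder = tar.TarCommandRestoreBuilder('tar', '/restore/target')
    storage.restore(storage.get_backups()[0], '/restore/target', restore_builder, 1)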
@ -74,7 +74,6 @@ class TestLvm:
|
||||
backup_opt.lvm_snapname = False
|
||||
pytest.raises(Exception, lvm_snap, backup_opt)
|
||||
|
||||
|
||||
backup_opt = BackupOpt1()
|
||||
fakeos = Os()
|
||||
fakesubprocess = FakeSubProcess()
|
||||
|
@ -20,78 +20,25 @@ Hudson (tjh@cryptsoft.com).
|
||||
========================================================================
|
||||
|
||||
"""
|
||||
import unittest
|
||||
|
||||
from commons import *
|
||||
from freezer.restore import (
|
||||
restore_fs, restore_fs_sort_obj, RestoreOs)
|
||||
from freezer import swift
|
||||
import freezer
|
||||
import logging
|
||||
import pytest
|
||||
from freezer import restore
|
||||
import commons
|
||||
|
||||
|
||||
class TestRestore:
|
||||
|
||||
def test_restore_fs(self, monkeypatch):
|
||||
|
||||
backup_opt = BackupOpt1()
|
||||
fakelogging = FakeLogging()
|
||||
|
||||
monkeypatch.setattr(logging, 'critical', fakelogging.critical)
|
||||
monkeypatch.setattr(logging, 'warning', fakelogging.warning)
|
||||
monkeypatch.setattr(logging, 'exception', fakelogging.exception)
|
||||
monkeypatch.setattr(logging, 'error', fakelogging.error)
|
||||
monkeypatch.setattr(
|
||||
freezer.restore, 'restore_fs_sort_obj', fake_restore_fs_sort_obj)
|
||||
assert restore_fs(backup_opt) is None
|
||||
|
||||
backup_opt = BackupOpt1()
|
||||
backup_opt.container = None
|
||||
pytest.raises(Exception, restore_fs, backup_opt)
|
||||
|
||||
backup_opt = BackupOpt1()
|
||||
backup_opt.restore_from_date = None
|
||||
assert restore_fs(backup_opt) is None
|
||||
|
||||
def test_restore_fs_sort_obj(self, monkeypatch):
|
||||
|
||||
fakelogging = FakeLogging()
|
||||
# TEST 1
|
||||
backup_opt = BackupOpt1()
|
||||
fakemultiprocessing = FakeMultiProcessing()
|
||||
monkeypatch.setattr(logging, 'critical', fakelogging.critical)
|
||||
monkeypatch.setattr(logging, 'warning', fakelogging.warning)
|
||||
monkeypatch.setattr(logging, 'exception', fakelogging.exception)
|
||||
monkeypatch.setattr(logging, 'error', fakelogging.error)
|
||||
monkeypatch.setattr(multiprocessing, 'Process', fakemultiprocessing.Process)
|
||||
assert restore_fs_sort_obj(backup_opt) is None
|
||||
|
||||
# TEST 2
|
||||
backup_opt = BackupOpt1()
|
||||
backup_opt.backup_name = 'abcdtest'
|
||||
monkeypatch.setattr(multiprocessing, 'Process', fakemultiprocessing.Process)
|
||||
pytest.raises(Exception, restore_fs_sort_obj, backup_opt)
|
||||
|
||||
# TEST 3
|
||||
backup_opt = BackupOpt1()
|
||||
fakemultiprocessing = FakeMultiProcessing1()
|
||||
monkeypatch.setattr(multiprocessing, 'Process', fakemultiprocessing.Process)
|
||||
pytest.raises(Exception, restore_fs_sort_obj, backup_opt)
|
||||
class TestRestore(unittest.TestCase):
|
||||
|
||||
def test_restore_cinder_by_glance(self):
|
||||
backup_opt = BackupOpt1()
|
||||
ros = RestoreOs(backup_opt.client_manager, backup_opt.container)
|
||||
ros.restore_cinder_by_glance(backup_opt.restore_from_date, 34)
|
||||
backup_opt = commons.BackupOpt1()
|
||||
ros = restore.RestoreOs(backup_opt.client_manager, backup_opt.container)
|
||||
ros.restore_cinder_by_glance(35, 34)
|
||||
|
||||
def test_restore_cinder(self):
|
||||
backup_opt = BackupOpt1()
|
||||
ros = RestoreOs(backup_opt.client_manager, backup_opt.container)
|
||||
ros.restore_cinder(backup_opt.restore_from_date, 34)
|
||||
|
||||
backup_opt = commons.BackupOpt1()
|
||||
ros = restore.RestoreOs(backup_opt.client_manager, backup_opt.container)
|
||||
ros.restore_cinder(35, 34)
|
||||
|
||||
def test_restore_nova(self):
|
||||
backup_opt = BackupOpt1()
|
||||
ros = RestoreOs(backup_opt.client_manager, backup_opt.container)
|
||||
ros.restore_nova(backup_opt.restore_from_date, 34)
|
||||
|
||||
|
||||
backup_opt = commons.BackupOpt1()
|
||||
ros = restore.RestoreOs(backup_opt.client_manager, backup_opt.container)
|
||||
ros.restore_nova(35, 34)
|
||||
|
67
tests/test_storage.py
Normal file
@ -0,0 +1,67 @@
import unittest
from freezer import storage


class TestBackup(unittest.TestCase):
    def test_backup_parse(self):
        self.assertRaises(ValueError, storage.Backup.parse, "asdfasdfasdf")
        backup = storage.Backup.parse("test_name_host_1234_0")
        self.assertEqual(backup.level, 0)
        self.assertEqual(backup.timestamp, 1234)
        self.assertEqual(backup.hostname_backup_name, "test_name_host")

    def test_backup_creation(self):
        backup = storage.Backup("name", 1234, 0)
        self.assertEqual(backup.hostname_backup_name, "name")
        self.assertEqual(backup.timestamp, 1234)
        self.assertEqual(backup.level, 0)
        self.assertEqual(backup.latest_update.level, 0)
        self.assertEqual(backup.latest_update.timestamp, 1234)
        self.assertEqual(backup.latest_update.hostname_backup_name, "name")
        self.assertEqual(len(backup.increments), 1)

    def test_backup_increment(self):
        backup = storage.Backup("name", 1234, 0)
        self.assertRaises(ValueError, backup.add_increment, backup)
        increment = storage.Backup("name", 4567, 1)
        backup.add_increment(increment)
        self.assertEqual(len(backup.increments), 2)

    def test__find_previous_backup(self):
        backup = storage.Backup("name", 1234, 0)
        b = storage.Storage._find_previous_backup([backup], False, 2, False, 0)
        assert b == backup

    def test__find_previous_backup_with_max_level(self):
        backup = storage.Backup("name", 1234, 0)
        i1 = storage.Backup("name", 1234, 1)
        i2 = storage.Backup("name", 1234, 2)
        backup.add_increment(i1)
        backup.add_increment(i2)
        b = storage.Storage._find_previous_backup([backup], False, 2, False, 0)
        assert not b

    def test__find_previous_backup_with_max_level_not_reached(self):
        backup = storage.Backup("name", 1234, 0)
        i1 = storage.Backup("name", 1234, 1)
        backup.add_increment(i1)
        b = storage.Storage._find_previous_backup([backup], False, 2, False, 0)
        assert b == i1

    def test__find_previous_backup_with_always_level_reached(self):
        backup = storage.Backup("name", 1234, 0)
        i1 = storage.Backup("name", 1234, 1)
        i2 = storage.Backup("name", 1234, 2)
        backup.add_increment(i1)
        backup.add_increment(i2)
        b = storage.Storage._find_previous_backup([backup], False, False, 2, 0)
        assert b == i1

    def test__find_previous_backup_with_always_level_reached_2(self):
        backup = storage.Backup("name", 1234, 0)
        i1 = storage.Backup("name", 1234, 1)
        i2 = storage.Backup("name", 1234, 2)
        backup.add_increment(i1)
        backup.add_increment(i2)
        b = storage.Storage._find_previous_backup([backup], False, False, 3, 0)
        assert b == i2
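Taken together, these tests pin down the backup naming convention (hostname_backupname_timestamp_level), the rule that add_increment rejects an incompatible increment, and how the previous backup is picked for the next run. A hedged paraphrase of that selection, using only attributes the tests show (the real logic is storage.Storage._find_previous_backup)::

    def choose_previous_backup(backups, max_level, always_level):
        # Illustrative paraphrase, not the freezer implementation.
        full = max(backups, key=lambda b: b.latest_update.timestamp)
        latest = full.latest_update
        if max_level and latest.level >= max_level:
            return None            # cycle back to a new level 0 backup
        if always_level and latest.level >= always_level:
            return full.increments[always_level - 1]   # keep rewriting the top level
        return latest              # normal case: increment on the newest backup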
@ -1,187 +1,80 @@
|
||||
"""Freezer swift.py related tests
|
||||
|
||||
Copyright 2014 Hewlett-Packard
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
This product includes cryptographic software written by Eric Young
|
||||
(eay@cryptsoft.com). This product includes software written by Tim
|
||||
Hudson (tjh@cryptsoft.com).
|
||||
========================================================================
|
||||
|
||||
"""
|
||||
|
||||
from commons import *
|
||||
from freezer.storages.swiftstorage import SwiftStorage
|
||||
from freezer.swift import (
|
||||
show_containers, show_objects, remove_obj_older_than,
|
||||
get_container_content, object_to_stream, _remove_object, remove_object)
|
||||
import logging
|
||||
import pytest
|
||||
import time
|
||||
import unittest
|
||||
from freezer import osclients
|
||||
from freezer import utils
|
||||
from freezer import swift
|
||||
from freezer import storage
|
||||
|
||||
|
||||
class TestSwift:
|
||||
class TestSwiftStorage(unittest.TestCase):
|
||||
|
||||
def test_show_containers(self, monkeypatch):
|
||||
def setUp(self):
|
||||
|
||||
backup_opt = BackupOpt1()
|
||||
fakelogging = FakeLogging()
|
||||
self.storage = swift.SwiftStorage(
|
||||
osclients.ClientManager(
|
||||
utils.OpenstackOptions.create_from_env()
|
||||
),
|
||||
"freezer_ops-aw1ops1-gerrit0001.aw1.hpcloud.net",
|
||||
"/tmp/",
|
||||
100
|
||||
)
|
||||
|
||||
monkeypatch.setattr(logging, 'critical', fakelogging.critical)
|
||||
monkeypatch.setattr(logging, 'warning', fakelogging.warning)
|
||||
monkeypatch.setattr(logging, 'exception', fakelogging.exception)
|
||||
monkeypatch.setattr(logging, 'error', fakelogging.error)
|
||||
self.files = [
|
||||
"tar_metadata_hostname_backup_1000_0",
|
||||
"hostname_backup_1000_0",
|
||||
]
|
||||
|
||||
backup_opt.__dict__['list_containers'] = True
|
||||
show_containers(backup_opt.containers_list)
|
||||
self.increments = [
|
||||
"tar_metadata_hostname_backup_1000_0",
|
||||
"hostname_backup_1000_0",
|
||||
"tar_metadata_hostname_backup_2000_1",
|
||||
"hostname_backup_2000_1",
|
||||
]
|
||||
|
||||
def test_show_objects(self, monkeypatch):
|
||||
self.cycles_increments = [
|
||||
"tar_metadata_hostname_backup_1000_0",
|
||||
"hostname_backup_1000_0",
|
||||
"tar_metadata_hostname_backup_2000_1",
|
||||
"hostname_backup_2000_1",
|
||||
"tar_metadata_hostname_backup_3000_0",
|
||||
"hostname_backup_3000_0",
|
||||
"tar_metadata_hostname_backup_4000_1",
|
||||
"hostname_backup_4000_1",
|
||||
]
|
||||
|
||||
backup_opt = BackupOpt1()
|
||||
fakelogging = FakeLogging()
|
||||
self.backup = storage.Backup("hostname_backup", 1000, 0, True)
|
||||
self.backup_2 = storage.Backup("hostname_backup", 3000, 0, True)
|
||||
self.increment = storage.Backup("hostname_backup", 2000, 1, True)
|
||||
self.increment_2 = storage.Backup("hostname_backup", 4000, 1, True)
|
||||
|
||||
monkeypatch.setattr(logging, 'critical', fakelogging.critical)
|
||||
monkeypatch.setattr(logging, 'warning', fakelogging.warning)
|
||||
monkeypatch.setattr(logging, 'exception', fakelogging.exception)
|
||||
monkeypatch.setattr(logging, 'error', fakelogging.error)
|
||||
def test__get_backups(self):
|
||||
backups = swift.SwiftStorage._get_backups(self.files)
|
||||
self.assertEqual(len(backups), 1)
|
||||
backup = backups[0]
|
||||
self.assertEqual(backup, self.backup)
|
||||
|
||||
backup_opt.__dict__['list_objects'] = True
|
||||
assert show_objects(backup_opt) is True
|
||||
def test__get_backups_with_tar_only(self):
|
||||
backups = swift.SwiftStorage._get_backups(
|
||||
["tar_metadata_hostname_backup_1000_0"])
|
||||
self.assertEqual(len(backups), 0)
|
||||
|
||||
backup_opt.__dict__['list_objects'] = False
|
||||
assert show_objects(backup_opt) is False
|
||||
def test__get_backups_without_tar(self):
|
||||
backups = swift.SwiftStorage._get_backups(["hostname_backup_1000_0"])
|
||||
self.assertEqual(len(backups), 1)
|
||||
self.backup.tar_meta = False
|
||||
backup = backups[0]
|
||||
self.assertEqual(backup, self.backup)
|
||||
|
||||
def test__remove_object(self, monkeypatch):
|
||||
backup_opt = BackupOpt1()
|
||||
fakelogging = FakeLogging()
|
||||
fakeclient = FakeSwiftClient()
|
||||
fakeconnector = fakeclient.client()
|
||||
fakeswclient = fakeconnector.Connection()
|
||||
backup_opt.sw_connector = fakeswclient
|
||||
faketime = FakeTime()
|
||||
def test__get_backups_increment(self):
|
||||
backups = swift.SwiftStorage._get_backups(self.increments)
|
||||
self.assertEqual(len(backups), 1)
|
||||
self.backup.add_increment(self.increment)
|
||||
backup = backups[0]
|
||||
self.assertEqual(backup, self.backup)
|
||||
|
||||
monkeypatch.setattr(logging, 'critical', fakelogging.critical)
|
||||
monkeypatch.setattr(logging, 'warning', fakelogging.warning)
|
||||
monkeypatch.setattr(logging, 'exception', fakelogging.exception)
|
||||
monkeypatch.setattr(logging, 'error', fakelogging.error)
|
||||
monkeypatch.setattr(time, 'sleep', faketime.sleep)
|
||||
|
||||
assert _remove_object(fakeswclient, 'container', 'obj_name') is None
|
||||
|
||||
fakeswclient.num_try = 59
|
||||
assert _remove_object(fakeswclient, 'container', 'obj_name') is None
|
||||
|
||||
fakeswclient.num_try = 60
|
||||
pytest.raises(Exception, _remove_object, fakeclient, 'container', 'obj_name')
|
||||
|
||||
def test_remove_object(self, monkeypatch):
|
||||
backup_opt = BackupOpt1()
|
||||
fakelogging = FakeLogging()
|
||||
fakeclient = FakeSwiftClient()
|
||||
fakeconnector = fakeclient.client()
|
||||
fakeswclient = fakeconnector.Connection()
|
||||
backup_opt.sw_connector = fakeswclient
|
||||
faketime = FakeTime()
|
||||
|
||||
monkeypatch.setattr(logging, 'critical', fakelogging.critical)
|
||||
monkeypatch.setattr(logging, 'warning', fakelogging.warning)
|
||||
monkeypatch.setattr(logging, 'exception', fakelogging.exception)
|
||||
monkeypatch.setattr(logging, 'error', fakelogging.error)
|
||||
monkeypatch.setattr(time, 'sleep', faketime.sleep)
|
||||
|
||||
assert remove_object(fakeswclient, 'freezer_segments', 'has_segments') is None
|
||||
|
||||
def test_remove_obj_older_than(self, monkeypatch):
|
||||
|
||||
backup_opt = BackupOpt1()
|
||||
fakelogging = FakeLogging()
|
||||
fakeclient = FakeSwiftClient1()
|
||||
fakeconnector = fakeclient.client()
|
||||
fakeswclient = fakeconnector.Connection()
|
||||
backup_opt.sw_connector = fakeswclient
|
||||
faketime = FakeTime()
|
||||
|
||||
monkeypatch.setattr(logging, 'critical', fakelogging.critical)
|
||||
monkeypatch.setattr(logging, 'warning', fakelogging.warning)
|
||||
monkeypatch.setattr(logging, 'exception', fakelogging.exception)
|
||||
monkeypatch.setattr(logging, 'error', fakelogging.error)
|
||||
monkeypatch.setattr(time, 'sleep', faketime.sleep)
|
||||
|
||||
backup_opt = BackupOpt1()
|
||||
backup_opt.__dict__['remove_older_than'] = None
|
||||
backup_opt.__dict__['remove_from_date'] = '2014-12-03T23:23:23'
|
||||
assert remove_obj_older_than(backup_opt) is None
|
||||
|
||||
backup_opt = BackupOpt1()
|
||||
backup_opt.__dict__['remove_older_than'] = 0
|
||||
backup_opt.__dict__['remove_from_date'] = None
|
||||
assert remove_obj_older_than(backup_opt) is None
|
||||
|
||||
backup_opt = BackupOpt1()
|
||||
backup_opt.__dict__['remote_obj_list'] = []
|
||||
assert remove_obj_older_than(backup_opt) is None
|
||||
|
||||
def test_get_container_content(self, monkeypatch):
|
||||
|
||||
backup_opt = BackupOpt1()
|
||||
fakelogging = FakeLogging()
|
||||
|
||||
monkeypatch.setattr(logging, 'critical', fakelogging.critical)
|
||||
monkeypatch.setattr(logging, 'warning', fakelogging.warning)
|
||||
monkeypatch.setattr(logging, 'exception', fakelogging.exception)
|
||||
monkeypatch.setattr(logging, 'error', fakelogging.error)
|
||||
|
||||
assert get_container_content(backup_opt.client_manager,
|
||||
backup_opt.container) is not False
|
||||
assert get_container_content(backup_opt.client_manager,
|
||||
backup_opt.container) is not None
|
||||
|
||||
def test_manifest_upload(self, monkeypatch):
|
||||
|
||||
backup_opt = BackupOpt1()
|
||||
fakelogging = FakeLogging()
|
||||
|
||||
monkeypatch.setattr(logging, 'critical', fakelogging.critical)
|
||||
monkeypatch.setattr(logging, 'warning', fakelogging.warning)
|
||||
monkeypatch.setattr(logging, 'exception', fakelogging.exception)
|
||||
monkeypatch.setattr(logging, 'error', fakelogging.error)
|
||||
|
||||
file_prefix = '000000'
|
||||
manifest_meta_dict = {'x-object-manifest': 'test-x-object'}
|
||||
storage = SwiftStorage(backup_opt.client_manager, backup_opt.container)
|
||||
|
||||
assert storage.upload_manifest(file_prefix, manifest_meta_dict) is None
|
||||
|
||||
manifest_meta_dict = {}
|
||||
pytest.raises(
|
||||
Exception, storage.upload_manifest,
|
||||
file_prefix, manifest_meta_dict)
|
||||
|
||||
def test_object_to_stream(self, monkeypatch):
|
||||
|
||||
backup_opt = BackupOpt1()
|
||||
fakelogging = FakeLogging()
|
||||
|
||||
monkeypatch.setattr(logging, 'critical', fakelogging.critical)
|
||||
monkeypatch.setattr(logging, 'warning', fakelogging.warning)
|
||||
monkeypatch.setattr(logging, 'exception', fakelogging.exception)
|
||||
monkeypatch.setattr(logging, 'error', fakelogging.error)
|
||||
|
||||
obj_name = 'test-obj-name'
|
||||
fakemultiprocessing = FakeMultiProcessing1()
|
||||
backup_pipe_read = backup_pipe_write = fakemultiprocessing.Pipe()
|
||||
|
||||
assert object_to_stream(
|
||||
backup_opt.container, backup_opt.client_manager,
|
||||
backup_pipe_write, backup_pipe_read, obj_name) is None
|
||||
def test__get_backups_increments(self):
|
||||
backups = swift.SwiftStorage._get_backups(self.cycles_increments)
|
||||
self.assertEqual(len(backups), 2)
|
||||
self.backup.add_increment(self.increment)
|
||||
self.backup_2.add_increment(self.increment_2)
|
||||
self.assertEqual(backups[0], self.backup)
|
||||
self.assertEqual(backups[1], self.backup_2)
|
||||
|
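The setUp lists above also document how SwiftStorage._get_backups groups raw object names: a backup counts only when its data object is present (a lone tar_metadata_* entry yields nothing), and level 1 objects are attached as increments to the level 0 backup with the same hostname and backup name. Roughly, for the cycles_increments list::

    # hostname_backup_1000_0  (+ tar_metadata_...)  -> Backup("hostname_backup", 1000, level 0)
    # hostname_backup_2000_1  (+ tar_metadata_...)  ->   increment, timestamp 2000, level 1
    # hostname_backup_3000_0  (+ tar_metadata_...)  -> Backup("hostname_backup", 3000, level 0)
    # hostname_backup_4000_1  (+ tar_metadata_...)  ->   increment, timestamp 4000, level 1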
@ -47,14 +47,13 @@ class TestTar:
|
||||
monkeypatch.setattr(os, 'remove', fakeos.remove)
|
||||
monkeypatch.setattr(
|
||||
subprocess.Popen, 'communicate', fakesubprocesspopen.communicate)
|
||||
monkeypatch.setattr(
|
||||
subprocess, 'Popen', fakesubprocesspopen)
|
||||
monkeypatch.setattr(subprocess, 'Popen', fakesubprocesspopen)
|
||||
monkeypatch.setattr(logging, 'critical', fakelogging.critical)
|
||||
monkeypatch.setattr(logging, 'warning', fakelogging.warning)
|
||||
monkeypatch.setattr(logging, 'exception', fakelogging.exception)
|
||||
monkeypatch.setattr(logging, 'error', fakelogging.error)
|
||||
|
||||
pytest.raises(SystemExit, tar_restore, backup_opt, fakepipe)
|
||||
pytest.raises(SystemExit, tar_restore, "", "", fakepipe)
|
||||
|
||||
fakesubprocess = FakeSubProcess()
|
||||
fakesubprocesspopen = fakesubprocess.Popen()
|
||||
@ -62,16 +61,16 @@ class TestTar:
|
||||
subprocess.Popen, 'communicate', fakesubprocesspopen.communicate)
|
||||
monkeypatch.setattr(
|
||||
subprocess, 'Popen', fakesubprocesspopen)
|
||||
assert tar_restore(backup_opt, fakepipe) is None
|
||||
assert tar_restore("", "", fakepipe) is None
|
||||
|
||||
# expected_tar_cmd = 'gzip -dc | tar -xf - --unlink-first --ignore-zeros'
|
||||
monkeypatch.setattr(winutils, 'is_windows', True)
|
||||
fake_os = Os()
|
||||
monkeypatch.setattr(os, 'chdir', fake_os.chdir)
|
||||
assert tar_restore(backup_opt, fakepipe) is None
|
||||
assert tar_restore("", "", fakepipe) is None
|
||||
|
||||
monkeypatch.setattr(os, 'chdir', fake_os.chdir2)
|
||||
pytest.raises(Exception, tar_restore(backup_opt, fakepipe))
|
||||
pytest.raises(Exception, tar_restore(backup_opt, "", fakepipe))
|
||||
|
||||
def test_tar_backup(self, monkeypatch):
|
||||
|
||||
@ -92,7 +91,7 @@ class TestTar:
|
||||
monkeypatch.setattr(logging, 'error', fakelogging.error)
|
||||
|
||||
backup_opt.__dict__['max_segment_size'] = 1
|
||||
assert tar_backup(backup_opt, 'tar_command', fakebackup_queue) is not False
|
||||
assert tar_backup(backup_opt, 100, 'tar_command', fakebackup_queue) is not False
|
||||
|
||||
def test_tar_restore_args_valid(self, monkeypatch):
|
||||
|
||||
|
46
tests/test_tar_builders.py
Normal file
@ -0,0 +1,46 @@
import unittest
from freezer import tar


class TestTarCommandBuilder(unittest.TestCase):

    def setUp(self):
        self.builder = tar.TarCommandBuilder("gnutar", ".")

    def test_build(self):
        self.assertEquals(
            self.builder.build(),
            "gnutar --create -z --warning=none --no-check-device "
            "--one-file-system --preserve-permissions "
            "--same-owner --seek --ignore-failed-read .")

    def test_build_listed(self):
        self.builder.set_listed_incremental("listed-file.tar")
        self.assertEquals(
            self.builder.build(),
            "gnutar --create -z --warning=none --no-check-device "
            "--one-file-system --preserve-permissions --same-owner --seek "
            "--ignore-failed-read --listed-incremental=listed-file.tar .")

    def test_build_every_arg(self):
        self.builder.set_listed_incremental("listed-file.tar")
        self.builder.set_encryption("openssl", "encrypt_pass_file")
        self.builder.set_dereference("hard")
        self.builder.set_exclude("excluded_files")
        self.assertEquals(
            self.builder.build(),
            "gnutar --create -z --warning=none --no-check-device "
            "--one-file-system --preserve-permissions --same-owner "
            "--seek --ignore-failed-read --listed-incremental=listed-file.tar "
            "--exclude=\"excluded_files\" . | openssl enc -aes-256-cfb -pass "
            "file:encrypt_pass_file")


class TestTarCommandRestoreBuilder(unittest.TestCase):
    def setUp(self):
        self.builder = tar.TarCommandRestoreBuilder("gnutar", "restore_path")

    def test(self):
        self.assertEquals(
            self.builder.build(),
            "gnutar -z --incremental --extract --unlink-first --ignore-zeros "
            "--warning=none --overwrite --directory restore_path")
@ -1,15 +1,10 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
from freezer.utils import (
|
||||
gen_manifest_meta, validate_all_args,
|
||||
sort_backup_list, create_dir, get_vol_fs_type,
|
||||
validate_all_args,
|
||||
create_dir, get_vol_fs_type,
|
||||
get_mount_from_path, human2bytes, DateTime, date_to_timestamp)
|
||||
|
||||
from freezer.swift import (get_match_backup,
|
||||
get_newest_backup,get_rel_oldest_backup,
|
||||
eval_restart_backup, set_backup_level,
|
||||
check_backup_and_tar_meta_existence)
|
||||
from freezer import swift
|
||||
import pytest
|
||||
import datetime
|
||||
from commons import *
|
||||
@ -17,28 +12,6 @@ from commons import *
|
||||
|
||||
class TestUtils:
|
||||
|
||||
def test_gen_manifest_meta(self):
|
||||
|
||||
backup_opt = BackupOpt1()
|
||||
manifest_meta = {}
|
||||
|
||||
gen_manifest_meta(
|
||||
backup_opt, manifest_meta, meta_data_backup_file='testfile')
|
||||
|
||||
manifest_meta['x-object-meta-tar-meta-obj-name'] = 'testtar'
|
||||
gen_manifest_meta(
|
||||
backup_opt, manifest_meta, meta_data_backup_file='testfile')
|
||||
del manifest_meta['x-object-meta-tar-meta-obj-name']
|
||||
|
||||
manifest_meta['x-object-meta-tar-prev-meta-obj-name'] = 'testtar'
|
||||
gen_manifest_meta(
|
||||
backup_opt, manifest_meta, meta_data_backup_file='testfile')
|
||||
del manifest_meta['x-object-meta-tar-prev-meta-obj-name']
|
||||
|
||||
backup_opt.__dict__['encrypt_pass_file'] = False
|
||||
gen_manifest_meta(
|
||||
backup_opt, manifest_meta, meta_data_backup_file='testfile')
|
||||
|
||||
def test_validate_all_args(self):
|
||||
|
||||
elements1 = ['test1', 'test2', 'test3']
|
||||
@ -49,23 +22,6 @@ class TestUtils:
|
||||
assert validate_all_args(elements2) is False
|
||||
pytest.raises(Exception, validate_all_args, elements3)
|
||||
|
||||
def test_sort_backup_list(self):
|
||||
|
||||
sorted_backups = sort_backup_list(BackupOpt1().remote_match_backup)
|
||||
|
||||
sort_params = map(
|
||||
lambda x: map(lambda y: int(y), x.rsplit('_', 2)[-2:]),
|
||||
sorted_backups)
|
||||
|
||||
(max_time, max_level) = sort_params[0]
|
||||
|
||||
for param in sort_params:
|
||||
(backup_time, level) = param
|
||||
assert not backup_time > max_time
|
||||
assert not (backup_time == max_time and level > max_level)
|
||||
max_time = backup_time
|
||||
max_level = level
|
||||
|
||||
def test_create_dir(self, monkeypatch):
|
||||
|
||||
dir1 = '/tmp'
|
||||
@ -80,101 +36,6 @@ class TestUtils:
|
||||
monkeypatch.setattr(os, 'makedirs', fakeos.makedirs2)
|
||||
pytest.raises(Exception, create_dir, dir2)
|
||||
|
||||
def test_get_match_backup(self):
|
||||
|
||||
backup_opt = BackupOpt1()
|
||||
|
||||
assert len(get_match_backup(backup_opt.backup_name,
|
||||
backup_opt.hostname,
|
||||
[{'name': 'test-hostname_test-backup-name_1234567_0'}])) > 0
|
||||
|
||||
def test_get_newest_backup(self):
|
||||
|
||||
# no backups
|
||||
pytest.raises(Exception, get_newest_backup, "hostname", "backup", [])
|
||||
|
||||
# different hostname
|
||||
pytest.raises(Exception, get_newest_backup, "hostname", "backup",
|
||||
["notthesamename_backup_1234_12",
|
||||
"tar_metadata_hostname_backup_1234_2"])
|
||||
|
||||
# no tar file
|
||||
pytest.raises(Exception, get_newest_backup, "hostname", "backup",
|
||||
["hostname_backup_1234_2"])
|
||||
|
||||
assert get_newest_backup("hostname", "backup",
|
||||
["hostname_backup_1234_2", "tar_metadata_hostname_backup_1234_2"]) == \
|
||||
"hostname_backup_1234_2"
|
||||
|
||||
def test_get_rel_oldest_backup(self):
|
||||
remote_rel_oldest = get_rel_oldest_backup("test-hostname",
|
||||
"test-backup-name",
|
||||
[{"name": "test-hostname_test-backup-name_1234569_0"}])
|
||||
assert len(remote_rel_oldest) > 0
|
||||
|
||||
def test_eval_restart_backup(self, monkeypatch):
|
||||
|
||||
backup_opt = BackupOpt1()
|
||||
assert eval_restart_backup(backup_opt) is False
|
||||
|
||||
backup_opt.__dict__['restart_always_level'] = None
|
||||
assert eval_restart_backup(backup_opt) is False
|
||||
|
||||
backup_opt = BackupOpt1()
|
||||
assert eval_restart_backup(backup_opt) is False
|
||||
|
||||
backup_opt = BackupOpt1()
|
||||
fakere2 = FakeRe2()
|
||||
monkeypatch.setattr(re, 'search', fakere2.search)
|
||||
assert eval_restart_backup(backup_opt) is not None
|
||||
#pytest.raises(Exception, eval_restart_backup, backup_opt)
|
||||
|
||||
def test_set_backup_level(self):
|
||||
|
||||
manifest_meta = dict()
|
||||
backup_opt = BackupOpt1()
|
||||
backup_opt.__dict__['no_incremental'] = False
|
||||
manifest_meta['x-object-meta-backup-name'] = True
|
||||
manifest_meta['x-object-meta-backup-current-level'] = 1
|
||||
manifest_meta['x-object-meta-always-backup-level'] = 3
|
||||
manifest_meta['x-object-meta-restart-always-backup'] = 3
|
||||
|
||||
(backup_opt, manifest_meta_dict) = set_backup_level(
|
||||
backup_opt, manifest_meta)
|
||||
assert manifest_meta['x-object-meta-backup-current-level'] is not False
|
||||
|
||||
backup_opt = BackupOpt1()
|
||||
backup_opt.__dict__['no_incremental'] = False
|
||||
manifest_meta['x-object-meta-maximum-backup-level'] = 2
|
||||
(backup_opt, manifest_meta_dict) = set_backup_level(
|
||||
backup_opt, manifest_meta)
|
||||
assert manifest_meta['x-object-meta-backup-current-level'] is not False
|
||||
|
||||
backup_opt = BackupOpt1()
|
||||
backup_opt.__dict__['no_incremental'] = False
|
||||
backup_opt.__dict__['curr_backup_level'] = 1
|
||||
(backup_opt, manifest_meta_dict) = set_backup_level(
|
||||
backup_opt, manifest_meta)
|
||||
assert manifest_meta['x-object-meta-backup-current-level'] is not False
|
||||
|
||||
manifest_meta = dict()
|
||||
backup_opt = BackupOpt1()
|
||||
backup_opt.__dict__['no_incremental'] = False
|
||||
manifest_meta['x-object-meta-backup-name'] = False
|
||||
manifest_meta['x-object-meta-maximum-backup-level'] = 0
|
||||
manifest_meta['x-object-meta-backup-current-level'] = 1
|
||||
(backup_opt, manifest_meta) = set_backup_level(
|
||||
backup_opt, manifest_meta)
|
||||
assert manifest_meta['x-object-meta-backup-current-level'] == '0'
|
||||
|
||||
manifest_meta = dict()
|
||||
backup_opt = BackupOpt1()
|
||||
backup_opt.__dict__['max_level'] = False
|
||||
backup_opt.__dict__['always_level'] = False
|
||||
(backup_opt, manifest_meta) = set_backup_level(
|
||||
backup_opt, manifest_meta)
|
||||
assert manifest_meta['x-object-meta-backup-current-level'] == '0'
|
||||
|
||||
def test_get_vol_fs_type(self, monkeypatch):
|
||||
|
||||
backup_opt = BackupOpt1()
|
||||
@ -189,19 +50,6 @@ class TestUtils:
|
||||
monkeypatch.setattr(re, 'search', fakere.search)
|
||||
assert type(get_vol_fs_type(backup_opt)) is str
|
||||
|
||||
def test_check_backup_existance(self, monkeypatch):
|
||||
|
||||
backup_opt = BackupOpt1()
|
||||
backup_opt.__dict__['backup_name'] = None
|
||||
assert type(check_backup_and_tar_meta_existence(backup_opt)) is dict
|
||||
|
||||
backup_opt = BackupOpt1()
|
||||
assert type(check_backup_and_tar_meta_existence(backup_opt)) is dict
|
||||
|
||||
fake_get_newest_backup = Fakeget_newest_backup()
|
||||
monkeypatch.setattr(swift, 'get_newest_backup', fake_get_newest_backup)
|
||||
assert type(check_backup_and_tar_meta_existence(backup_opt)) is dict
|
||||
|
||||
def test_get_mount_from_path(self):
|
||||
dir1 = '/tmp'
|
||||
dir2 = '/tmp/nonexistentpathasdf'
|
||||
@ -239,19 +87,6 @@ class TestUtils:
|
||||
assert options.region_name is None
|
||||
assert options.tenant_id is None
|
||||
|
||||
def test_OpenstackOption_creation_error_for_missing_parameter(self):
|
||||
env_dict = dict(OS_TENANT_NAME='testtenantename', OS_AUTH_URL='testauthurl', OS_PASSWORD='testpassword')
|
||||
pytest.raises(Exception, OpenstackOptions.create_from_dict, env_dict)
|
||||
|
||||
env_dict = dict(OS_USERNAME='testusername', OS_AUTH_URL='testauthurl', OS_PASSWORD='testpassword')
|
||||
pytest.raises(Exception, OpenstackOptions.create_from_dict, env_dict)
|
||||
|
||||
env_dict = dict(OS_USERNAME='testusername', OS_TENANT_NAME='testtenantename', OS_PASSWORD='testpassword')
|
||||
pytest.raises(Exception, OpenstackOptions.create_from_dict, env_dict)
|
||||
|
||||
env_dict = dict(OS_USERNAME='testusername', OS_TENANT_NAME='testtenantename', OS_AUTH_URL='testauthurl')
|
||||
pytest.raises(Exception, OpenstackOptions.create_from_dict, env_dict)
|
||||
|
||||
def test_date_to_timestamp(self):
|
||||
# ensure that the timestamp is checked with the appropriate timezone offset
|
||||
assert (1417649003+time.timezone) == date_to_timestamp("2014-12-03T23:23:23")