Merge "Pluggable storages"
commit 643d823cf2
@@ -409,14 +409,10 @@ def backup_arguments(args_dict={}):

    # Set additional namespace attributes
    backup_args.__dict__['remote_match_backup'] = []
    backup_args.__dict__['remote_objects'] = []
    backup_args.__dict__['remote_obj_list'] = []
    backup_args.__dict__['remote_newest_backup'] = u''
    # Set default workdir to ~/.freezer
    backup_args.__dict__['workdir'] = os.path.join(home, '.freezer')
    # Create a new namespace attribute for container_segments
    backup_args.__dict__['container_segments'] = u'{0}_segments'.format(
        backup_args.container)
    backup_args.__dict__['work_dir'] = os.path.join(home, '.freezer')

    # The containers used by freezer to executed backups needs to have
    # freezer_ prefix in the name. If the user provider container doesn't

@@ -426,8 +422,6 @@ def backup_arguments(args_dict={}):
    if not backup_args.container.startswith('freezer_'):
        backup_args.container = 'freezer_{0}'.format(
            backup_args.container)
        backup_args.container_segments = 'freezer_{0}'.format(
            backup_args.container_segments)

    # If hostname is not set, hostname of the current node will be used
    if not backup_args.hostname:
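Two naming conventions are at work in these hunks: backup containers carry a freezer_ prefix, and segment objects live in a sibling container with a _segments suffix. An illustrative restatement (the helper normalize_container is hypothetical; the suffix logic matches the u'{0}_segments' format above and freezer.utils.segments_name):

def normalize_container(container):
    # Backup containers must carry the freezer_ prefix (hypothetical helper
    # restating the hunk above).
    if not container.startswith('freezer_'):
        container = 'freezer_{0}'.format(container)
    return container

def segments_name(container):
    # Segment objects go to a sibling container with the _segments suffix.
    return u'{0}_segments'.format(container)

assert normalize_container('data') == 'freezer_data'
assert segments_name('freezer_data') == 'freezer_data_segments'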
@@ -27,9 +27,8 @@ from os.path import expanduser
import time

from freezer.lvm import lvm_snap, lvm_snap_remove, get_lvm_info
from freezer.tar import tar_backup, gen_tar_command
from freezer.swift import add_object, manifest_upload
from freezer.utils import gen_manifest_meta, add_host_name_ts_level
from freezer.tar import tar_backup, TarCommandBuilder
from freezer.utils import gen_manifest_meta, create_dir
from freezer.vss import vss_create_shadow_copy
from freezer.vss import vss_delete_shadow_copy
from freezer.winutils import start_sql_server

@@ -148,10 +147,9 @@ def backup_mode_mongo(backup_opt_dict, time_stamp, manifest_meta_dict):

class BackupOs:

    def __init__(self, client_manager, container, container_segments):
    def __init__(self, client_manager, container):
        self.client_manager = client_manager
        self.container = container
        self.container_segments = container_segments

    def backup_nova(self, instance_id, time_stamp):
        """

@@ -182,7 +180,6 @@ class BackupOs:
        headers = {"x-object-meta-name": instance._info['name'],
                   "x-object-meta-tenant_id": instance._info['tenant_id']}
        swift.add_stream(client_manager,
                         self.container_segments,
                         self.container, stream, package, headers)
        logging.info("[*] Deleting temporary image")
        glance.images.delete(image)

@@ -212,7 +209,6 @@ class BackupOs:
        logging.info("[*] Uploading image to swift")
        headers = {}
        swift.add_stream(self.client_manager,
                         self.container_segments,
                         self.container, stream, package, headers=headers)
        logging.info("[*] Deleting temporary snapshot")
        client_manager.clean_snapshot(snapshot)

@@ -230,8 +226,7 @@ class BackupOs:
def backup(backup_opt_dict, time_stamp, manifest_meta_dict):
    backup_media = backup_opt_dict.backup_media
    backup_os = BackupOs(backup_opt_dict.client_manager,
                         backup_opt_dict.container,
                         backup_opt_dict.container_segments)
                         backup_opt_dict.container)
    if backup_media == 'fs':
        backup_mode_fs(backup_opt_dict, time_stamp, manifest_meta_dict)
    elif backup_media == 'nova':

@@ -275,38 +270,94 @@ def backup_mode_fs(backup_opt_dict, time_stamp, manifest_meta_dict):
        # Generate the lvm_snap if lvm arguments are available
        lvm_snap(backup_opt_dict)

        # Generate a string hostname, backup name, timestamp and backup level
        file_name = add_host_name_ts_level(backup_opt_dict, time_stamp)
        meta_data_backup_file = u'tar_metadata_{0}'.format(file_name)
        backup_opt_dict.meta_data_file = meta_data_backup_file
        file_name_f = u'{0}_{1}_{2}_{3}'.format(
            backup_opt_dict.hostname,
            backup_opt_dict.backup_name,
            time_stamp,
            backup_opt_dict.curr_backup_level)
        meta_data_backup_file = u'tar_metadata_{0}'.format(file_name_f)

        # Initialize a Queue for a maximum of 2 items
        tar_backup_queue = multiprocessing.Queue(maxsize=2)

        if is_windows():
            backup_opt_dict.absolute_path = backup_opt_dict.path_to_backup
            backup_opt_dict.path_to_backup = use_shadow(
                backup_opt_dict.path_to_backup,
                backup_opt_dict.volume)
            if backup_opt_dict.vssadmin:
                backup_opt_dict.path_to_backup = use_shadow(
                    backup_opt_dict.path_to_backup,
                    backup_opt_dict.windows_volume)

        # Execute a tar gzip of the specified directory and return
        # small chunks (default 128MB), timestamp, backup, filename,
        # file chunk index and the tar meta-data file
        (backup_opt_dict, tar_command, manifest_meta_dict) = \
            gen_tar_command(opt_dict=backup_opt_dict,
                            time_stamp=time_stamp,
                            remote_manifest_meta=manifest_meta_dict)
        path_to_backup = backup_opt_dict.path_to_backup
        # Change che current working directory to op_dict.path_to_backup
        os.chdir(os.path.normpath(path_to_backup.strip()))

        logging.info('[*] Changing current working directory to: {0} \
        '.format(path_to_backup))
        logging.info('[*] Backup started for: {0}'.format(path_to_backup))

        builder = TarCommandBuilder(backup_opt_dict.tar_path)
        builder.set_dereference(backup_opt_dict.dereference_symlink)
        curr_backup_level = manifest_meta_dict.get(
            'x-object-meta-backup-current-level', '0')
        tar_meta = manifest_meta_dict.get('x-object-meta-tar-meta-obj-name')

        if not backup_opt_dict.no_incremental:
            builder.set_level(curr_backup_level)
            builder.set_work_dir(backup_opt_dict.work_dir)
            if tar_meta:
                builder.set_listed_incremental(tar_meta)
            else:
                builder.set_listed_incremental(meta_data_backup_file)
        if backup_opt_dict.exclude:
            builder.set_exclude(backup_opt_dict.exclude)

        # Incremental backup section
        if not backup_opt_dict.no_incremental:

            if not os.path.exists(backup_opt_dict.path_to_backup):
                raise Exception('Error: path-to-backup does not exist')
            # Write the tar meta data file in ~/.freezer. It will be
            # removed later on. If ~/.freezer does not exists it will
            # be created'.
            create_dir(backup_opt_dict.work_dir)

            if tar_meta:
                sw_connector = backup_opt_dict.client_manager.get_swift()
                tar_meta_abs = "{0}/{1}".format(backup_opt_dict.work_dir,
                                                tar_meta)

                file_name = tar_meta_abs.split('/')[-1]
                logging.info('[*] Downloading object {0} on {1}'.format(
                    file_name, tar_meta_abs))

                if os.path.exists(tar_meta_abs):
                    os.remove(tar_meta_abs)

                with open(tar_meta_abs, 'ab') as obj_fd:
                    for obj_chunk in sw_connector.get_object(
                            backup_opt_dict.container, file_name,
                            resp_chunk_size=16000000)[1]:
                        obj_fd.write(obj_chunk)

        # Encrypt data if passfile is provided
        if backup_opt_dict.encrypt_pass_file:
            builder.set_encryption(
                backup_opt_dict.openssl_path,
                backup_opt_dict.encrypt_pass_file)

        tar_backup_stream = multiprocessing.Process(
            target=tar_backup, args=(
                backup_opt_dict, tar_command, tar_backup_queue,))
                backup_opt_dict, builder.build(), tar_backup_queue,))

        tar_backup_stream.daemon = True
        tar_backup_stream.start()

        add_object = backup_opt_dict.storage.add_object

        add_object_stream = multiprocessing.Process(
            target=add_object, args=(
                backup_opt_dict, tar_backup_queue, file_name, time_stamp))
                backup_opt_dict.max_segment_size, tar_backup_queue,
                file_name_f, time_stamp))
        add_object_stream.daemon = True
        add_object_stream.start()
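The hunk above turns the upload path into a two-process pipeline: tar_backup produces {chunk_index: chunk} items on a bounded multiprocessing.Queue, and the pluggable storage's add_object consumes them until a falsy sentinel pair arrives. A minimal self-contained sketch of the same pattern (names and chunk data are illustrative):

import multiprocessing

def producer(queue):
    # Emit indexed chunks, then a falsy sentinel pair to stop the consumer.
    for i, chunk in enumerate([b'aaa', b'bbb', b'ccc']):
        queue.put({'%08d' % i: chunk})
    queue.put({'': ''})

def consumer(queue):
    index, chunk = queue.get().popitem()
    while index or chunk:
        print('uploading chunk %s (%d bytes)' % (index, len(chunk)))
        index, chunk = queue.get().popitem()

if __name__ == '__main__':
    q = multiprocessing.Queue(maxsize=2)  # bounded, as in backup_mode_fs
    p = multiprocessing.Process(target=producer, args=(q,))
    c = multiprocessing.Process(target=consumer, args=(q,))
    p.daemon = c.daemon = True
    p.start()
    c.start()
    p.join()
    c.join()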
@@ -322,8 +373,7 @@ def backup_mode_fs(backup_opt_dict, time_stamp, manifest_meta_dict):
            tar_meta_prev) = gen_manifest_meta(
                backup_opt_dict, manifest_meta_dict, meta_data_backup_file)

        manifest_file = u''
        meta_data_abs_path = os.path.join(backup_opt_dict.workdir,
        meta_data_abs_path = os.path.join(backup_opt_dict.work_dir,
                                          tar_meta_prev)

        client_manager = backup_opt_dict.client_manager

@@ -345,9 +395,8 @@ def backup_mode_fs(backup_opt_dict, time_stamp, manifest_meta_dict):
            logging.info('[*] Removing tar meta data file: {0}'.format(
                meta_data_abs_path))
            os.remove(meta_data_abs_path)
        # Upload manifest to swift
        manifest_upload(
            manifest_file, backup_opt_dict, file_name, manifest_meta_dict)
        backup_opt_dict.storage.upload_manifest(file_name_f,
                                                manifest_meta_dict)

    finally:
        if is_windows():
@@ -23,7 +23,6 @@ from freezer import swift
from freezer import utils
from freezer import backup
from freezer import restore
from freezer.osclients import ClientManager

import logging
from freezer.restore import RestoreOs

@@ -44,16 +43,12 @@ class Job:
            logging.info('[*] Job execution Started at: {0}'.
                         format(self.start_time))

            if not hasattr(self.conf, 'client_manager'):
                self.conf.client_manager = ClientManager(
                    self.conf.options,
                    self.conf.insecure,
                    self.conf.download_limit,
                    self.conf.upload_limit,
                    self.conf.os_auth_ver,
                    self.conf.dry_run
                )
            self.conf = swift.get_containers_list(self.conf)
            try:
                sw_connector = self.conf.client_manager.get_swift()
                self.conf.containers_list = sw_connector.get_account()[1]
            except Exception as error:
                raise Exception('Get containers list error: {0}'.format(error))

            retval = func(self)

            end_time = utils.DateTime.now()

@@ -69,15 +64,13 @@ class InfoJob(Job):
    @Job.executemethod
    def execute(self):
        if self.conf.list_containers:
            swift.show_containers(self.conf)
            swift.show_containers(self.conf.containers_list)
        elif self.conf.list_objects:
            containers = swift.check_container_existance(self.conf)
            if containers['main_container'] is not True:
            if not self.conf.storage.ready():
                logging.critical(
                    '[*] Container {0} not available'.format(
                        self.conf.container))
                return False
            self.conf = swift.get_container_content(self.conf)
            swift.show_objects(self.conf)
        else:
            logging.warning(

@@ -89,11 +82,7 @@ class InfoJob(Job):
class BackupJob(Job):
    @Job.executemethod
    def execute(self):
        containers = swift.check_container_existance(self.conf)

        if containers['main_container'] is not True:
            swift.create_containers(self.conf)

        self.conf.storage.prepare()
        if self.conf.no_incremental:
            if self.conf.max_level or \
                    self.conf.always_level:

@@ -102,16 +91,12 @@ class BackupJob(Job):
                    'with backup level options')
            manifest_meta_dict = {}
        else:
            # Get the object list of the remote containers
            # and store it in self.conf.remote_obj_list
            self.conf = swift.get_container_content(self.conf)

            # Check if a backup exist in swift with same name.
            # If not, set backup level to 0
            manifest_meta_dict =\
                utils.check_backup_and_tar_meta_existence(self.conf)
                swift.check_backup_and_tar_meta_existence(self.conf)

            (self.conf, manifest_meta_dict) = utils.set_backup_level(
            (self.conf, manifest_meta_dict) = swift.set_backup_level(
                self.conf, manifest_meta_dict)

        self.conf.manifest_meta_dict = manifest_meta_dict

@@ -136,16 +121,13 @@ class RestoreJob(Job):
    def execute(self):
        logging.info('[*] Executing FS restore...')

        # Check if the provided container already exists in swift.
        containers = swift.check_container_existance(self.conf)
        if containers['main_container'] is not True:
        if not self.conf.storage.ready():
            raise ValueError('Container: {0} not found. Please provide an '
                             'existing container.'
                             .format(self.conf.container))

        # Get the object list of the remote containers and store it in the
        # same dict passes as argument under the dict.remote_obj_list namespace
        self.conf = swift.get_container_content(self.conf)
        res = RestoreOs(self.conf.client_manager, self.conf.container)
        restore_from_date = self.conf.restore_from_date
        backup_media = self.conf.backup_media

@@ -164,7 +146,6 @@ class RestoreJob(Job):
class AdminJob(Job):
    @Job.executemethod
    def execute(self):
        self.conf = swift.get_container_content(self.conf)
        swift.remove_obj_older_than(self.conf)
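Job.executemethod, partially visible above, is a decorator that wraps every job's execute() with shared setup and timing. A stripped-down sketch of that wrapper pattern (illustrative, not freezer's exact decorator body):

import functools
import time

class Job(object):
    @staticmethod
    def executemethod(func):
        # Wrap a job's execute(): log the start, call the real method,
        # then log the elapsed time (illustrative sketch).
        @functools.wraps(func)
        def wrapper(self):
            start_time = time.time()
            print('[*] Job execution Started at: {0}'.format(start_time))
            retval = func(self)
            print('[*] Job execution finished, took {0:.2f}s'.format(
                time.time() - start_time))
            return retval
        return wrapper

class InfoJob(Job):
    @Job.executemethod
    def execute(self):
        return True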
@@ -86,7 +86,7 @@ def lvm_snap_remove(backup_opt_dict):
            .format(mount_point, mount_err))
    else:
        # Change working directory to be able to unmount
        os.chdir(backup_opt_dict.workdir)
        os.chdir(backup_opt_dict.work_dir)
        logging.info('[*] Volume {0} unmounted'.format(
            mapper_snap_vol))
    snap_rm_proc = subprocess.Popen(
@@ -20,9 +20,12 @@ Hudson (tjh@cryptsoft.com).

Freezer main execution function
"""
from freezer.bandwidth import monkeypatch_socket_bandwidth

from freezer import job
from freezer.arguments import backup_arguments
from freezer.osclients import ClientManager
from freezer.storages.swiftstorage import SwiftStorage
from freezer.utils import create_dir
import os
import subprocess

@@ -114,6 +117,17 @@ def freezer_main(args={}):
    if backup_args.max_priority:
        set_max_process_priority()

    monkeypatch_socket_bandwidth(backup_args)

    backup_args.__dict__['client_manager'] = ClientManager(
        backup_args.options,
        backup_args.insecure,
        backup_args.os_auth_ver,
        backup_args.dry_run)

    backup_args.__dict__['storage'] = SwiftStorage(backup_args.client_manager,
                                                   backup_args.container)

    freezer_job = job.create_job(backup_args)
    freezer_job.execute()
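This wiring is the heart of the change: freezer_main builds one storage object and hangs it on the argument namespace, so jobs and backup_mode_fs call storage.add_object / upload_manifest / prepare / ready instead of reaching into the swift helpers directly. Swapping backends then comes down to one line at this seam; a hedged sketch (FileSystemStorage and storage_backend are hypothetical, not part of this commit):

from freezer.storages.swiftstorage import SwiftStorage

class FileSystemStorage(object):
    """Hypothetical second backend, shown only to illustrate the seam."""
    def __init__(self, root):
        self.root = root

    def prepare(self):
        pass

    def ready(self):
        return True

def build_storage(backup_args, client_manager):
    # The commit wires SwiftStorage unconditionally; a selector like this
    # hypothetical one is what the AbstractStorage seam makes possible.
    if getattr(backup_args, 'storage_backend', 'swift') == 'swift':
        return SwiftStorage(client_manager, backup_args.container)
    return FileSystemStorage(backup_args.container)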
@@ -1,27 +1,33 @@
from bandwidth import monkeypatch_bandwidth
from utils import Bunch
import logging
import time

from cinderclient.v1 import client as cclient
from glanceclient.v1 import client as gclient
from novaclient.v2 import client as nclient
import swiftclient

from utils import Bunch
from utils import ReSizeStream


class ClientManager:
    """
    :type swift: swiftclient.Connection
    :type glance: glanceclient.v1.client.Client
    :type nova: novaclient.v2.client.Client
    :type cinder: cinderclient.v1.client.Client
    """
    def __init__(self, options, insecure=True,
                 download_bytes_per_sec=-1, upload_bytes_per_sec=-1,
                 swift_auth_version=2, dry_run=False):
        """
        Creates manager of connections to swift, nova, glance and cinder
        :param options: OpenstackOptions
        :param insecure:
        :param download_bytes_per_sec: information about bandwidth throttling
        :param upload_bytes_per_sec: information about bandwidth throttling
        :param swift_auth_version:
        :param dry_run:
        :return:
        """
        self.options = options
        self.download_bytes_per_sec = download_bytes_per_sec
        self.upload_bytes_per_sec = upload_bytes_per_sec
        self.insecure = insecure
        self.swift_auth_version = swift_auth_version
        self.dry_run = dry_run

@@ -30,26 +36,38 @@ class ClientManager:
        self.glance = None
        self.nova = None

    def _monkey_patch(self):
        monkeypatch_bandwidth(self.download_bytes_per_sec,
                              self.upload_bytes_per_sec)

    def get_cinder(self):
        """
        :rtype cinderclient.v1.client.Client
        :return:
        """
        if not self.cinder:
            self.create_cinder()
        return self.cinder

    def get_swift(self):
        """
        :rtype swiftclient.Connection
        :return: instance of swift client
        """
        if not self.swift:
            self.create_swift()
        return self.swift

    def get_glance(self):
        """
        :rtype glanceclient.v1.client.Client
        :return:
        """
        if not self.glance:
            self.create_glance()
        return self.glance

    def get_nova(self):
        """
        :rtype
        :return:
        """
        if not self.nova:
            self.create_nova()
        return self.nova

@@ -57,13 +75,12 @@ class ClientManager:
    def create_cinder(self):
        """
        Creates client for cinder and caches it
        :return:
        :rtype cinderclient.v1.client.Client
        :return: instance of cinder client
        """
        from cinderclient.v1 import client
        self._monkey_patch()
        options = self.options
        logging.info("[*] Creation of cinder client")
        self.cinder = client.Client(
        self.cinder = cclient.Client(
            username=options.user_name,
            api_key=options.password,
            project_id=options.tenant_name,

@@ -76,10 +93,9 @@ class ClientManager:
    def create_swift(self):
        """
        Creates client for swift and caches it
        :return:
        :rtype swiftclient.Connection
        :return: instance of swift client
        """
        import swiftclient
        self._monkey_patch()
        options = self.options
        logging.info("[*] Creation of swift client")

@@ -98,12 +114,12 @@ class ClientManager:
    def create_glance(self):
        """
        Creates client for glance and caches it
        :return:
        :rtype glanceclient.v1.client.Client
        :return: instance of glance client
        """
        from glanceclient.v1 import client

        from glanceclient.shell import OpenStackImagesShell

        self._monkey_patch()
        options = self.options

        logging.info("[*] Creation of glance client")

@@ -116,7 +132,7 @@ class ClientManager:
            os_region_name=options.region_name,
            force_auth=False))

        self.glance = client.Client(endpoint=endpoint, token=token)
        self.glance = gclient.Client(endpoint=endpoint, token=token)
        return self.glance

    def create_nova(self):

@@ -124,13 +140,10 @@ class ClientManager:
        Creates client for nova and caches it
        :return:
        """
        from novaclient.v2 import client

        self._monkey_patch()
        options = self.options
        logging.info("[*] Creation of nova client")

        self.nova = client.Client(
        self.nova = nclient.Client(
            username=options.user_name,
            api_key=options.password,
            project_id=options.tenant_name,
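All the get_* accessors above share one lazy-initialization pattern: build the expensive client on first use, cache it, and return the cached instance afterwards (create_swift can also be called again to rebuild a stale connection). A generic, illustrative restatement:

class LazyClient(object):
    # Generic lazy-initialization accessor, mirroring ClientManager.get_swift.
    def __init__(self, factory):
        self._factory = factory
        self._client = None

    def get(self):
        if self._client is None:
            self._client = self._factory()  # built on first use, then cached
        return self._client

    def reset(self):
        # Equivalent of calling create_swift() again after a failure.
        self._client = None

connections = LazyClient(lambda: object())  # stand-in factory
assert connections.get() is connections.get()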
@@ -1,4 +1,4 @@
'''
"""
Copyright 2014 Hewlett-Packard

Licensed under the Apache License, Version 2.0 (the "License");

@@ -19,7 +19,7 @@ Hudson (tjh@cryptsoft.com).
========================================================================

Freezer restore modes related functions
'''
"""

import multiprocessing
import os

@@ -28,13 +28,13 @@ import re
import datetime

from freezer.tar import tar_restore
from freezer.swift import object_to_stream
from freezer.utils import (validate_all_args, get_match_backup,
                           sort_backup_list, date_to_timestamp, ReSizeStream)
from freezer import swift
from freezer.utils import (validate_all_args, sort_backup_list,
                           date_to_timestamp, ReSizeStream)


def restore_fs(backup_opt_dict):
    '''
    """
    Restore data from swift server to your local node. Data will be restored
    in the directory specified in backup_opt_dict.restore_abs_path. The
    object specified with the --get-object option will be downloaded from

@@ -46,12 +46,11 @@ def restore_fs(backup_opt_dict):
    the full restore will be executed. Please remember to stop any service
    that require access to the data before to start the restore execution
    and to start the service at the end of the restore execution
    '''
    """

    # List of mandatory values
    required_list = [
        os.path.exists(backup_opt_dict.restore_abs_path),
        backup_opt_dict.remote_obj_list,
        backup_opt_dict.container,
        backup_opt_dict.backup_name
    ]

@@ -76,31 +75,33 @@ def restore_fs(backup_opt_dict):
        backup_opt_dict.hostname = backup_opt_dict.restore_from_host

    # Check if there's a backup matching. If not raise Exception
    backup_opt_dict = get_match_backup(backup_opt_dict)
    if not backup_opt_dict.remote_match_backup:
        raise ValueError('No backup found matching for '
                         'backup name: {0}, hostname: {1}'
                         .format(backup_opt_dict.backup_name,
                                 backup_opt_dict.hostname))
    remote_obj_list = swift.get_container_content(
        backup_opt_dict.client_manager,
        backup_opt_dict.container)

    backup_opt_dict.remote_match_backup = \
        swift.get_match_backup(backup_opt_dict.backup_name,
                               backup_opt_dict.hostname,
                               remote_obj_list)
    restore_fs_sort_obj(backup_opt_dict)


def restore_fs_sort_obj(backup_opt_dict):
    '''
    """
    Take options dict as argument and sort/remove duplicate elements from
    backup_opt_dict.remote_match_backup and find the closes backup to the
    provided from backup_opt_dict.restore_from_date. Once the objects are
    looped backwards and the level 0 backup is found, along with the other
    level 1,2,n, is download the object from swift and untar them locally
    starting from level 0 to level N.
    '''
    """

    # Convert backup_opt_dict.restore_from_date to timestamp
    opt_backup_timestamp = date_to_timestamp(backup_opt_dict.restore_from_date)

    # Sort remote backup list using timestamp in reverse order,
    # that is from the newest to the oldest executed backup
    sorted_backups_list = sort_backup_list(backup_opt_dict)
    sorted_backups_list = sort_backup_list(backup_opt_dict.remote_match_backup)
    # Get the closest earlier backup to date set in
    # backup_opt_dict.restore_from_date
    closest_backup_list = []

@@ -132,8 +133,9 @@ def restore_fs_sort_obj(backup_opt_dict):
    for backup in closest_backup_list[::-1]:
        write_pipe, read_pipe = multiprocessing.Pipe()
        process_stream = multiprocessing.Process(
            target=object_to_stream, args=(
                backup_opt_dict, write_pipe, read_pipe, backup,))
            target=swift.object_to_stream, args=(
                backup_opt_dict.container, backup_opt_dict.client_manager,
                write_pipe, read_pipe, backup,))
        process_stream.daemon = True
        process_stream.start()
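The restore loop above streams every matched backup object through a multiprocessing.Pipe: a child process (swift.object_to_stream) pushes byte chunks into the write end while the parent consumes the read end (the real consumer is freezer.tar.tar_restore). A self-contained sketch of that hand-off:

import multiprocessing

def stream_chunks(write_pipe, read_pipe, chunks):
    # Child: close the unused read end, then push chunks downstream.
    read_pipe.close()
    for chunk in chunks:
        write_pipe.send_bytes(chunk)
    write_pipe.close()

if __name__ == '__main__':
    write_pipe, read_pipe = multiprocessing.Pipe()
    child = multiprocessing.Process(
        target=stream_chunks,
        args=(write_pipe, read_pipe, [b'level0', b'level1']))
    child.daemon = True
    child.start()
    write_pipe.close()  # parent keeps only the read end
    try:
        while True:
            print(read_pipe.recv_bytes())
    except EOFError:
        pass  # sender closed: stream finished
    child.join()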
@@ -28,6 +28,6 @@ $s2 = gwmi Win32_ShadowCopy | ? { $_.ID -eq $s1.ShadowID }
$d = $s2.DeviceObject + "\"

# create a symlink for the shadow path
cmd /c mklink /d $volume\shadowcopy "$d"
cmd /c mklink /d $volume\freezer_shadowcopy "$d"

echo "shadow id:" $s2
0 freezer/storages/__init__.py (new, empty file)

34 freezer/storages/storage.py (new file)
@@ -0,0 +1,34 @@
class AbstractStorage(object):

    def upload_manifest(self, name, meta_dict):
        """
        Manifest can be different for different types of storage.

        Each storage should have idea how to work with data.

        For example:
        Swift can create an empty file with metainformation or can
        create file with json (For example amount of information exceeds
        256 bytes (limit for metadata in Swift).

        FileSystem can create a file with information about descriptions,
        authors and etc.

        Amazon S3 can keep this information in its own manner.

        :param name: Name of manifest file
        :type name: str
        :param meta_dict: Dict with metainformation
        :type meta_dict: dict
        :return:
        """
        raise NotImplementedError("Should have implemented this")

    def upload_chunk(self, content, path):
        raise NotImplementedError("Should have implemented this")

    def prepare(self):
        raise NotImplementedError("Should have implemented this")

    def ready(self):
        raise NotImplementedError("Should have implemented this")
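AbstractStorage above is the entire pluggable-storage contract: four methods. A hedged sketch of what a hypothetical local-filesystem backend could look like (not part of this commit):

import json
import os

from freezer.storages.storage import AbstractStorage

class LocalStorage(AbstractStorage):
    """Hypothetical backend illustrating the AbstractStorage contract."""

    def __init__(self, root):
        self.root = root

    def prepare(self):
        # Counterpart of container creation in Swift.
        if not os.path.isdir(self.root):
            os.makedirs(self.root)

    def ready(self):
        return os.path.isdir(self.root)

    def upload_chunk(self, content, path):
        with open(os.path.join(self.root, path.replace('/', '_')), 'wb') as f:
            f.write(content)

    def upload_manifest(self, name, meta_dict):
        # A filesystem can simply persist the metadata as JSON.
        with open(os.path.join(self.root, name + '.manifest'), 'w') as f:
            json.dump(meta_dict, f)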
117 freezer/storages/swiftstorage.py (new file)
@@ -0,0 +1,117 @@
import logging
import time
from copy import deepcopy
from freezer.storages.storage import AbstractStorage
from freezer.utils import segments_name


class SwiftStorage(AbstractStorage):
    """
    :type client_manager: freezer.osclients.ClientManager
    """

    def __init__(self, client_manager, container):
        """
        :type client_manager: freezer.osclients.ClientManager
        :type container: str
        """
        self.client_manager = client_manager
        self.container = container
        self.segments = segments_name(container)

    def upload_chunk(self, content, path):
        """
        """
        # If for some reason the swift client object is not available anymore
        # an exception is generated and a new client object is initialized/
        # If the exception happens for 10 consecutive times for a total of
        # 1 hour, then the program will exit with an Exception.

        count = 0
        success = False
        while not success:
            try:
                logging.info(
                    '[*] Uploading file chunk index: {0}'.format(path))
                self.client_manager.get_swift().put_object(
                    self.segments, path, content,
                    content_type='application/octet-stream',
                    content_length=len(content))
                logging.info('[*] Data successfully uploaded!')
                success = True
            except Exception as error:
                logging.info(
                    '[*] Retrying to upload file chunk index: {0}'.format(
                        path))
                time.sleep(60)
                self.client_manager.create_swift()
                count += 1
                if count == 10:
                    logging.critical('[*] Error: add_object: {0}'
                                     .format(error))
                    raise Exception("cannot add object to storage")

    def upload_manifest(self, name, manifest_meta_dict):
        """
        Upload Manifest to manage segments in Swift

        :param name: Name of manifest file
        :type name: str
        :param manifest_meta_dict: Dict with metainformation
        :type manifest_meta_dict: dict
        """

        if not manifest_meta_dict:
            raise Exception('Manifest Meta dictionary not available')

        sw = self.client_manager.get_swift()
        self.client_manager.get_nova()
        tmp_manifest_meta = dict()
        for key, value in manifest_meta_dict.items():
            if key.startswith('x-object-meta'):
                tmp_manifest_meta[key] = value
        manifest_meta_dict = deepcopy(tmp_manifest_meta)
        header = manifest_meta_dict
        manifest_meta_dict['x-object-manifest'] = u'{0}/{1}'.format(
            self.segments, name.strip())
        logging.info('[*] Uploading Swift Manifest: {0}'.format(header))
        sw.put_object(container=self.container, obj=name,
                      contents=u'', headers=header)
        logging.info('[*] Manifest successfully uploaded!')

    def ready(self):
        return self.check_container_existence()[0]

    def prepare(self):
        containers = self.check_container_existence()
        if not containers[0]:
            self.client_manager.get_swift().put_container(self.container)
        if not containers[1]:
            self.client_manager.get_swift().put_container(
                segments_name(self.container))

    def check_container_existence(self):
        """
        Check if the provided container is already available on Swift.
        The verification is done by exact matching between the provided
        container name and the whole list of container available for the swift
        account.
        """
        sw_connector = self.client_manager.get_swift()
        containers_list = [c['name'] for c in sw_connector.get_account()[1]]
        return (self.container in containers_list,
                segments_name(self.container) in containers_list)

    def add_object(self, max_segment_size, backup_queue, absolute_file_path,
                   time_stamp):
        """
        Upload object on the remote swift server
        """
        file_chunk_index, file_chunk = backup_queue.get().popitem()
        package_name = absolute_file_path.split('/')[-1]
        while file_chunk_index or file_chunk:
            package_name = u'{0}/{1}/{2}/{3}'.format(
                package_name, time_stamp,
                max_segment_size, file_chunk_index)
            self.upload_chunk(file_chunk, package_name)
            file_chunk_index, file_chunk = backup_queue.get().popitem()
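End to end, the new backend is driven like this. A hedged usage sketch: options stands for a real OpenstackOptions instance with valid credentials (so this only runs against a reachable Swift), and the container name, segment size and timestamp are made up:

import multiprocessing

from freezer.osclients import ClientManager
from freezer.storages.swiftstorage import SwiftStorage

client_manager = ClientManager(options, insecure=True)  # options: placeholder
storage = SwiftStorage(client_manager, 'freezer_mydata')

storage.prepare()       # create container and its _segments sibling if missing
assert storage.ready()  # True once the main container exists

backup_queue = multiprocessing.Queue(maxsize=2)
backup_queue.put({'00000000': 'first chunk'})
backup_queue.put({'00000001': 'second chunk'})
backup_queue.put({'': ''})  # falsy pair: stops add_object's consume loop

storage.add_object(67108864, backup_queue, '/tmp/mydata', 1417649003)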
501 freezer/swift.py
@@ -20,53 +20,24 @@ Hudson (tjh@cryptsoft.com).

Freezer functions to interact with OpenStack Swift client and server
"""
from freezer.storages.swiftstorage import SwiftStorage

from freezer.utils import (
    validate_all_args, get_match_backup,
    sort_backup_list, DateTime)
import os
from freezer.utils import (sort_backup_list, DateTime, segments_name)
import json
import re
from copy import deepcopy
import time
import logging
import sys

RESP_CHUNK_SIZE = 65536


def create_containers(backup_opt):
    """Create backup containers
    The function is used to create object and segments
    containers

    :param backup_opt:
    :return: True if both containers are successfully created
    """

    # Create backup container
    logging.warning(
        "[*] Creating container {0}".format(backup_opt.container))
    sw_connector = backup_opt.client_manager.get_swift()
    sw_connector.put_container(backup_opt.container)

    # Create segments container
    logging.warning(
        "[*] Creating container segments: {0}".format(
            backup_opt.container_segments))
    sw_connector.put_container(backup_opt.container_segments)


def show_containers(backup_opt_dict):
def show_containers(containers_list):
    """
    Print remote containers in sorted order
    """

    if not backup_opt_dict.list_containers:
        return False

    ordered_container = {}
    for container in backup_opt_dict.containers_list:
    for container in containers_list:
        ordered_container['container_name'] = container['name']
        size = '{0}'.format((int(container['bytes']) / 1024) / 1024)
        if size == '0':

@@ -76,7 +47,6 @@ def show_containers(backup_opt_dict):
        print json.dumps(
            ordered_container, indent=4,
            separators=(',', ': '), sort_keys=True)
    return True


def show_objects(backup_opt_dict):

@@ -88,14 +58,10 @@ def show_objects(backup_opt_dict):
    if not backup_opt_dict.list_objects:
        return False

    required_list = [
        backup_opt_dict.remote_obj_list]

    if not validate_all_args(required_list):
        raise Exception('Remote Object list not avaiblale')

    ordered_objects = {}
    remote_obj = backup_opt_dict.remote_obj_list
    remote_obj = get_container_content(
        backup_opt_dict.client_manager,
        backup_opt_dict.container)

    for obj in remote_obj:
        ordered_objects['object_name'] = obj['name']

@@ -154,7 +120,11 @@ def remove_obj_older_than(backup_opt_dict):
    older than the specified days or timestamp
    """

    if not backup_opt_dict.remote_obj_list:
    remote_obj_list = get_container_content(
        backup_opt_dict.client_manager,
        backup_opt_dict.container)

    if not remote_obj_list:
        logging.warning('[*] No remote objects will be removed')
        return

@@ -174,8 +144,11 @@ def remove_obj_older_than(backup_opt_dict):

    backup_name = backup_opt_dict.backup_name
    hostname = backup_opt_dict.hostname
    backup_opt_dict = get_match_backup(backup_opt_dict)
    sorted_remote_list = sort_backup_list(backup_opt_dict)
    backup_opt_dict.remote_match_backup = \
        get_match_backup(backup_opt_dict.backup_name,
                         backup_opt_dict.hostname,
                         remote_obj_list)
    sorted_remote_list = sort_backup_list(backup_opt_dict.remote_match_backup)

    tar_meta_incremental_dep_flag = False
    incremental_dep_flag = False

@@ -213,103 +186,28 @@ def remove_obj_older_than(backup_opt_dict):
            incremental_dep_flag = False


def get_container_content(backup_opt_dict):
def get_container_content(client_manager, container):
    """
    Download the list of object of the provided container
    and print them out as container meta-data and container object list
    """

    if not backup_opt_dict.container:
        raise Exception('please provide a valid container name')

    sw_connector = backup_opt_dict.client_manager.get_swift()
    sw_connector = client_manager.get_swift()
    try:
        backup_opt_dict.remote_obj_list = \
            sw_connector.get_container(backup_opt_dict.container)[1]
        return backup_opt_dict
        return sw_connector.get_container(container)[1]
    except Exception as error:
        raise Exception('[*] Error: get_object_list: {0}'.format(error))


def check_container_existance(backup_opt_dict):
    """
    Check if the provided container is already available on Swift.
    The verification is done by exact matching between the provided container
    name and the whole list of container available for the swift account.
    """

    required_list = [
        backup_opt_dict.container_segments,
        backup_opt_dict.container]

    if not validate_all_args(required_list):
        raise Exception('please provide the following arg: --container')

    logging.info(
        "[*] Retrieving container {0}".format(backup_opt_dict.container))
    sw_connector = backup_opt_dict.client_manager.get_swift()
    containers_list = sw_connector.get_account()[1]

    match_container = [
        container_object['name'] for container_object in containers_list
        if container_object['name'] == backup_opt_dict.container]
    match_container_seg = [
        container_object['name'] for container_object in containers_list
        if container_object['name'] == backup_opt_dict.container_segments]

    # Initialize container dict
    containers = {'main_container': False, 'container_segments': False}

    if not match_container:
        logging.warning("[*] No such container {0} available... ".format(
            backup_opt_dict.container))
    else:
        logging.info(
            "[*] Container {0} found!".format(backup_opt_dict.container))
        containers['main_container'] = True

    if not match_container_seg:
        logging.warning(
            "[*] No segments container {0} available...".format(
                backup_opt_dict.container_segments))
    else:
        logging.info("[*] Container Segments {0} found!".format(
            backup_opt_dict.container_segments))
        containers['container_segments'] = True

    return containers


def manifest_upload(
        manifest_file, backup_opt_dict, file_prefix, manifest_meta_dict):
    """
    Upload Manifest to manage segments in Swift
    """

    if not manifest_meta_dict:
        raise Exception('Manifest Meta dictionary not available')

    sw_connector = backup_opt_dict.client_manager.get_swift()
    tmp_manifest_meta = dict()
    for key, value in manifest_meta_dict.items():
        if key.startswith('x-object-meta'):
            tmp_manifest_meta[key] = value
    manifest_meta_dict = deepcopy(tmp_manifest_meta)
    header = manifest_meta_dict
    manifest_meta_dict['x-object-manifest'] = u'{0}/{1}'.format(
        backup_opt_dict.container_segments.strip(), file_prefix.strip())
    logging.info('[*] Uploading Swift Manifest: {0}'.format(header))
    sw_connector.put_object(
        backup_opt_dict.container, file_prefix, manifest_file, headers=header)
    logging.info('[*] Manifest successfully uploaded!')


def add_stream(client_manager, container_segments, container, stream,
def add_stream(client_manager, container, stream,
               package_name, headers=None):
    i = 0
    container_segments = segments_name(container)
    swift_storage = SwiftStorage(client_manager, container)

    for el in stream:
        add_chunk(client_manager, container_segments,
                  "{0}/{1}".format(package_name, "%08d" % i), el)
        swift_storage.upload_chunk("{0}/{1}".format(package_name, "%08d" % i),
                                   el)
        i += 1
    if not headers:
        headers = {}

@@ -321,128 +219,13 @@ def add_stream(client_manager, container_segments, container, stream,
    swift.put_object(container, package_name, "", headers=headers)


def add_chunk(client_manager, container_segments, package_name, content):
    # If for some reason the swift client object is not available anymore
    # an exception is generated and a new client object is initialized/
    # If the exception happens for 10 consecutive times for a total of
    # 1 hour, then the program will exit with an Exception.
    count = 0
    while True:
        try:
            logging.info(
                '[*] Uploading file chunk index: {0}'.format(
                    package_name))
            client_manager.get_swift().put_object(
                container_segments,
                package_name, content,
                content_type='application/octet-stream',
                content_length=len(content))
            logging.info('[*] Data successfully uploaded!')
            print '[*] Data successfully uploaded!'
            break
        except Exception as error:
            logging.info('[*] Retrying to upload file chunk index: {0}'.format(
                package_name))
            time.sleep(60)
            client_manager.create_swift()
            count += 1
            if count == 10:
                logging.critical('[*] Error: add_object: {0}'
                                 .format(error))
                sys.exit(1)


def add_object(
        backup_opt_dict, backup_queue, absolute_file_path=None,
        time_stamp=None):
    """
    Upload object on the remote swift server
    """

    if not backup_opt_dict.container:
        err_msg = ('[*] Error: Please specify the container '
                   'name with -C or --container option')
        logging.exception(err_msg)
        sys.exit(1)

    if absolute_file_path is None and backup_queue is None:
        err_msg = ('[*] Error: Please specify the file or fs path '
                   'you want to upload on swift with -d or --dst-file')
        logging.exception(err_msg)
        sys.exit(1)

    while True:
        package_name = absolute_file_path.split('/')[-1]
        file_chunk_index, file_chunk = backup_queue.get().popitem()
        if not file_chunk_index and not file_chunk:
            break
        package_name = u'{0}/{1}/{2}/{3}'.format(
            package_name, time_stamp,
            backup_opt_dict.max_segment_size, file_chunk_index)
        add_chunk(backup_opt_dict.client_manager,
                  backup_opt_dict.container_segments,
                  package_name, file_chunk)


def get_containers_list(backup_opt_dict):
    """
    Get a list and information of all the available containers
    """

    try:
        sw_connector = backup_opt_dict.client_manager.get_swift()
        backup_opt_dict.containers_list = sw_connector.get_account()[1]
        return backup_opt_dict
    except Exception as error:
        raise Exception('Get containers list error: {0}'.format(error))


def object_to_file(backup_opt_dict, file_name_abs_path):
    """
    Take a payload downloaded from Swift
    and save it to the disk as file_name
    """

    required_list = [
        backup_opt_dict.container,
        file_name_abs_path]

    if not validate_all_args(required_list):
        raise ValueError('Error in object_to_file(): Please provide ALL the '
                         'following arguments: --container file_name_abs_path')

    sw_connector = backup_opt_dict.client_manager.get_swift()
    file_name = file_name_abs_path.split('/')[-1]
    logging.info('[*] Downloading object {0} on {1}'.format(
        file_name, file_name_abs_path))

    # As the file is download by chunks and each chunk will be appened
    # to file_name_abs_path, we make sure file_name_abs_path does not
    # exists by removing it before
    if os.path.exists(file_name_abs_path):
        os.remove(file_name_abs_path)

    with open(file_name_abs_path, 'ab') as obj_fd:
        for obj_chunk in sw_connector.get_object(
                backup_opt_dict.container, file_name,
                resp_chunk_size=16000000)[1]:
            obj_fd.write(obj_chunk)


def object_to_stream(backup_opt_dict, write_pipe, read_pipe, obj_name):
def object_to_stream(container, client_manager, write_pipe, read_pipe,
                     obj_name):
    """
    Take a payload downloaded from Swift
    and generate a stream to be consumed from other processes
    """

    required_list = [
        backup_opt_dict.container]

    if not validate_all_args(required_list):
        raise ValueError('Error in object_to_stream(): Please provide '
                         'ALL the following argument: --container')

    sw_connector = backup_opt_dict.client_manager.get_swift()
    sw_connector = client_manager.get_swift()
    logging.info('[*] Downloading data stream...')

    # Close the read pipe in this child as it is unneeded

@@ -451,14 +234,234 @@ def object_to_stream(backup_opt_dict, write_pipe, read_pipe, obj_name):
    # pipe
    read_pipe.close()
    for obj_chunk in sw_connector.get_object(
            backup_opt_dict.container, obj_name,
            resp_chunk_size=RESP_CHUNK_SIZE)[1]:
            container, obj_name, resp_chunk_size=RESP_CHUNK_SIZE)[1]:
        write_pipe.send_bytes(obj_chunk)

    # Closing the pipe after checking no data
    # is still vailable in the pipe.
    # is still available in the pipe.
    while True:
        if not write_pipe.poll():
            write_pipe.close()
            break
        time.sleep(1)


def get_match_backup(backup_name, hostname, remote_obj_list):
    """
    Return a dictionary containing a list of remote matching backups from
    backup_opt_dict.remote_obj_list.
    Backup have to exactly match against backup name and hostname of the
    node where freezer is executed. The matching objects are stored and
    available in backup_opt_dict.remote_match_backup
    """

    backup_name = backup_name.lower()
    remote_match_backup = []

    for container_object in remote_obj_list:
        object_name = container_object.get('name', None)
        if object_name:
            obj_name_match = re.search(r'{0}_({1})_\d+?_\d+?$'.format(
                hostname, backup_name), object_name.lower(), re.I)
            if obj_name_match:
                remote_match_backup.append(object_name)

    return remote_match_backup
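get_match_backup and the helpers below all lean on freezer's object-naming convention, hostname_backupname_timestamp_level. A small self-contained demo of the match (the object name is made up):

import re

hostname, backup_name = 'node1', 'mydocs'
object_name = 'node1_mydocs_1417649003_2'  # hostname_backupname_timestamp_level

match = re.search(r'{0}_({1})_(\d+)_(\d+?)$'.format(hostname, backup_name),
                  object_name, re.I)
assert match is not None
assert match.group(2) == '1417649003'  # timestamp
assert match.group(3) == '2'           # incremental level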
def get_rel_oldest_backup(hostname, backup_name, remote_obj_list):
    """
    Return from swift, the relative oldest backup matching the provided
    backup name and hostname of the node where freezer is executed.
    The relative oldest backup correspond the oldest backup from the
    last level 0 backup.
    """
    first_backup_name = ''
    first_backup_ts = 0
    for container_object in remote_obj_list:
        object_name = container_object.get('name', None)
        if not object_name:
            continue
        obj_name_match = re.search(r'{0}_({1})_(\d+)_(\d+?)$'.format(
            hostname, backup_name), object_name, re.I)
        if not obj_name_match:
            continue
        remote_obj_timestamp = int(obj_name_match.group(2))
        remote_obj_level = int(obj_name_match.group(3))
        if remote_obj_level == 0 and (remote_obj_timestamp > first_backup_ts):
            first_backup_name = object_name
            first_backup_ts = remote_obj_timestamp

    return first_backup_name


def eval_restart_backup(backup_opt_dict):
    """
    Restart backup level if the first backup execute with always_level
    is older then restart_always_level
    """

    if not backup_opt_dict.restart_always_level:
        logging.info('[*] No need to set Backup {0} to level 0.'.format(
            backup_opt_dict.backup_name))
        return False

    logging.info('[*] Checking always backup level timestamp...')
    # Compute the amount of seconds to be compared with
    # the remote backup timestamp
    max_time = int(float(backup_opt_dict.restart_always_level) * 86400)
    current_timestamp = backup_opt_dict.time_stamp
    backup_name = backup_opt_dict.backup_name
    hostname = backup_opt_dict.hostname
    # Get relative oldest backup by calling get_rel_oldes_backup()

    remote_obj_list = get_container_content(
        backup_opt_dict.client_manager,
        backup_opt_dict.container)
    backup_opt_dict.remote_rel_oldest =\
        get_rel_oldest_backup(hostname, backup_name, remote_obj_list)
    if not backup_opt_dict.remote_rel_oldest:
        logging.info('[*] Relative oldest backup for backup name {0} on \
            host {1} not available. The backup level is NOT restarted'.format(
            backup_name, hostname))
        return False

    obj_name_match = re.search(r'{0}_({1})_(\d+)_(\d+?)$'.format(
        hostname, backup_name), backup_opt_dict.remote_rel_oldest, re.I)
    if not obj_name_match:
        err = ('[*] No backup match available for backup {0} '
               'and host {1}'.format(backup_name, hostname))
        logging.info(err)
        return Exception(err)

    first_backup_ts = int(obj_name_match.group(2))
    if (current_timestamp - first_backup_ts) > max_time:
        logging.info(
            '[*] Backup {0} older then {1} days. Backup level set to 0'.format(
                backup_name, backup_opt_dict.restart_always_level))

        return True
    else:
        logging.info('[*] No need to set level 0 for Backup {0}.'.format(
            backup_name))

        return False


def set_backup_level(backup_opt_dict, manifest_meta_dict):
    """
    Set the backup level params in backup_opt_dict and the swift
    manifest. This is a fundamental part of the incremental backup
    """

    if manifest_meta_dict.get('x-object-meta-backup-name'):
        backup_opt_dict.curr_backup_level = int(
            manifest_meta_dict.get('x-object-meta-backup-current-level'))
        max_level = manifest_meta_dict.get(
            'x-object-meta-maximum-backup-level')
        always_level = manifest_meta_dict.get(
            'x-object-meta-always-backup-level')
        restart_always_level = manifest_meta_dict.get(
            'x-object-meta-restart-always-backup')
        if max_level:
            max_level = int(max_level)
            if backup_opt_dict.curr_backup_level < max_level:
                backup_opt_dict.curr_backup_level += 1
                manifest_meta_dict['x-object-meta-backup-current-level'] = \
                    str(backup_opt_dict.curr_backup_level)
            else:
                manifest_meta_dict['x-object-meta-backup-current-level'] = \
                    backup_opt_dict.curr_backup_level = '0'
        elif always_level:
            always_level = int(always_level)
            if backup_opt_dict.curr_backup_level < always_level:
                backup_opt_dict.curr_backup_level += 1
                manifest_meta_dict['x-object-meta-backup-current-level'] = \
                    str(backup_opt_dict.curr_backup_level)
            # If restart_always_level is set, the backup_age will be computed
            # and if the backup age in days is >= restart_always_level, then
            # backup-current-level will be set to 0
            if restart_always_level:
                backup_opt_dict.restart_always_level = restart_always_level
                if eval_restart_backup(backup_opt_dict):
                    backup_opt_dict.curr_backup_level = '0'
                    manifest_meta_dict['x-object-meta-backup-current-level'] \
                        = '0'
    else:
        backup_opt_dict.curr_backup_level = \
            manifest_meta_dict['x-object-meta-backup-current-level'] = '0'

    return backup_opt_dict, manifest_meta_dict
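In the max_level branch above, each run bumps x-object-meta-backup-current-level until the maximum is reached, then resets to 0 so a fresh full backup is taken. With max_level = 2 successive runs therefore produce levels 0, 1, 2, 0, 1, 2. The arithmetic, restated compactly:

def next_level(curr_level, max_level):
    # Mirrors the max_level branch of set_backup_level above.
    return curr_level + 1 if curr_level < max_level else 0

levels, level = [], 0
for _ in range(6):
    levels.append(level)
    level = next_level(level, 2)
assert levels == [0, 1, 2, 0, 1, 2]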
def check_backup_and_tar_meta_existence(backup_opt_dict):
    """
    Check if any backup is already available on Swift.
    The verification is done by backup_name, which needs to be unique
    in Swift. This function will return an empty dict if no backup are
    found or the Manifest metadata if the backup_name is available
    """

    if not backup_opt_dict.backup_name or not backup_opt_dict.container:
        logging.warning(
            ('[*] A valid Swift container, or backup name or container '
             'content not available. Level 0 backup is being executed '))
        return dict()

    logging.info("[*] Retrieving backup name {0} on container \
    {1}".format(
        backup_opt_dict.backup_name.lower(), backup_opt_dict.container))

    remote_obj_list = get_container_content(
        backup_opt_dict.client_manager,
        backup_opt_dict.container)
    remote_match_backup = \
        get_match_backup(backup_opt_dict.backup_name,
                         backup_opt_dict.hostname,
                         remote_obj_list)
    try:
        remote_newest_backup = get_newest_backup(backup_opt_dict.hostname,
                                                 backup_opt_dict.backup_name,
                                                 remote_match_backup)
        swift = backup_opt_dict.client_manager.get_swift()
        logging.info("[*] Backup {0} found!".format(
            backup_opt_dict.backup_name))
        backup_match = swift.head_object(
            backup_opt_dict.container, remote_newest_backup)

        return backup_match
    except Exception:
        logging.warning("[*] No such backup {0} available... Executing \
            level 0 backup".format(backup_opt_dict.backup_name))
        return dict()


def get_newest_backup(hostname, backup_name, remote_match_backup):
    """
    Return from backup_opt_dict.remote_match_backup, the newest backup
    matching the provided backup name and hostname of the node where
    freezer is executed. It correspond to the previous backup executed.
    NOTE: If backup has no tar_metadata, no newest backup is returned.
    """

    # Sort remote backup list using timestamp in reverse order,
    # that is from the newest to the oldest executed backup

    if not remote_match_backup:
        raise Exception("remote match backups are empty")
    sorted_backups_list = sort_backup_list(remote_match_backup)

    print sorted_backups_list

    for remote_obj in sorted_backups_list:
        obj_name_match = re.search(r'^{0}_({1})_(\d+)_\d+?$'.format(
            hostname, backup_name), remote_obj, re.I)
        print obj_name_match
        if not obj_name_match:
            continue
        tar_metadata_obj = 'tar_metadata_{0}'.format(remote_obj)
        if tar_metadata_obj in sorted_backups_list:
            return remote_obj
        raise Exception("no tar file")

    raise Exception('not backup found')
205 freezer/tar.py
@@ -21,18 +21,93 @@ Hudson (tjh@cryptsoft.com).
Freezer Tar related functions
"""

from freezer.utils import (
    validate_all_args, add_host_name_ts_level, create_dir)
from freezer.swift import object_to_file
from freezer.utils import validate_all_args
from freezer.winutils import is_windows

import os
import logging
import subprocess
import time
import sys


class TarCommandBuilder:
    """
    Building a tar cmd command. To build command invoke method build.
    """

    def __init__(self, path):
        self.dereference = ''
        self.path = path
        self.level = 0
        self.exclude = None
        self.dereference_mode = {
            'soft': '--dereference',
            'hard': '--hard-dereference',
            'all': '--hard-dereference --dereference',
            'none': ''
        }
        self.listed_incremental = None
        self.work_dir = None
        self.exclude = ''
        self.openssl_path = None
        self.encrypt_pass_file = None

    def set_level(self, level):
        self.level = level

    def set_work_dir(self, work_dir):
        self.work_dir = work_dir

    def set_listed_incremental(self, path):
        self.listed_incremental = path

    def set_exclude(self, exclude):
        self.exclude = exclude

    def set_dereference(self, mode):
        """
        Dereference hard and soft links according option choices.
        'soft' dereference soft links,
        'hard' dereference hardlinks,
        'all' dereference both.
        Default 'none'.
        """
        if mode not in self.dereference_mode:
            raise Exception("unknown dereference mode: %s" % mode)
        self.dereference = mode

    def set_encryption(self, openssl_path, encrypt_pass_file):
        self.openssl_path = openssl_path
        self.encrypt_pass_file = encrypt_pass_file

    def build(self):
        tar_command = ' {path} --create -z --warning=none --no-check-device \
            --one-file-system --preserve-permissions --same-owner --seek \
            --ignore-failed-read {dereference}'.format(
            path=self.path,
            dereference=self.dereference_mode[self.dereference])
        if self.listed_incremental:
            tar_command = '{tar_command} --level={level} \
                --listed-incremental={work_dir}/{listed_incremental}'.format(
                tar_command=tar_command,
                level=self.level,
                work_dir=self.work_dir,
                listed_incremental=self.listed_incremental)

        if self.exclude:
            tar_command = ' {tar_command} --exclude="{exclude}" '.format(
                tar_command=tar_command,
                exclude=self.exclude)

        if self.encrypt_pass_file:
            openssl_cmd = "{openssl_path} enc -aes-256-cfb -pass file:{file}"\
                .format(openssl_path=self.openssl_path,
                        file=self.encrypt_pass_file)
            tar_command = '{0} | {1} '.format(tar_command, openssl_cmd)

        return ' {0} . '.format(tar_command)
|
||||
required_list = [
|
||||
os.path.exists(backup_opt_dict.restore_abs_path)]
|
||||
@ -100,128 +175,6 @@ def tar_restore(backup_opt_dict, read_pipe):
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def tar_incremental(
|
||||
tar_cmd, backup_opt_dict, curr_tar_meta, remote_manifest_meta=None):
|
||||
"""
|
||||
Check if the backup already exist in swift. If the backup already
|
||||
exists, the related meta data and the tar incremental meta file will be
|
||||
downloaded. According to the meta data content, new options will be
|
||||
provided for the next meta data upload to swift and the existing tar meta
|
||||
file will be used in the current incremental backup. Also the level
|
||||
options will be checked and updated respectively
|
||||
"""
|
||||
|
||||
    if not tar_cmd or not backup_opt_dict:
        logging.error(('[*] Error: tar_incremental, please provide tar_cmd '
                       'and backup options'))
        raise ValueError

    if not remote_manifest_meta:
        remote_manifest_meta = dict()
    # If the object returned by check_backup is not a dict, the backup
    # is considered at first run, so a backup level 0 will be executed
    curr_backup_level = remote_manifest_meta.get(
        'x-object-meta-backup-current-level', '0')
    tar_meta = remote_manifest_meta.get(
        'x-object-meta-tar-meta-obj-name')
    tar_cmd_level = '--level={0} '.format(curr_backup_level)
    # Write the tar metadata file in ~/.freezer. It will be
    # removed later on. If ~/.freezer does not exist it will be created.
    create_dir(backup_opt_dict.workdir)

    curr_tar_meta = '{0}/{1}'.format(
        backup_opt_dict.workdir, curr_tar_meta)
    tar_cmd_incr = ' --listed-incremental={0} '.format(curr_tar_meta)
    if tar_meta:
        # If the tar metadata file is available, download it and use it
        # for the tar incremental backup. After this, the
        # remote_manifest_meta['x-object-meta-tar-meta-obj-name'] will be
        # updated with the current tar metadata name and uploaded again
        tar_cmd_incr = ' --listed-incremental={0}/{1} '.format(
            backup_opt_dict.workdir, tar_meta)
        tar_meta_abs = "{0}/{1}".format(backup_opt_dict.workdir, tar_meta)
        try:
            object_to_file(
                backup_opt_dict, tar_meta_abs)
        except Exception:
            logging.warning(
                '[*] Tar metadata {0} not found. Executing level 0 backup\
                '.format(tar_meta))

    tar_cmd = ' {0} {1} {2} '.format(tar_cmd, tar_cmd_level, tar_cmd_incr)
    return tar_cmd, backup_opt_dict, remote_manifest_meta

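# To illustrate the level bookkeeping above with made-up values: on a first
# run there is no remote manifest, so the level falls back to '0'; on later
# runs the level and the previous tar meta file come from the manifest.
remote_manifest_meta = {
    'x-object-meta-backup-current-level': '3',
    'x-object-meta-tar-meta-obj-name': 'tar_metadata_host_data_1234567_2'}
level = remote_manifest_meta.get('x-object-meta-backup-current-level', '0')
tar_meta = remote_manifest_meta.get('x-object-meta-tar-meta-obj-name')
# tar is then invoked with ' --level=3 ' and
# ' --listed-incremental=~/.freezer/tar_metadata_host_data_1234567_2 '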
def gen_tar_command(
        opt_dict, meta_data_backup_file=False, time_stamp=int(time.time()),
        remote_manifest_meta=False):
    """
    Generate tar command options.
    """

    required_list = [
        opt_dict.backup_name,
        opt_dict.path_to_backup]

    if not validate_all_args(required_list):
        raise Exception('Error: Please provide ALL the following options: '
                        '--path-to-backup, --backup-name')
    if not os.path.exists(opt_dict.path_to_backup):
        raise Exception('Error: path-to-backup does not exist')

    # Change the current working directory to opt_dict.path_to_backup
    os.chdir(os.path.normpath(opt_dict.path_to_backup.strip()))

    logging.info('[*] Changing current working directory to: {0} \
    '.format(opt_dict.path_to_backup))
    logging.info('[*] Backup started for: {0} \
    '.format(opt_dict.path_to_backup))

    # Tar options for default behavior. Please refer to the tar man page
    # for a detailed explanation of each option
    tar_command = ' {0} --create -z --warning=none \
    --no-check-device --one-file-system \
    --preserve-permissions --same-owner --seek \
    --ignore-failed-read '.format(opt_dict.tar_path)

    # Dereference hard and soft links according to the option chosen.
    # 'soft' dereferences soft links, 'hard' dereferences hard links,
    # 'all' dereferences both. Default 'none'.
    if opt_dict.dereference_symlink == 'soft':
        tar_command = ' {0} --dereference '.format(
            tar_command)
    if opt_dict.dereference_symlink == 'hard':
        tar_command = ' {0} --hard-dereference '.format(
            tar_command)
    if opt_dict.dereference_symlink == 'all':
        tar_command = ' {0} --hard-dereference --dereference '.format(
            tar_command)

    file_name = add_host_name_ts_level(opt_dict, time_stamp)
    meta_data_backup_file = u'tar_metadata_{0}'.format(file_name)
    # Incremental backup section
    if not opt_dict.no_incremental:
        (tar_command, opt_dict, remote_manifest_meta) = tar_incremental(
            tar_command, opt_dict, meta_data_backup_file,
            remote_manifest_meta)

    # End incremental backup section
    if opt_dict.exclude:
        tar_command = ' {0} --exclude="{1}" '.format(
            tar_command,
            opt_dict.exclude)

    tar_command = ' {0} . '.format(tar_command)

    # Encrypt data if a passfile is provided
    if opt_dict.encrypt_pass_file:
        openssl_cmd = "{0} enc -aes-256-cfb -pass file:{1}".format(
            opt_dict.openssl_path, opt_dict.encrypt_pass_file)
        tar_command = '{0} | {1} '.format(tar_command, openssl_cmd)

    return opt_dict, tar_command, remote_manifest_meta

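# The encryption step above is a plain shell pipe. A standalone sketch of
# the composed command string (both paths are made up for illustration):
tar_command = ' gnutar --create -z --warning=none . '
openssl_cmd = "{0} enc -aes-256-cfb -pass file:{1}".format(
    '/usr/bin/openssl', '/root/.freezer/passfile')
full_command = '{0} | {1} '.format(tar_command, openssl_cmd)
# -> ' gnutar --create -z --warning=none .  | /usr/bin/openssl enc
#      -aes-256-cfb -pass file:/root/.freezer/passfile '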
def tar_backup(opt_dict, tar_command, backup_queue):
    """
    Execute an incremental backup using tar options, specified as

freezer/utils.py
@ -147,7 +147,7 @@ def gen_manifest_meta(
    # in Swift, matching with hostname and backup name
    # the last existing file can be extracted from the timestamp
    manifest_meta_dict['x-object-meta-container-segments'] = \
        backup_opt_dict.container_segments
        segments_name(backup_opt_dict.container)

    # Set the restart_always_level value to n days. According
    # to the following option, when the always_level is set
@ -180,14 +180,14 @@ def validate_all_args(required_list):
    return True


def sort_backup_list(backup_opt_dict):
def sort_backup_list(remote_match_backup):
    """
    Sort the backups by timestamp. The provided list contains strings in the
    format hostname_backupname_timestamp_level
    """

    # Remove duplicate objects
    backups_list = list(set(backup_opt_dict.remote_match_backup))
    backups_list = list(set(remote_match_backup))

    backups_list.sort(
        key=lambda x: map(lambda y: int(y), x.rsplit('_', 2)[-2:]),
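# A standalone sketch of the sort key above: the last two underscore-separated
# fields (timestamp and level) are compared numerically. The reverse=True flag
# is an assumption here, since the sort() call is cut off by this hunk.
backups = ['host_data_1234568_1', 'host_data_1234567_0', 'host_data_1234569_2']
backups.sort(
    key=lambda x: map(lambda y: int(y), x.rsplit('_', 2)[-2:]),
    reverse=True)
# -> ['host_data_1234569_2', 'host_data_1234568_1', 'host_data_1234567_0']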
@ -217,154 +217,6 @@ def create_dir(directory, do_log=True):
        raise Exception(err)

def get_match_backup(backup_opt_dict):
    '''
    Return a dictionary containing a list of remote matching backups from
    backup_opt_dict.remote_obj_list.
    Backups have to match exactly against the backup name and hostname of
    the node where freezer is executed. The matching objects are stored and
    available in backup_opt_dict.remote_match_backup
    '''

    if not backup_opt_dict.backup_name or not backup_opt_dict.container \
            or not backup_opt_dict.remote_obj_list:
        raise Exception("[*] Error: please provide a valid Swift container,\
        backup name and the container contents")

    backup_name = backup_opt_dict.backup_name.lower()
    if backup_opt_dict.remote_obj_list:
        hostname = backup_opt_dict.hostname
        for container_object in backup_opt_dict.remote_obj_list:
            object_name = container_object.get('name', None)
            if object_name:
                obj_name_match = re.search(r'{0}_({1})_\d+?_\d+?$'.format(
                    hostname, backup_name), object_name.lower(), re.I)
                if obj_name_match:
                    backup_opt_dict.remote_match_backup.append(
                        object_name)
                    backup_opt_dict.remote_objects.append(container_object)

    return backup_opt_dict

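# The regex above relies on the object naming scheme
# hostname_backupname_timestamp_level; a small standalone check:
import re
object_name = 'test-hostname_test-backup-name_1234567_0'
match = re.search(r'{0}_({1})_\d+?_\d+?$'.format(
    'test-hostname', 'test-backup-name'), object_name.lower(), re.I)
assert match is not None  # the object belongs to this host and backup name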
def get_newest_backup(backup_opt_dict):
    '''
    Return, from backup_opt_dict.remote_match_backup, the newest backup
    matching the provided backup name and hostname of the node where
    freezer is executed. It corresponds to the previously executed backup.
    NOTE: If the backup has no tar_metadata, no newest backup is returned.
    '''

    if not backup_opt_dict.remote_match_backup:
        return backup_opt_dict

    backup_timestamp = 0
    hostname = backup_opt_dict.hostname
    # Sort the remote backup list using the timestamp in reverse order,
    # that is from the newest to the oldest executed backup
    sorted_backups_list = sort_backup_list(backup_opt_dict)
    for remote_obj in sorted_backups_list:
        obj_name_match = re.search(r'^{0}_({1})_(\d+)_\d+?$'.format(
            hostname, backup_opt_dict.backup_name), remote_obj, re.I)
        if not obj_name_match:
            continue
        remote_obj_timestamp = int(obj_name_match.group(2))
        if remote_obj_timestamp > backup_timestamp:
            backup_timestamp = remote_obj_timestamp
            break

    tar_metadata_obj = 'tar_metadata_{0}'.format(remote_obj)
    if tar_metadata_obj in sorted_backups_list:
        backup_opt_dict.remote_newest_backup = remote_obj

    return backup_opt_dict

def get_rel_oldest_backup(backup_opt_dict):
    '''
    Return, from Swift, the relative oldest backup matching the provided
    backup name and hostname of the node where freezer is executed.
    The relative oldest backup corresponds to the oldest backup since the
    last level 0 backup.
    '''

    if not backup_opt_dict.backup_name:
        err = "[*] Error: please provide a valid backup name in \
        backup_opt_dict.backup_name"
        logging.exception(err)
        raise Exception(err)

    backup_opt_dict.remote_rel_oldest = u''
    backup_name = backup_opt_dict.backup_name
    hostname = backup_opt_dict.hostname
    first_backup_name = False
    first_backup_ts = 0
    for container_object in backup_opt_dict.remote_obj_list:
        object_name = container_object.get('name', None)
        if not object_name:
            continue
        obj_name_match = re.search(r'{0}_({1})_(\d+)_(\d+?)$'.format(
            hostname, backup_name), object_name, re.I)
        if not obj_name_match:
            continue
        remote_obj_timestamp = int(obj_name_match.group(2))
        remote_obj_level = int(obj_name_match.group(3))
        if remote_obj_level == 0 and (remote_obj_timestamp > first_backup_ts):
            first_backup_name = object_name
            first_backup_ts = remote_obj_timestamp

    backup_opt_dict.remote_rel_oldest = first_backup_name
    return backup_opt_dict

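# The timestamp and level are recovered from regex groups 2 and 3; a
# standalone sketch with made-up values:
import re
m = re.search(r'{0}_({1})_(\d+)_(\d+?)$'.format('web01', 'etc'),
              'web01_etc_1417649003_0', re.I)
timestamp, level = int(m.group(2)), int(m.group(3))
# timestamp == 1417649003 and level == 0, so this object is a level 0
# candidate for the relative oldest backup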
def eval_restart_backup(backup_opt_dict):
    '''
    Restart the backup level if the first backup executed with always_level
    is older than restart_always_level
    '''

    if not backup_opt_dict.restart_always_level:
        logging.info('[*] No need to set Backup {0} to level 0.'.format(
            backup_opt_dict.backup_name))
        return False

    logging.info('[*] Checking always backup level timestamp...')
    # Compute the amount of seconds to be compared with
    # the remote backup timestamp
    max_time = int(float(backup_opt_dict.restart_always_level) * 86400)
    current_timestamp = backup_opt_dict.time_stamp
    backup_name = backup_opt_dict.backup_name
    hostname = backup_opt_dict.hostname
    # Get the relative oldest backup by calling get_rel_oldest_backup()
    backup_opt_dict = get_rel_oldest_backup(backup_opt_dict)
    if not backup_opt_dict.remote_rel_oldest:
        logging.info('[*] Relative oldest backup for backup name {0} on \
        host {1} not available. The backup level is NOT restarted'.format(
            backup_name, hostname))
        return False

    obj_name_match = re.search(r'{0}_({1})_(\d+)_(\d+?)$'.format(
        hostname, backup_name), backup_opt_dict.remote_rel_oldest, re.I)
    if not obj_name_match:
        err = ('[*] No backup match available for backup {0} '
               'and host {1}'.format(backup_name, hostname))
        logging.info(err)
        return Exception(err)

    first_backup_ts = int(obj_name_match.group(2))
    if (current_timestamp - first_backup_ts) > max_time:
        logging.info(
            '[*] Backup {0} older than {1} days. Backup level set to 0'.format(
                backup_name, backup_opt_dict.restart_always_level))

        return True
    else:
        logging.info('[*] No need to set level 0 for Backup {0}.'.format(
            backup_name))

        return False

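# The age check above is plain arithmetic: restart_always_level is expressed
# in days, timestamps in seconds since the epoch. A worked example with
# made-up values:
restart_always_level = 30                            # days
max_time = int(float(restart_always_level) * 86400)  # 2592000 seconds
first_backup_ts = 1417649003                         # oldest level 0 backup
current_timestamp = first_backup_ts + max_time + 1
assert (current_timestamp - first_backup_ts) > max_time  # level restarts to 0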
class DateTime(object):
    def __init__(self, value):
        if isinstance(value, int):
@ -394,52 +246,6 @@ class DateTime(object):
        return DateTime(datetime.datetime.now())

def set_backup_level(backup_opt_dict, manifest_meta_dict):
    '''
    Set the backup level params in backup_opt_dict and the Swift
    manifest. This is a fundamental part of the incremental backup
    '''

    if manifest_meta_dict.get('x-object-meta-backup-name'):
        backup_opt_dict.curr_backup_level = int(
            manifest_meta_dict.get('x-object-meta-backup-current-level'))
        max_level = manifest_meta_dict.get(
            'x-object-meta-maximum-backup-level')
        always_level = manifest_meta_dict.get(
            'x-object-meta-always-backup-level')
        restart_always_level = manifest_meta_dict.get(
            'x-object-meta-restart-always-backup')
        if max_level:
            max_level = int(max_level)
            if backup_opt_dict.curr_backup_level < max_level:
                backup_opt_dict.curr_backup_level += 1
                manifest_meta_dict['x-object-meta-backup-current-level'] = \
                    str(backup_opt_dict.curr_backup_level)
            else:
                manifest_meta_dict['x-object-meta-backup-current-level'] = \
                    backup_opt_dict.curr_backup_level = '0'
        elif always_level:
            always_level = int(always_level)
            if backup_opt_dict.curr_backup_level < always_level:
                backup_opt_dict.curr_backup_level += 1
                manifest_meta_dict['x-object-meta-backup-current-level'] = \
                    str(backup_opt_dict.curr_backup_level)
            # If restart_always_level is set, the backup_age will be computed
            # and if the backup age in days is >= restart_always_level, then
            # backup-current-level will be set to 0
            if restart_always_level:
                backup_opt_dict.restart_always_level = restart_always_level
                if eval_restart_backup(backup_opt_dict):
                    backup_opt_dict.curr_backup_level = '0'
                    manifest_meta_dict['x-object-meta-backup-current-level'] \
                        = '0'
    else:
        backup_opt_dict.curr_backup_level = \
            manifest_meta_dict['x-object-meta-backup-current-level'] = '0'

    return backup_opt_dict, manifest_meta_dict

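# A sketch of the level progression implemented above, with made-up values:
# with max_level the level climbs by one per run and wraps back to 0, while
# with always_level it climbs to the ceiling and then stays there.
curr_backup_level, max_level = 2, 3
if curr_backup_level < max_level:
    curr_backup_level += 1   # -> 3 on this run
else:
    curr_backup_level = '0'  # wraps back to a full backup on the next run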
def get_vol_fs_type(backup_opt_dict):
    '''
    The argument needs to be a full-path LVM name, i.e. /dev/vg0/var
@ -472,61 +278,6 @@ def get_vol_fs_type(backup_opt_dict):
    return filesys_type.lower().strip()

def check_backup_and_tar_meta_existence(backup_opt_dict):
    '''
    Check if any backup is already available on Swift.
    The verification is done by backup_name, which needs to be unique
    in Swift. This function will return an empty dict if no backups are
    found, or the Manifest metadata if the backup_name is available
    '''

    if not backup_opt_dict.backup_name or not backup_opt_dict.container or \
            not backup_opt_dict.remote_obj_list:
        logging.warning(
            ('[*] A valid Swift container, backup name or container '
             'content is not available. Level 0 backup is being executed '))
        return dict()

    logging.info("[*] Retrieving backup name {0} on container \
    {1}".format(
        backup_opt_dict.backup_name.lower(), backup_opt_dict.container))
    backup_opt_dict = get_match_backup(backup_opt_dict)
    backup_opt_dict = get_newest_backup(backup_opt_dict)

    if backup_opt_dict.remote_newest_backup:
        swift = backup_opt_dict.client_manager.get_swift()
        logging.info("[*] Backup {0} found!".format(
            backup_opt_dict.backup_name))
        backup_match = swift.head_object(
            backup_opt_dict.container, backup_opt_dict.remote_newest_backup)

        return backup_match
    else:
        logging.warning("[*] No such backup {0} available... Executing \
        level 0 backup".format(backup_opt_dict.backup_name))
        return dict()

def add_host_name_ts_level(backup_opt_dict, time_stamp=int(time.time())):
    '''
    Create the object name as:
    hostname_backupname_timestamp_backup_level
    '''

    if backup_opt_dict.backup_name is False:
        err = ('[*] Error: Please specify the backup name with '
               '--backup-name option')
        logging.exception(err)
        raise Exception(err)

    backup_name = u'{0}_{1}_{2}_{3}'.format(
        backup_opt_dict.hostname,
        backup_opt_dict.backup_name,
        time_stamp, backup_opt_dict.curr_backup_level)

    return backup_name

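# For example, hostname 'web01', backup name 'etc', timestamp 1417649003 and
# level 2 produce:
u'{0}_{1}_{2}_{3}'.format('web01', 'etc', 1417649003, 2)
# -> u'web01_etc_1417649003_2'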
def get_mount_from_path(path):
    """
    Take a file system path as argument and return the mount point
@ -610,6 +361,14 @@ def date_to_timestamp(date):
    return int(time.mktime(opt_backup_date.timetuple()))

def segments_name(container):
    """
    Build the name of the segments container for the given container
    :param container: name of the Swift container
    """
    return u'{0}_segments'.format(container)

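# For example:
segments_name(u'freezer_mydata')  # -> u'freezer_mydata_segments'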
class Bunch:
    def __init__(self, **kwds):
        self.__dict__.update(kwds)
@ -98,10 +98,11 @@ def vss_delete_shadow_copy(shadow_id, windows_volume):
            ', error {1}'.format(shadow_id, err))

    try:
        os.rmdir(os.path.join(windows_volume, 'shadowcopy'))
        os.rmdir(os.path.join(windows_volume, 'freezer_shadowcopy'))
    except Exception:
        logging.error('Failed to delete shadow copy symlink {0}'.
            format(os.path.join(windows_volume, 'shadowcopy')))
            format(os.path.join(windows_volume,
                                'freezer_shadowcopy')))

    logging.info('[*] Deleting shadow copy {0}'.
                 format(shadow_id))
@ -13,6 +13,7 @@ import pymongo
import re
from collections import OrderedDict
from glanceclient.common.utils import IterableWithLength
from freezer.storages.swiftstorage import SwiftStorage
from freezer.utils import OpenstackOptions

os.environ['OS_REGION_NAME'] = 'testregion'
@ -606,7 +607,9 @@ class FakeSwiftClient:
    def __init__(self, key=True, os_options=True, auth_version=True, user=True, authurl=True, tenant_name=True, retries=True, insecure=True):
        self.num_try = 0

    def put_object(self, opt1=True, opt2=True, opt3=True, opt4=True, opt5=True, headers=True, content_length=True, content_type=True):
    def put_object(self, container, obj, contents, content_length=None,
                   etag=None, chunk_size=None, content_type=None,
                   headers=None, query_string=None, response_dict=None):
        return True

    def head_object(self, container_name='', object_name=''):
@ -640,7 +643,12 @@ class FakeSwiftClient:
        return [{}, []]

    def get_account(self, *args, **kwargs):
        return True, [{'name': 'test-container'}, {'name': 'test-container-segments'}]
        return True, [{'name': 'test-container',
                       'bytes': 200000,
                       'count': 1000},
                      {'name': 'test-container-segments',
                       'bytes': 300000,
                       'count': 656}]

    def get_object(self, *args, **kwargs):
        return [{'x-object-meta-length': "123",
@ -720,6 +728,7 @@ class BackupOpt1:
        fakeclient = FakeSwiftClient()
        fakeconnector = fakeclient.client()
        fakeswclient = fakeconnector.Connection()
        self.dereference_symlink = 'none'
        self.mysql_conf = '/tmp/freezer-test-conf-file'
        self.backup_media = 'fs'
        self.mysql_db_inst = FakeMySQLdb()
@ -741,7 +750,6 @@ class BackupOpt1:
        self.curr_backup_level = 0
        self.path_to_backup = '/tmp'
        self.tar_path = 'true'
        self.dereference_symlink = 'true'
        self.no_incremental = 'true'
        self.exclude = 'true'
        self.encrypt_pass_file = 'true'
@ -751,9 +759,8 @@ class BackupOpt1:
        self.remove_older_than = '0'
        self.max_segment_size = '0'
        self.time_stamp = 123456789
        self.container_segments = 'test-container-segments'
        self.container = 'test-container'
        self.workdir = '/tmp'
        self.work_dir = '/tmp'
        self.upload = 'true'
        self.sw_connector = fakeswclient
        self.max_level = '20'
@ -774,18 +781,6 @@ class BackupOpt1:
            'tar_metadata_test-hostname_test-backup-name_1234569_2',
            'tar_metadata_test-hostname_test-backup-name_1234568_1',
            'tar_metadata_test-hostname_test-backup-name_1234567_0']
        self.remote_obj_list = [
            {'name': 'test-hostname_test-backup-name_1234567_0',
             'last_modified': 'testdate'},
            {'name': 'test-hostname_test-backup-name_1234567_1',
             'last_modified': 'testdate'},
            {'name': 'test-hostname_test-backup-name_1234567_2',
             'last_modified': 'testdate'},
            {'name': 'tar_metadata_test-hostname_test-backup-name_1234567_2',
             'last_modified': 'testdate'},
            {'name': 'test-hostname-test-backup-name-asdfa-asdfasdf',
             'last_modified': 'testdate'}]
        self.remote_objects = []
        self.restore_abs_path = '/tmp'
        self.containers_list = [
            {'name': 'testcontainer1', 'bytes': 123423, 'count': 10}
@ -808,10 +803,11 @@ class BackupOpt1:
        self.options = OpenstackOptions.create_from_dict(os.environ)
        from freezer.osclients import ClientManager
        from mock import Mock
        self.client_manager = ClientManager(None, False, -1, -1, 2, False)
        self.client_manager = ClientManager(None, False, 2, False)
        self.client_manager.get_swift = Mock(
            return_value=FakeSwiftClient().client.Connection())
        self.client_manager.create_swift = self.client_manager.get_swift
        self.storage = SwiftStorage(self.client_manager, self.container)
        self.client_manager.get_glance = Mock(return_value=FakeGlanceClient())
        self.client_manager.get_cinder = Mock(return_value=FakeCinderClient())
        nova_client = MagicMock()
@ -1030,18 +1026,7 @@ def fake_restore_fs_sort_obj(*args, **kwargs):
class FakeSwift:

    def __init__(self):
        return None

    def fake_get_containers_list(self, backup_opt):
        return backup_opt

    def fake_get_containers_list1(self, backup_opt):
        return backup_opt

    def fake_get_containers_list2(self, backup_opt):
        backup_opt.list_containers = None
        backup_opt.list_objects = None
        return backup_opt
        pass

    def fake_show_containers(self, backup_opt):
        return True
@ -1055,13 +1040,6 @@ class FakeSwift:
    def fake_check_container_existance1(self, *args, **kwargs):
        return {'main_container': False, 'container_segments': False}

    def fake_get_containers_list3(self, backup_opt):
        return backup_opt

    def fake_get_containers_list4(self, backup_opt):
        backup_opt.containers_list = []
        return backup_opt

    def fake_get_container_content(self, backup_opt):
        return backup_opt

@ -32,6 +32,7 @@ import hashlib
import string
import time
from copy import copy
from freezer.utils import segments_name

lib_path = os.path.abspath(os.path.join('..', '..'))
sys.path.append(lib_path)
@ -78,7 +79,6 @@ class BackupScenarioFS(unittest.TestCase):
            handle.close()
            self.tmp_files.append(file_path)

    def hashfile(self, filepath):
        """
        Get GIT style sha1 hash for a file
@ -90,7 +90,6 @@ class BackupScenarioFS(unittest.TestCase):
            hash_obj.update(handle.read())
        return hash_obj.hexdigest()

    def snap_tmp_tree_sha1(self, file_list):
        """
        Record in a dictionary all files' absolute paths and SHA1
    def damage_tmp_tree(self, tmp_files):
        """
        Delete and modify random files from the tree file structure
@ -129,7 +127,6 @@ class BackupScenarioFS(unittest.TestCase):
            f.close()
            self.tmp_modified.append(fn)

    def create_big_file(self, file_path, size):
        """
        Create test text file with random data and
@ -142,7 +139,6 @@ class BackupScenarioFS(unittest.TestCase):
            handle.write('%s' % ''.join(buf))
            handle.close()

    def setUp(self):
        self.tmp_files = []
        self.tmp_deleted = []
@ -174,7 +170,6 @@ class BackupScenarioFS(unittest.TestCase):
            self.assertTrue(os.path.isfile(key))
            self.assertEqual(key + dict_1[key], key + dict_2[key])

    def test_no_lvm_level0(self):
        """
        Maximum level filesystem backup
@ -202,7 +197,7 @@ class BackupScenarioFS(unittest.TestCase):
            name_list = [item['name'] for item in ns_backup_args.containers_list]
            retry += 1
        self.assertTrue(ns_backup_args.container in name_list)
        self.assertTrue(ns_backup_args.container_segments in name_list)
        self.assertTrue(segments_name(ns_backup_args.container) in name_list)
        fdict_before = self.snap_tmp_tree_sha1(self.tmp_files)
        self.damage_tmp_tree(self.tmp_files)
        # Restore
@ -221,7 +216,6 @@ class BackupScenarioFS(unittest.TestCase):
            self.assertTrue(os.path.isfile(key))
            self.assertEqual(key + fdict_before[key], key + fdict_after[key])

    def test_lvm_level0(self):
        """
        LVM snapshot filesystem backup
@ -263,7 +257,7 @@ class BackupScenarioFS(unittest.TestCase):
            name_list = [item['name'] for item in ns_backup_args.containers_list]
            retry += 1
        self.assertTrue(ns_backup_args.container in name_list)
        self.assertTrue(ns_backup_args.container_segments in name_list)
        self.assertTrue(segments_name(ns_backup_args.container) in name_list)
        # Create a file => SHA1 hash dictionary that will record file
        # hashes before any files are modified or deleted
        fdict_before = self.snap_tmp_tree_sha1(self.tmp_files)
@ -298,7 +292,6 @@ class BackupScenarioFS(unittest.TestCase):
            self.assertTrue(os.path.isfile(key))
            self.assertEqual(key + fdict_before[key], key + fdict_after[key])

    def test_bandwith_limit(self):
        """
        Freezer upload/download speed limit test. We set a fixed 512KB/s speed and
@ -364,7 +357,6 @@ class BackupScenarioFS(unittest.TestCase):
        # Test that time is longer than the theoretical 2 sec
        self.assertTrue(time_low < download_time)

    def test_lvm_incremental_level5(self):
        """
        Incremental LVM snapshots filesystem backup
@ -407,9 +399,11 @@ class BackupScenarioFS(unittest.TestCase):
            ns_backup_args = main.freezer_main(backup_args)
            self.damage_tmp_tree(self.tmp_files)
            # time.sleep(2)
        ns_backup_args = swift.get_container_content(ns_backup_args)
        # Filter only the container names from all other data
        name_list = [item['name'] for item in ns_backup_args.remote_obj_list]
        ns_backup_args = swift.get_container_content(
            ns_backup_args.client_manager,
            ns_backup_args.container)
        name_list = [item['name'] for item in ns_backup_args]
        for counter in range(0, max_level):
            found_objects = [obj for obj in name_list if obj.endswith('_%s' % counter)]
            objects_str = ' '.join(found_objects)
@ -193,14 +193,12 @@ class TestBackUP:
        backup_opt = BackupOpt1()
        backup_opt.volume_id = 34
        BackupOs(backup_opt.client_manager,
                 backup_opt.container,
                 backup_opt.container_segments).backup_cinder_by_glance(
                 backup_opt.container).backup_cinder_by_glance(
            backup_opt, 1417649003)

    def test_backup_cinder(self):
        backup_opt = BackupOpt1()
        backup_opt.volume_id = 34
        BackupOs(backup_opt.client_manager,
                 backup_opt.container,
                 backup_opt.container_segments).backup_cinder(
                 backup_opt.container).backup_cinder(
            backup_opt, 1417649003)
@ -22,8 +22,7 @@ Hudson (tjh@cryptsoft.com).
"""

from commons import *
from freezer import (
    swift, restore, utils, backup)
from freezer import (swift, restore, backup)

from freezer.job import Job, InfoJob, AdminJob, BackupJob, RestoreJob, create_job
import logging
@ -42,7 +41,6 @@ class TestJob:
        monkeypatch.setattr(logging, 'warning', fakelogging.warning)
        monkeypatch.setattr(logging, 'exception', fakelogging.exception)
        monkeypatch.setattr(logging, 'error', fakelogging.error)
        monkeypatch.setattr(swift, 'get_containers_list', fakeswift.fake_get_containers_list1)

    def test_execute(self, monkeypatch):
        self.do_monkeypatch(monkeypatch)
@ -74,24 +72,13 @@ class TestInfoJob(TestJob):
        job = InfoJob(backup_opt)
        assert job.execute() is True

    def test_execute_container_not_exist(self, monkeypatch):
        self.do_monkeypatch(monkeypatch)
        backup_opt = BackupOpt1()
        backup_opt.list_objects = True
        monkeypatch.setattr(swift, 'check_container_existance', self.fakeswift.fake_check_container_existance1)
        job = InfoJob(backup_opt)
        assert job.execute() is False


class TestBackupJob(TestJob):

    def test_execute_backup_fs_incremental(self, monkeypatch):
        self.do_monkeypatch(monkeypatch)
        monkeypatch.setattr(swift, 'check_container_existance', self.fakeswift.fake_check_container_existance1)
        monkeypatch.setattr(swift, 'get_containers_list', self.fakeswift.fake_get_containers_list4)
        monkeypatch.setattr(utils, 'set_backup_level', self.fakeutils.fake_set_backup_level)
        monkeypatch.setattr(swift, 'set_backup_level', self.fakeutils.fake_set_backup_level)
        monkeypatch.setattr(backup, 'backup_mode_fs', self.fakebackup.fake_backup_mode_fs)
        monkeypatch.setattr(swift, 'get_container_content', self.fakeswift.fake_get_container_content)
        backup_opt = BackupOpt1()
        backup_opt.mode = 'fs'
        backup_opt.no_incremental = False
@ -108,8 +95,7 @@ class TestBackupJob(TestJob):

    def test_execute_backup_mongo(self, monkeypatch):
        self.do_monkeypatch(monkeypatch)
        monkeypatch.setattr(utils, 'set_backup_level', self.fakeutils.fake_set_backup_level)
        monkeypatch.setattr(swift, 'get_container_content', self.fakeswift.fake_get_container_content)
        monkeypatch.setattr(swift, 'set_backup_level', self.fakeutils.fake_set_backup_level)
        monkeypatch.setattr(backup, 'backup_mode_mongo', self.fakebackup.fake_backup_mode_mongo)
        backup_opt = BackupOpt1()
        backup_opt.no_incremental = False
@ -119,8 +105,7 @@ class TestBackupJob(TestJob):

    def test_execute_backup_mysql(self, monkeypatch):
        self.do_monkeypatch(monkeypatch)
        monkeypatch.setattr(utils, 'set_backup_level', self.fakeutils.fake_set_backup_level)
        monkeypatch.setattr(swift, 'get_container_content', self.fakeswift.fake_get_container_content)
        monkeypatch.setattr(swift, 'set_backup_level', self.fakeutils.fake_set_backup_level)
        monkeypatch.setattr(backup, 'backup_mode_mysql', self.fakebackup.fake_backup_mode_mysql)
        backup_opt = BackupOpt1()
        backup_opt.no_incremental = False
@ -130,8 +115,7 @@ class TestBackupJob(TestJob):

    def test_execute_raise(self, monkeypatch):
        self.do_monkeypatch(monkeypatch)
        monkeypatch.setattr(utils, 'set_backup_level', self.fakeutils.fake_set_backup_level)
        monkeypatch.setattr(swift, 'get_container_content', self.fakeswift.fake_get_container_content)
        monkeypatch.setattr(swift, 'set_backup_level', self.fakeutils.fake_set_backup_level)
        backup_opt = BackupOpt1()
        backup_opt.no_incremental = False
        backup_opt.mode = None
@ -141,23 +125,10 @@ class TestBackupJob(TestJob):

class TestRestoreJob(TestJob):

    def test_execute_raise(self, monkeypatch):
        self.do_monkeypatch(monkeypatch)
        monkeypatch.setattr(swift, 'check_container_existance', self.fakeswift.fake_check_container_existance1)
        monkeypatch.setattr(swift, 'get_containers_list', self.fakeswift.fake_get_containers_list3)
        backup_opt = BackupOpt1()
        job = RestoreJob(backup_opt)
        # assert job.execute() is None
        pytest.raises(Exception, job.execute)

    def test_execute(self, monkeypatch):
        self.do_monkeypatch(monkeypatch)
        fakerestore = FakeRestore()
        monkeypatch.setattr(swift, 'check_container_existance', self.fakeswift.fake_check_container_existance)
        monkeypatch.setattr(swift, 'get_containers_list', self.fakeswift.fake_get_containers_list3)
        monkeypatch.setattr(restore, 'restore_fs', fakerestore.fake_restore_fs)
        monkeypatch.setattr(swift, 'get_container_content', self.fakeswift.fake_get_container_content)
        backup_opt = BackupOpt1()
        job = RestoreJob(backup_opt)
        assert job.execute() is None
@ -8,16 +8,16 @@ class TestOsClients(unittest.TestCase):
    fake_options = OpenstackOptions("user", "tenant", "url", "password")

    def test_init(self):
        ClientManager(self.fake_options, None, None, None, None, None)
        ClientManager(self.fake_options, None, None, None)

    def test_create_cinder(self):
        client = ClientManager(self.fake_options, None, None, None, None, None)
        client = ClientManager(self.fake_options, None, None, None)
        client.create_cinder()

    def test_create_swift(self):
        client = ClientManager(self.fake_options, None, None, None, None, None)
        client = ClientManager(self.fake_options, None, None, None)
        client.create_swift()

    def test_create_nova(self):
        client = ClientManager(self.fake_options, None, None, None, None, None)
        client = ClientManager(self.fake_options, None, None, None)
        client.create_nova()
@ -24,10 +24,10 @@ Hudson (tjh@cryptsoft.com).
from commons import *
from freezer.restore import (
    restore_fs, restore_fs_sort_obj, RestoreOs)
from freezer import swift
import freezer
import logging
import pytest
import swiftclient


class TestRestore:
@ -43,11 +43,6 @@ class TestRestore:
        monkeypatch.setattr(logging, 'error', fakelogging.error)
        monkeypatch.setattr(
            freezer.restore, 'restore_fs_sort_obj', fake_restore_fs_sort_obj)

        fakeclient = FakeSwiftClient()
        fakeconnector = fakeclient.client
        monkeypatch.setattr(swiftclient, 'client', fakeconnector)

        assert restore_fs(backup_opt) is None

        backup_opt = BackupOpt1()
@ -58,14 +53,6 @@ class TestRestore:
        backup_opt.restore_from_date = None
        assert restore_fs(backup_opt) is None

        monkeypatch.setattr(
            freezer.utils, 'get_match_backup', fake_get_match_backup)
        backup_opt = BackupOpt1()
        backup_opt.remote_obj_list = [{'name': 'tsdfgsdfs',
                                       'last_modified': 'testdate'}]
        backup_opt.remote_match_backup = []
        pytest.raises(ValueError, restore_fs, backup_opt)

    def test_restore_fs_sort_obj(self, monkeypatch):

        fakelogging = FakeLogging()
@ -22,12 +22,10 @@ Hudson (tjh@cryptsoft.com).
"""

from commons import *
from freezer.swift import (create_containers, show_containers,
    show_objects, remove_obj_older_than, get_container_content,
    check_container_existance,
    manifest_upload, add_object, get_containers_list,
    object_to_file, object_to_stream, _remove_object, remove_object)
import os
from freezer.storages.swiftstorage import SwiftStorage
from freezer.swift import (
    show_containers, show_objects, remove_obj_older_than,
    get_container_content, object_to_stream, _remove_object, remove_object)
import logging
import pytest
import time
@ -35,18 +33,6 @@ import time

class TestSwift:

    def test_create_containers(self, monkeypatch):

        backup_opt = BackupOpt1()
        fakelogging = FakeLogging()

        monkeypatch.setattr(logging, 'critical', fakelogging.critical)
        monkeypatch.setattr(logging, 'warning', fakelogging.warning)
        monkeypatch.setattr(logging, 'exception', fakelogging.exception)
        monkeypatch.setattr(logging, 'error', fakelogging.error)

        create_containers(backup_opt)

    def test_show_containers(self, monkeypatch):

        backup_opt = BackupOpt1()
@ -58,10 +44,7 @@ class TestSwift:
        monkeypatch.setattr(logging, 'error', fakelogging.error)

        backup_opt.__dict__['list_containers'] = True
        assert show_containers(backup_opt) is True

        backup_opt.__dict__['list_containers'] = False
        assert show_containers(backup_opt) is False
        show_containers(backup_opt.containers_list)

    def test_show_objects(self, monkeypatch):

@ -76,9 +59,6 @@ class TestSwift:
        backup_opt.__dict__['list_objects'] = True
        assert show_objects(backup_opt) is True

        backup_opt.__dict__['remote_obj_list'] = None
        pytest.raises(Exception, show_objects, backup_opt)

        backup_opt.__dict__['list_objects'] = False
        assert show_objects(backup_opt) is False

@ -122,7 +102,6 @@ class TestSwift:

        assert remove_object(fakeswclient, 'freezer_segments', 'has_segments') is None

    def test_remove_obj_older_than(self, monkeypatch):

        backup_opt = BackupOpt1()
@ -139,15 +118,6 @@ class TestSwift:
        monkeypatch.setattr(logging, 'error', fakelogging.error)
        monkeypatch.setattr(time, 'sleep', faketime.sleep)

        backup_opt.__dict__['remove_older_than'] = None
        backup_opt.__dict__['remove_from_date'] = None
        pytest.raises(Exception, remove_obj_older_than, backup_opt)

        backup_opt = BackupOpt1()
        backup_opt.__dict__['remove_older_than'] = 0
        backup_opt.__dict__['remove_from_date'] = '2014-12-03T23:23:23'
        pytest.raises(Exception, remove_obj_older_than, backup_opt)

        backup_opt = BackupOpt1()
        backup_opt.__dict__['remove_older_than'] = None
        backup_opt.__dict__['remove_from_date'] = '2014-12-03T23:23:23'
@ -172,33 +142,10 @@ class TestSwift:
        monkeypatch.setattr(logging, 'exception', fakelogging.exception)
        monkeypatch.setattr(logging, 'error', fakelogging.error)

        assert get_container_content(backup_opt) is not False
        assert get_container_content(backup_opt) is not None

        backup_opt = BackupOpt1()
        backup_opt.container = False
        pytest.raises(Exception, get_container_content, backup_opt)

    def test_check_container_existance(self, monkeypatch):

        backup_opt = BackupOpt1()
        fakelogging = FakeLogging()

        monkeypatch.setattr(logging, 'critical', fakelogging.critical)
        monkeypatch.setattr(logging, 'warning', fakelogging.warning)
        monkeypatch.setattr(logging, 'exception', fakelogging.exception)
        monkeypatch.setattr(logging, 'error', fakelogging.error)

        assert type(check_container_existance(backup_opt)) is dict

        backup_opt = BackupOpt1()
        backup_opt.container_segments = None
        pytest.raises(Exception, check_container_existance, backup_opt)

        backup_opt = BackupOpt1()
        backup_opt.container = 'test-abcd'
        backup_opt.container_segments = 'test-abcd-segments'
        assert type(check_container_existance(backup_opt)) is dict
        assert get_container_content(backup_opt.client_manager,
                                     backup_opt.container) is not False
        assert get_container_content(backup_opt.client_manager,
                                     backup_opt.container) is not None

    def test_manifest_upload(self, monkeypatch):

@ -210,73 +157,17 @@ class TestSwift:
        monkeypatch.setattr(logging, 'exception', fakelogging.exception)
        monkeypatch.setattr(logging, 'error', fakelogging.error)

        manifest_file = 'test-manifest-file'
        file_prefix = '000000'
        manifest_meta_dict = {'x-object-manifest': 'test-x-object'}
        storage = SwiftStorage(backup_opt.client_manager, backup_opt.container)

        assert manifest_upload(
            manifest_file, backup_opt,
            file_prefix, manifest_meta_dict) is None
        assert storage.upload_manifest(file_prefix, manifest_meta_dict) is None

        manifest_meta_dict = {}
        pytest.raises(
            Exception, manifest_upload, manifest_file, backup_opt,
            Exception, storage.upload_manifest,
            file_prefix, manifest_meta_dict)
    def test_add_object(self, monkeypatch):

        backup_opt = BackupOpt1()
        fakelogging = FakeLogging()

        monkeypatch.setattr(logging, 'critical', fakelogging.critical)
        monkeypatch.setattr(logging, 'warning', fakelogging.warning)
        monkeypatch.setattr(logging, 'exception', fakelogging.exception)
        monkeypatch.setattr(logging, 'error', fakelogging.error)
        fakemultiprocessing = FakeMultiProcessing()
        backup_queue = fakemultiprocessing.Queue()

        time_stamp = int(time.time())
        faketime = FakeTime()
        monkeypatch.setattr(time, 'sleep', faketime.sleep)
        absolute_file_path = '/tmp/test-abs-file-path'

        backup_opt = BackupOpt1()
        backup_opt.container = None

        pytest.raises(SystemExit, add_object, backup_opt, backup_queue,
                      absolute_file_path, time_stamp)

    def test_get_containers_list(self, monkeypatch):

        backup_opt = BackupOpt1()
        fakelogging = FakeLogging()

        monkeypatch.setattr(logging, 'critical', fakelogging.critical)
        monkeypatch.setattr(logging, 'warning', fakelogging.warning)
        monkeypatch.setattr(logging, 'exception', fakelogging.exception)
        monkeypatch.setattr(logging, 'error', fakelogging.error)

        assert isinstance(get_containers_list(backup_opt), BackupOpt1) is True

    def test_object_to_file(self, monkeypatch):

        backup_opt = BackupOpt1()
        fakelogging = FakeLogging()

        monkeypatch.setattr(logging, 'critical', fakelogging.critical)
        monkeypatch.setattr(logging, 'warning', fakelogging.warning)
        monkeypatch.setattr(logging, 'exception', fakelogging.exception)
        monkeypatch.setattr(logging, 'error', fakelogging.error)

        file_name_abs_path = '/tmp/test-abs-file-path'
        object_to_file(backup_opt, file_name_abs_path)

        backup_opt = BackupOpt1()
        backup_opt.container = None
        pytest.raises(Exception, object_to_file, backup_opt, file_name_abs_path)

        os.unlink(file_name_abs_path)

    def test_object_to_stream(self, monkeypatch):

        backup_opt = BackupOpt1()
@ -291,10 +182,6 @@ class TestSwift:
        fakemultiprocessing = FakeMultiProcessing1()
        backup_pipe_read = backup_pipe_write = fakemultiprocessing.Pipe()

        backup_opt.container = None
        pytest.raises(Exception, object_to_stream,
                      backup_opt, backup_pipe_write, backup_pipe_read, obj_name)

        backup_opt = BackupOpt1()
        assert object_to_stream(
            backup_opt, backup_pipe_write, backup_pipe_read, obj_name) is None
            backup_opt.container, backup_opt.client_manager,
            backup_pipe_write, backup_pipe_read, obj_name) is None
@ -22,15 +22,13 @@ Hudson (tjh@cryptsoft.com).
"""

from commons import *
from freezer.tar import (tar_restore, tar_incremental, tar_backup,
    gen_tar_command, tar_restore_args_valid)
from freezer.tar import (tar_restore, tar_backup, tar_restore_args_valid)
from freezer import winutils

import os
import logging
import subprocess
import pytest
import time


class TestTar:
@ -75,79 +73,6 @@ class TestTar:
        monkeypatch.setattr(os, 'chdir', fake_os.chdir2)
        pytest.raises(Exception, tar_restore(backup_opt, fakepipe))

    def test_tar_incremental(self, monkeypatch):

        backup_opt = BackupOpt1()
        fakelogging = FakeLogging()
        (tar_cmd, curr_tar_meta,
         remote_manifest_meta) = True, True, {}
        (val1, val2, val3) = tar_incremental(
            tar_cmd, backup_opt, curr_tar_meta,
            remote_manifest_meta)
        assert val1 is not False
        assert val2 is not False
        assert val3 is not False

        (tar_cmd, curr_tar_meta,
         remote_manifest_meta) = False, True, {}
        pytest.raises(Exception, tar_incremental, tar_cmd, backup_opt, curr_tar_meta, remote_manifest_meta)

    def test_gen_tar_command(self, monkeypatch):
        expanduser = Os()
        backup_opt = BackupOpt1()
        fakelogging = FakeLogging()
        (meta_data_backup_file, remote_manifest_meta) = True, {}
        time_stamp = int(time.time())
        monkeypatch.setattr(logging, 'critical', fakelogging.critical)
        monkeypatch.setattr(logging, 'warning', fakelogging.warning)
        monkeypatch.setattr(logging, 'exception', fakelogging.exception)
        monkeypatch.setattr(logging, 'error', fakelogging.error)

        (val1, val2, val3) = gen_tar_command(backup_opt, meta_data_backup_file, time_stamp,
                                             remote_manifest_meta)
        assert val1 is not False
        assert val2 is not False
        assert val3 is not False

        backup_opt.__dict__['dereference_symlink'] = 'soft'
        (val1, val2, val3) = gen_tar_command(backup_opt, meta_data_backup_file, time_stamp,
                                             remote_manifest_meta)
        assert val1 is not False
        assert val2 is not False
        assert val3 is not False

        backup_opt.__dict__['dereference_symlink'] = 'hard'
        (val1, val2, val3) = gen_tar_command(backup_opt, meta_data_backup_file, time_stamp,
                                             remote_manifest_meta)
        assert val1 is not False
        assert val2 is not False
        assert val3 is not False

        backup_opt.__dict__['dereference_symlink'] = 'all'
        (val1, val2, val3) = gen_tar_command(backup_opt, meta_data_backup_file, time_stamp,
                                             remote_manifest_meta)
        assert val1 is not False
        assert val2 is not False
        assert val3 is not False

        monkeypatch.setattr(os.path, 'exists', expanduser.notexists)

        with pytest.raises(Exception) as excinfo:
            gen_tar_command(backup_opt, meta_data_backup_file,
                            time_stamp, remote_manifest_meta)
        assert excinfo.value.message == 'Error: path-to-backup does not exist'

        monkeypatch.setattr(os.path, 'exists', expanduser.exists)

        backup_opt.__dict__['path_to_backup'] = ''
        with pytest.raises(Exception) as excinfo:
            gen_tar_command(backup_opt, meta_data_backup_file,
                            time_stamp, remote_manifest_meta)
        assert excinfo.value.message == ('Error: Please provide ALL the '
                                         'following options: '
                                         '--path-to-backup, --backup-name')

    def test_tar_backup(self, monkeypatch):

        backup_opt = BackupOpt1()
@ -2,17 +2,16 @@

from freezer.utils import (
    gen_manifest_meta, validate_all_args,
    sort_backup_list, create_dir, get_match_backup,
    get_newest_backup, get_rel_oldest_backup,
    eval_restart_backup, set_backup_level,
    get_vol_fs_type, check_backup_and_tar_meta_existence,
    add_host_name_ts_level, get_mount_from_path, human2bytes, DateTime,
    date_to_timestamp)
    sort_backup_list, create_dir, get_vol_fs_type,
    get_mount_from_path, human2bytes, DateTime, date_to_timestamp)

from freezer import utils
from freezer.swift import (get_match_backup,
    get_newest_backup, get_rel_oldest_backup,
    eval_restart_backup, set_backup_level,
    check_backup_and_tar_meta_existence)
from freezer import swift
import pytest
import datetime
import time
from commons import *

@ -52,7 +51,7 @@ class TestUtils:

    def test_sort_backup_list(self):

        sorted_backups = sort_backup_list(BackupOpt1())
        sorted_backups = sort_backup_list(BackupOpt1().remote_match_backup)

        sort_params = map(
            lambda x: map(lambda y: int(y), x.rsplit('_', 2)[-2:]),
@ -85,37 +84,33 @@ class TestUtils:

        backup_opt = BackupOpt1()

        backup_opt = get_match_backup(backup_opt)
        assert len(backup_opt.remote_match_backup) > 0
        assert len(get_match_backup(backup_opt.backup_name,
                   backup_opt.hostname,
                   [{'name': 'test-hostname_test-backup-name_1234567_0'}])) > 0

        backup_opt.__dict__['backup_name'] = ''
        pytest.raises(Exception, get_match_backup, backup_opt)
    def test_get_newest_backup(self):

    def test_get_newest_backup(self, monkeypatch):
        # no backups
        pytest.raises(Exception, get_newest_backup, "hostname", "backup", [])

        backup_opt = BackupOpt1()
        backup_opt = get_newest_backup(backup_opt)
        assert len(backup_opt.remote_newest_backup) > 0
        # different hostname
        pytest.raises(Exception, get_newest_backup, "hostname", "backup",
                      ["notthesamename_backup_1234_12",
                       "tar_metadata_hostname_backup_1234_2"])

        backup_opt = BackupOpt1()
        backup_opt.__dict__['remote_match_backup'] = ''
        backup_opt = get_newest_backup(backup_opt)
        assert backup_opt.remote_match_backup is not True
        # no tar file
        pytest.raises(Exception, get_newest_backup, "hostname", "backup",
                      ["hostname_backup_1234_2"])

        backup_opt = BackupOpt1()
        fakere2 = FakeRe2()
        monkeypatch.setattr(re, 'search', fakere2.search)
        backup_opt = get_newest_backup(backup_opt)
        assert backup_opt.remote_match_backup is not True
        assert get_newest_backup("hostname", "backup",
            ["hostname_backup_1234_2", "tar_metadata_hostname_backup_1234_2"]) == \
            "hostname_backup_1234_2"
    def test_get_rel_oldest_backup(self):

        backup_opt = BackupOpt1()
        backup_opt = get_rel_oldest_backup(backup_opt)
        assert len(backup_opt.remote_rel_oldest) > 0

        backup_opt.__dict__['backup_name'] = ''
        pytest.raises(Exception, get_rel_oldest_backup, backup_opt)
        remote_rel_oldest = get_rel_oldest_backup("test-hostname",
            "test-backup-name",
            [{"name": "test-hostname_test-backup-name_1234569_0"}])
        assert len(remote_rel_oldest) > 0

    def test_eval_restart_backup(self, monkeypatch):

@ -126,13 +121,9 @@ class TestUtils:
        assert eval_restart_backup(backup_opt) is False

        backup_opt = BackupOpt1()
        fake_get_rel_oldest_backup = Fakeget_rel_oldest_backup()
        monkeypatch.setattr(utils, 'get_rel_oldest_backup', fake_get_rel_oldest_backup)
        assert eval_restart_backup(backup_opt) is False

        backup_opt = BackupOpt1()
        fake_get_rel_oldest_backup2 = Fakeget_rel_oldest_backup2()
        monkeypatch.setattr(utils, 'get_rel_oldest_backup', fake_get_rel_oldest_backup2)
        fakere2 = FakeRe2()
        monkeypatch.setattr(re, 'search', fakere2.search)
        assert eval_restart_backup(backup_opt) is not None
@ -204,25 +195,14 @@ class TestUtils:
        backup_opt.__dict__['backup_name'] = None
        assert type(check_backup_and_tar_meta_existence(backup_opt)) is dict

        fakeswiftclient = FakeSwiftClient()
        backup_opt = BackupOpt1()
        assert type(check_backup_and_tar_meta_existence(backup_opt)) is dict

        fake_get_newest_backup = Fakeget_newest_backup()
        monkeypatch.setattr(utils, 'get_newest_backup', fake_get_newest_backup)
        monkeypatch.setattr(swift, 'get_newest_backup', fake_get_newest_backup)
        assert type(check_backup_and_tar_meta_existence(backup_opt)) is dict

    def test_add_host_name_ts_level(self):

        backup_opt = BackupOpt1()
        backup_opt.__dict__['backup_name'] = False
        pytest.raises(Exception, add_host_name_ts_level, backup_opt)

        backup_opt = BackupOpt1()
        assert type(add_host_name_ts_level(backup_opt)) is unicode

    def test_get_mount_from_path(self):

        dir1 = '/tmp'
        dir2 = '/tmp/nonexistentpathasdf'
        assert type(get_mount_from_path(dir1)) is str
@ -238,7 +218,7 @@ class TestUtils:
        assert human2bytes('1 k') == 1024
        pytest.raises(ValueError, human2bytes, '12 foo')

    def test_OpenstackOptions_creation_success(self, monkeypatch):
    def test_OpenstackOptions_creation_success(self):
        env_dict = dict(OS_USERNAME='testusername', OS_TENANT_NAME='testtenantename', OS_AUTH_URL='testauthurl',
                        OS_PASSWORD='testpassword', OS_REGION_NAME='testregion', OS_TENANT_ID='0123456789')
        options = OpenstackOptions.create_from_dict(env_dict)
@ -259,7 +239,7 @@ class TestUtils:
        assert options.region_name is None
        assert options.tenant_id is None

    def test_OpenstackOption_creation_error_for_missing_parameter(self, monkeypatch):
    def test_OpenstackOption_creation_error_for_missing_parameter(self):
        env_dict = dict(OS_TENANT_NAME='testtenantename', OS_AUTH_URL='testauthurl', OS_PASSWORD='testpassword')
        pytest.raises(Exception, OpenstackOptions.create_from_dict, env_dict)