Cinder Volumes Backup
Implements blueprint: cinder-backup Change-Id: I34ba17da9de978ba2f6ca2c1d8a5417c8e355749
This commit is contained in:
parent
3f86090bac
commit
5f4d2974e3
238
README.rst
238
README.rst
@ -196,6 +196,9 @@ Execute a MySQL backup using lvm snapshot::
|
||||
--mysql-conf /root/.freezer/freezer-mysql.conf--container
|
||||
freezer_mysql-backup-prod --mode mysql --backup-name mysql-ops002
|
||||
|
||||
Execute a cinder backup::
|
||||
$ freezerc --volume-id 3ad7a62f-217a-48cd-a861-43ec0a04a78b
|
||||
|
||||
All the freezerc activities are logged into /var/log/freezer.log.
|
||||
|
||||
Restore
|
||||
@ -254,6 +257,9 @@ Remove backups older then 1 day::
|
||||
|
||||
$ freezerc --action admin --container freezer_dev-test --remove-older-then 1 --backup-name dev-test-01
|
||||
|
||||
Execute a cinder restore::
|
||||
$ freezerc --action restore --volume-id 3ad7a62f-217a-48cd-a861-43ec0a04a78b
|
||||
|
||||
Architecture
|
||||
============
|
||||
|
||||
@ -419,30 +425,35 @@ Available options::
|
||||
[--lvm-volgroup LVM_VOLGROUP] [--max-level MAX_BACKUP_LEVEL]
|
||||
[--always-level ALWAYS_BACKUP_LEVEL]
|
||||
[--restart-always-level RESTART_ALWAYS_BACKUP]
|
||||
[-R REMOVE_OLDER_THAN] [--no-incremental]
|
||||
[--hostname HOSTNAME] [--mysql-conf MYSQL_CONF_FILE]
|
||||
[--log-file LOG_FILE] [--exclude EXCLUDE]
|
||||
[-R REMOVE_OLDER_THAN] [--remove-from-date REMOVE_FROM_DATE]
|
||||
[--no-incremental] [--hostname HOSTNAME]
|
||||
[--mysql-conf MYSQL_CONF_FILE] [--log-file LOG_FILE]
|
||||
[--exclude EXCLUDE]
|
||||
[--dereference-symlink {none,soft,hard,all}] [-U]
|
||||
[--encrypt-pass-file ENCRYPT_PASS_FILE] [-M MAX_SEG_SIZE]
|
||||
[--restore-abs-path RESTORE_ABS_PATH]
|
||||
[--restore-from-host RESTORE_FROM_HOST]
|
||||
[--restore-from-date RESTORE_FROM_DATE] [--max-priority] [-V]
|
||||
[-q] [--insecure] [--os-auth-ver {1,2,3}] [--proxy PROXY]
|
||||
[--dry-run] [--upload-limit UPLOAD_LIMIT]
|
||||
[--volume-id VOLUME_ID] [--download-limit DOWNLOAD_LIMIT]
|
||||
[--sql-server-conf SQL_SERVER_CONFIG] [--volume VOLUME]
|
||||
|
||||
optional arguments:
|
||||
-h, --help show this help message and exit
|
||||
--action {backup,restore,info,admin}
|
||||
Set the action to be taken. backup and restore are
|
||||
self explanatory, info is used to retrieve info from
|
||||
the storage media, while maintenance is used to delete
|
||||
old backups and other admin actions. Default backup.
|
||||
the storage media, while admin is used to delete old
|
||||
backups and other admin actions. Default backup.
|
||||
-F SRC_FILE, --path-to-backup SRC_FILE, --file-to-backup SRC_FILE
|
||||
The file or directory you want to back up to Swift
|
||||
-N BACKUP_NAME, --backup-name BACKUP_NAME
|
||||
The backup name you want to use to identify your
|
||||
backup on Swift
|
||||
-m MODE, --mode MODE Set the technology to back from. Options are, fs
|
||||
(filesystem), mongo (MongoDB), mysql (MySQL) sqlserver (SQL Server).
|
||||
Default set to fs
|
||||
(filesystem), mongo (MongoDB), mysql (MySQL),
|
||||
sqlserver (SQL Server) Default set to fs
|
||||
-C CONTAINER, --container CONTAINER
|
||||
The Swift container used to upload files to
|
||||
-L, --list-containers
|
||||
@ -481,7 +492,7 @@ Available options::
|
||||
Set the backup level used with tar to implement
|
||||
incremental backup. If a level 1 is specified but no
|
||||
level 0 is already available, a level 0 will be done
|
||||
and subesequently backs to level 1. Default 0 (No
|
||||
and subsequently backs to level 1. Default 0 (No
|
||||
Incremental)
|
||||
--always-level ALWAYS_BACKUP_LEVEL
|
||||
Set backup maximum level used with tar to implement
|
||||
@ -497,12 +508,18 @@ Available options::
|
||||
level is used together with --remove-older-then, there
|
||||
might be the chance where the initial level 0 will be
|
||||
removed Default False (Disabled)
|
||||
-R REMOVE_OLDER_THAN, --remove-older-then REMOVE_OLDER_THAN
|
||||
-R REMOVE_OLDER_THAN, --remove-older-then REMOVE_OLDER_THAN, --remove-older-than REMOVE_OLDER_THAN
|
||||
Checks in the specified container for object older
|
||||
then the specified days. If i.e. 30 is specified, it
|
||||
than the specified days. If e.g. 30 is specified, it
|
||||
will remove the remote object older than 30 days.
|
||||
Default False (Disabled)
|
||||
--no-incremental Disable incremantal feature. By default freezer build
|
||||
Default False (Disabled) The option --remove-older-
|
||||
then is deprecated and will be removed soon
|
||||
--remove-from-date REMOVE_FROM_DATE
|
||||
Checks the specified container and removes objects
|
||||
older than the provided datetime in the form "YYYY-MM-
|
||||
DDThh:mm:ss i.e. "1974-03-25T23:23:23". Make sure the
|
||||
"T" is between date and time
|
||||
--no-incremental Disable incremental feature. By default freezer build
|
||||
the meta data even for level 0 backup. By setting this
|
||||
option incremental meta data is not created at all.
|
||||
Default disabled
|
||||
@ -514,10 +531,13 @@ Available options::
|
||||
--mysql-conf MYSQL_CONF_FILE
|
||||
Set the MySQL configuration file where freezer
|
||||
retrieve important information as db_name, user,
|
||||
password, host. Following is an example of config
|
||||
file: # cat ~/.freezer/backup_mysql_conf host = <db-
|
||||
host> user = <mysqluser> password = <mysqlpass>
|
||||
--log-file LOG_FILE Set log file. By default logs to ~/freezer.log
|
||||
password, host, port. Following is an example of
|
||||
config file: # cat ~/.freezer/backup_mysql_conf host =
|
||||
<db-host> user = <mysqluser> password = <mysqlpass>
|
||||
port = <db-port>
|
||||
--log-file LOG_FILE Set log file. By default logs to
|
||||
/var/log/freezer.log. If that file is not writable,
|
||||
freezer tries to log to ~/.freezer/freezer.log
|
||||
--exclude EXCLUDE Exclude files, given as a PATTERN.Ex: --exclude
|
||||
'*.log' will exclude any file with name ending with
|
||||
.log. Default no exclude
|
||||
@ -545,16 +565,188 @@ Available options::
|
||||
Restore Default False.
|
||||
--restore-from-date RESTORE_FROM_DATE
|
||||
Set the absolute path where you want your data
|
||||
restored. Please provide datime in forma "YYYY-MM-
|
||||
restored. Please provide datetime in format "YYYY-MM-
|
||||
DDThh:mm:ss" i.e. "1979-10-03T23:23:23". Make sure the
|
||||
"T" is between date and time Default False.
|
||||
--max-priority Set the cpu process to the highest priority (i.e. -20
|
||||
on Linux) and real-time for I/O. The process priority
|
||||
will be set only if nice and ionice are installed
|
||||
Default disabled. Use with caution.
|
||||
-V, --version Print the release version and exit.
|
||||
--volume Create a snapshot of the selected volume
|
||||
--sql-server-conf Set the SQL Server configuration file where freezer retrieve
|
||||
the sql server instance.
|
||||
Following is an example of config file:
|
||||
instance = <db-instance>
|
||||
-V, --version Print the release version and exit
|
||||
-q, --quiet Suppress error messages
|
||||
--insecure Allow to access swift servers without checking SSL
|
||||
certs.
|
||||
--os-auth-ver {1,2,3}
|
||||
Swift auth version, could be 1, 2 or 3
|
||||
--proxy PROXY Enforce proxy that alters system HTTP_PROXY and
|
||||
HTTPS_PROXY, use '' to eliminate all system proxies
|
||||
--dry-run Do everything except writing or removing objects
|
||||
--upload-limit UPLOAD_LIMIT
|
||||
Upload bandwidth limit in Bytes per sec. Can be
|
||||
invoked with dimensions (10K, 120M, 10G).
|
||||
--volume-id VOLUME_ID
|
||||
Id of cinder volume for backup
|
||||
--download-limit DOWNLOAD_LIMIT
|
||||
Download bandwidth limit in Bytes per sec. Can be
|
||||
invoked with dimensions (10K, 120M, 10G).
|
||||
--sql-server-conf SQL_SERVER_CONFIG
|
||||
Set the SQL Server configuration file where freezer
|
||||
retrieve the sql server instance. Following is an
|
||||
example of config file: instance = <db-instance>
|
||||
--volume VOLUME Create a snapshot of the selected volume
|
||||
|
||||
optional arguments:
|
||||
-h, --help show this help message and exit
|
||||
--action {backup,restore,info,admin}
|
||||
Set the action to be taken. backup and restore are
|
||||
self explanatory, info is used to retrieve info from
|
||||
the storage media, while admin is used to delete old
|
||||
backups and other admin actions. Default backup.
|
||||
-F SRC_FILE, --path-to-backup SRC_FILE, --file-to-backup SRC_FILE
|
||||
The file or directory you want to back up to Swift
|
||||
-N BACKUP_NAME, --backup-name BACKUP_NAME
|
||||
The backup name you want to use to identify your
|
||||
backup on Swift
|
||||
-m MODE, --mode MODE Set the technology to back from. Options are, fs
|
||||
(filesystem), mongo (MongoDB), mysql (MySQL),
|
||||
sqlserver (SQL Server) Default set to fs
|
||||
-C CONTAINER, --container CONTAINER
|
||||
The Swift container used to upload files to
|
||||
-L, --list-containers
|
||||
List the Swift containers on remote Object Storage
|
||||
Server
|
||||
-l, --list-objects List the Swift objects stored in a container on remote
|
||||
Object Storage Server.
|
||||
-o OBJECT, --get-object OBJECT
|
||||
The Object name you want to download on the local file
|
||||
system.
|
||||
-d DST_FILE, --dst-file DST_FILE
|
||||
The file name used to save the object on your local
|
||||
disk and upload file in swift
|
||||
--lvm-auto-snap LVM_AUTO_SNAP
|
||||
Automatically guess the volume group and volume name
|
||||
for given PATH.
|
||||
--lvm-srcvol LVM_SRCVOL
|
||||
Set the lvm volume you want to take a snaphost from.
|
||||
Default no volume
|
||||
--lvm-snapname LVM_SNAPNAME
|
||||
Set the lvm snapshot name to use. If the snapshot name
|
||||
already exists, the old one will be used and no new one
|
||||
will be created. Default freezer_backup_snap.
|
||||
--lvm-snapsize LVM_SNAPSIZE
|
||||
Set the lvm snapshot size when creating a new
|
||||
snapshot. Please add G for Gigabytes or M for
|
||||
Megabytes, i.e. 500M or 8G. Default 5G.
|
||||
--lvm-dirmount LVM_DIRMOUNT
|
||||
Set the directory you want to mount the lvm snapshot
|
||||
to. Default not set
|
||||
--lvm-volgroup LVM_VOLGROUP
|
||||
Specify the volume group of your logical volume. This
|
||||
is important to mount your snapshot volume. Default
|
||||
not set
|
||||
--max-level MAX_BACKUP_LEVEL
|
||||
Set the backup level used with tar to implement
|
||||
incremental backup. If a level 1 is specified but no
|
||||
level 0 is already available, a level 0 will be done
|
||||
and subsequently backs to level 1. Default 0 (No
|
||||
Incremental)
|
||||
--always-level ALWAYS_BACKUP_LEVEL
|
||||
Set backup maximum level used with tar to implement
|
||||
incremental backup. If a level 3 is specified, the
|
||||
backup will be executed from level 0 to level 3 and to
|
||||
that point always a backup level 3 will be executed.
|
||||
It will not restart from level 0. This option has
|
||||
precedence over --max-backup-level. Default False
|
||||
(Disabled)
|
||||
--restart-always-level RESTART_ALWAYS_BACKUP
|
||||
Restart the backup from level 0 after n days. Valid
|
||||
only if --always-level option if set. If --always-
|
||||
level is used together with --remove-older-then, there
|
||||
might be the chance where the initial level 0 will be
|
||||
removed Default False (Disabled)
|
||||
-R REMOVE_OLDER_THAN, --remove-older-then REMOVE_OLDER_THAN, --remove-older-than REMOVE_OLDER_THAN
|
||||
Checks in the specified container for object older
|
||||
than the specified days. If e.g. 30 is specified, it
|
||||
will remove the remote object older than 30 days.
|
||||
Default False (Disabled) The option --remove-older-
|
||||
then is deprecated and will be removed soon
|
||||
--remove-from-date REMOVE_FROM_DATE
|
||||
Checks the specified container and removes objects
|
||||
older than the provided datetime in the form "YYYY-MM-
|
||||
DDThh:mm:ss i.e. "1974-03-25T23:23:23". Make sure the
|
||||
"T" is between date and time
|
||||
--no-incremental Disable incremental feature. By default freezer build
|
||||
the meta data even for level 0 backup. By setting this
|
||||
option incremental meta data is not created at all.
|
||||
Default disabled
|
||||
--hostname HOSTNAME Set hostname to execute actions. If you are executing
|
||||
freezer from one host but you want to delete objects
|
||||
belonging to another host then you can set this option
|
||||
that hostname and execute appropriate actions. Default
|
||||
current node hostname.
|
||||
--mysql-conf MYSQL_CONF_FILE
|
||||
Set the MySQL configuration file where freezer
|
||||
retrieve important information as db_name, user,
|
||||
password, host, port. Following is an example of
|
||||
config file: # cat ~/.freezer/backup_mysql_conf host =
|
||||
<db-host> user = <mysqluser> password = <mysqlpass>
|
||||
port = <db-port>
|
||||
--log-file LOG_FILE Set log file. By default logs to
|
||||
/var/log/freezer.log. If that file is not writable,
|
||||
freezer tries to log to ~/.freezer/freezer.log
|
||||
--exclude EXCLUDE Exclude files, given as a PATTERN.Ex: --exclude
|
||||
'*.log' will exclude any file with name ending with
|
||||
.log. Default no exclude
|
||||
--dereference-symlink {none,soft,hard,all}
|
||||
Follow hard and soft links and archive and dump the
|
||||
files they refer to. Default False.
|
||||
-U, --upload Upload to Swift the destination file passed to the -d
|
||||
option. Default upload the data
|
||||
--encrypt-pass-file ENCRYPT_PASS_FILE
|
||||
Passing a private key to this option, allow you to
|
||||
encrypt the files before to be uploaded in Swift.
|
||||
Default do not encrypt.
|
||||
-M MAX_SEG_SIZE, --max-segment-size MAX_SEG_SIZE
|
||||
Set the maximum file chunk size in bytes to upload to
|
||||
swift Default 67108864 bytes (64MB)
|
||||
--restore-abs-path RESTORE_ABS_PATH
|
||||
Set the absolute path where you want your data
|
||||
restored. Default False.
|
||||
--restore-from-host RESTORE_FROM_HOST
|
||||
Set the hostname used to identify the data you want to
|
||||
restore from. If you want to restore data in the same
|
||||
host where the backup was executed just type from your
|
||||
shell: "$ hostname" and the output is the value that
|
||||
needs to be passed to this option. Mandatory with
|
||||
Restore Default False.
|
||||
--restore-from-date RESTORE_FROM_DATE
|
||||
Set the absolute path where you want your data
|
||||
restored. Please provide datetime in format "YYYY-MM-
|
||||
DDThh:mm:ss" i.e. "1979-10-03T23:23:23". Make sure the
|
||||
"T" is between date and time Default False.
|
||||
--max-priority Set the cpu process to the highest priority (i.e. -20
|
||||
on Linux) and real-time for I/O. The process priority
|
||||
will be set only if nice and ionice are installed
|
||||
Default disabled. Use with caution.
|
||||
-V, --version Print the release version and exit
|
||||
-q, --quiet Suppress error messages
|
||||
--insecure Allow to access swift servers without checking SSL
|
||||
certs.
|
||||
--os-auth-ver {1,2,3}
|
||||
Swift auth version, could be 1, 2 or 3
|
||||
--proxy PROXY Enforce proxy that alters system HTTP_PROXY and
|
||||
HTTPS_PROXY, use '' to eliminate all system proxies
|
||||
--dry-run Do everything except writing or removing objects
|
||||
--upload-limit UPLOAD_LIMIT
|
||||
Upload bandwidth limit in Bytes per sec. Can be
|
||||
invoked with dimensions (10K, 120M, 10G).
|
||||
--volume-id VOLUME_ID
|
||||
Id of cinder volume for backup
|
||||
--download-limit DOWNLOAD_LIMIT
|
||||
Download bandwidth limit in Bytes per sec. Can be
|
||||
invoked with dimensions (10K, 120M, 10G).
|
||||
--sql-server-conf SQL_SERVER_CONFIG
|
||||
Set the SQL Server configuration file where freezer
|
||||
retrieve the sql server instance. Following is an
|
||||
example of config file: instance = <db-instance>
|
||||
--volume VOLUME Create a snapshot of the selected volume
|
||||
|
@ -29,6 +29,7 @@ import distutils.spawn as distspawn
|
||||
import utils
|
||||
import socket
|
||||
|
||||
from freezer.utils import OpenstackOptions
|
||||
from freezer.winutils import is_windows
|
||||
from os.path import expanduser
|
||||
home = expanduser("~")
|
||||
@ -295,6 +296,11 @@ def backup_arguments(args_dict={}):
|
||||
dest='upload_limit',
|
||||
type=utils.human2bytes,
|
||||
default=-1)
|
||||
arg_parser.add_argument(
|
||||
"--volume-id", action='store',
|
||||
help='Id of cinder volume for backup',
|
||||
dest="volume_id",
|
||||
default='')
|
||||
arg_parser.add_argument(
|
||||
'--download-limit', action='store',
|
||||
help='''Download bandwidth limit in Bytes per sec.
|
||||
@ -402,4 +408,7 @@ def backup_arguments(args_dict={}):
|
||||
# Freezer version
|
||||
backup_args.__dict__['__version__'] = '1.1.3'
|
||||
|
||||
backup_args.__dict__['options'] = \
|
||||
OpenstackOptions.create_from_dict(os.environ)
|
||||
|
||||
return backup_args, arg_parser
|
||||
|
@ -21,6 +21,11 @@ Hudson (tjh@cryptsoft.com).
|
||||
Freezer Backup modes related functions
|
||||
"""
|
||||
|
||||
import multiprocessing
|
||||
import logging
|
||||
import os
|
||||
from os.path import expanduser
|
||||
|
||||
from freezer.lvm import lvm_snap, lvm_snap_remove, get_lvm_info
|
||||
from freezer.tar import tar_backup, gen_tar_command
|
||||
from freezer.swift import add_object, manifest_upload, get_client
|
||||
@ -31,12 +36,12 @@ from freezer.vss import start_sql_server
|
||||
from freezer.vss import stop_sql_server
|
||||
from freezer.winutils import use_shadow
|
||||
from freezer.winutils import is_windows
|
||||
from freezer.cinder import provide_snapshot, do_copy_volume, make_glance_image
|
||||
from freezer.cinder import download_image, clean_snapshot
|
||||
from freezer.glance import glance
|
||||
from freezer.cinder import cinder
|
||||
from freezer import swift
|
||||
|
||||
import multiprocessing
|
||||
import logging
|
||||
import os
|
||||
|
||||
from os.path import expanduser
|
||||
home = expanduser("~")
|
||||
|
||||
|
||||
@ -142,6 +147,36 @@ def backup_mode_mongo(backup_opt_dict, time_stamp, manifest_meta_dict):
|
||||
return True
|
||||
|
||||
|
||||
def backup_mode_cinder(backup_dict, time_stamp, create_clients=True):
    """
    Implements cinder backup:
        1) Gets a stream of the image from glance
        2) Stores resulted image to the swift as multipart object

    :param backup_dict: global dict with variables
    :param time_stamp: timestamp of snapshot
    :param create_clients: if set to True -
        recreates cinder and glance clients,
        False - uses existing from backup_opt_dict
    """
    if create_clients:
        backup_dict = cinder(backup_dict)
        backup_dict = glance(backup_dict)

    volume_id = backup_dict.volume_id
    volume = backup_dict.cinder.volumes.get(volume_id)
    # Snapshot with --force so an attached (in-use) volume can be backed up.
    snapshot = provide_snapshot(backup_dict, volume,
                                "backup_snapshot_for_volume_%s" % volume_id)
    copied_volume = do_copy_volume(backup_dict, snapshot)
    image = make_glance_image(backup_dict, "name", copied_volume)
    stream = download_image(backup_dict, image)
    # BUG FIX: the swift object must be named "<volume_id>/<timestamp>"
    # (restore_cinder lists the container with path=volume_id and sorts by
    # the timestamp suffix). The original formatted backup_dict into the
    # first placeholder and silently dropped time_stamp.
    package = "{0}/{1}".format(volume_id, time_stamp)
    swift.add_stream(backup_dict, stream, package)
    # Remove every temporary artifact created for this backup.
    clean_snapshot(backup_dict, snapshot)
    backup_dict.cinder.volumes.delete(copied_volume)
    backup_dict.glance.images.delete(image)
||||
|
||||
|
||||
def backup_mode_fs(backup_opt_dict, time_stamp, manifest_meta_dict):
|
||||
"""
|
||||
Execute the necessary tasks for file system backup mode
|
||||
|
@ -46,7 +46,7 @@ class ThrottledSocket(object):
|
||||
return socket._fileobject(self, mode, bufsize)
|
||||
|
||||
|
||||
def monkeypatch_socket_bandwidth(download_bytes_per_sec, upload_bytes_per_sec):
|
||||
def monkeypatch_bandwidth(download_bytes_per_sec, upload_bytes_per_sec):
|
||||
"""
|
||||
Monkey patch socket to ensure that all
|
||||
new sockets created are throttled.
|
||||
@ -60,3 +60,11 @@ def monkeypatch_socket_bandwidth(download_bytes_per_sec, upload_bytes_per_sec):
|
||||
|
||||
socket.socket = make_throttled_socket
|
||||
socket.SocketType = ThrottledSocket
|
||||
|
||||
|
||||
def monkeypatch_socket_bandwidth(backup_opt_dict):
    """Throttle sockets when a bandwidth limit is configured.

    Reads the upload/download limits from the configuration object and
    patches the socket module only when at least one of them is set
    (a value of -1 means "unlimited").
    """
    down = backup_opt_dict.download_limit
    up = backup_opt_dict.upload_limit
    if down > -1 or up > -1:
        monkeypatch_bandwidth(down, up)
|
||||
|
134
freezer/cinder.py
Normal file
134
freezer/cinder.py
Normal file
@ -0,0 +1,134 @@
|
||||
"""
|
||||
Copyright 2014 Hewlett-Packard
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
This product includes cryptographic software written by Eric Young
|
||||
(eay@cryptsoft.com). This product includes software written by Tim
|
||||
Hudson (tjh@cryptsoft.com).
|
||||
========================================================================
|
||||
|
||||
Freezer functions to interact with OpenStack Swift client and server
|
||||
"""
|
||||
|
||||
from cinderclient.v1 import client as ciclient
|
||||
import time
|
||||
from glance import ReSizeStream
|
||||
import logging
|
||||
from freezer.bandwidth import monkeypatch_socket_bandwidth
|
||||
|
||||
|
||||
def cinder(backup_opt_dict):
    """
    Create a cinder client and attach it to the configuration dictionary.

    Credentials and endpoint data are read from the OpenStack options
    already stored under ``backup_opt_dict.options``.

    :param backup_opt_dict: Dictionary with configuration
    :return: the same dictionary with a ``cinder`` client attached
    """
    options = backup_opt_dict.options

    # Install socket throttling before the client opens any connection.
    monkeypatch_socket_bandwidth(backup_opt_dict)

    backup_opt_dict.cinder = ciclient.Client(
        username=options.user_name,
        api_key=options.password,
        project_id=options.tenant_name,
        auth_url=options.auth_url,
        region_name=options.region_name,
        insecure=backup_opt_dict.insecure,
        service_type="volume")
    return backup_opt_dict
|
||||
|
||||
|
||||
def provide_snapshot(backup_dict, volume, snapshot_name):
    """
    Create a snapshot of a cinder volume with the --force parameter,
    so that an attached (in-use) volume can still be snapshotted.

    Polls every 5 seconds until the snapshot reaches the "available"
    state; terminates the process if the snapshot goes into "error".

    :param backup_dict: Dictionary with configuration
        (must carry an authenticated cinder client)
    :param volume: volume object for snapshoting
    :param snapshot_name: name of snapshot
    :return: snapshot object in "available" state
    """
    volume_snapshots = backup_dict.cinder.volume_snapshots
    snapshot = volume_snapshots.create(volume_id=volume.id,
                                       display_name=snapshot_name,
                                       force=True)

    # Poll until cinder reports the snapshot as usable.
    while snapshot.status != "available":
        try:
            logging.info("[*] Snapshot status: " + snapshot.status)
            snapshot = volume_snapshots.get(snapshot.id)
            if snapshot.status == "error":
                logging.error("snapshot have error state")
                # Hard-exits the whole process on a failed snapshot.
                exit(1)
            time.sleep(5)
        except Exception as e:
            # NOTE(review): broad catch keeps the poll loop alive on
            # transient API errors, but there is no timeout — the loop
            # can run forever if the API keeps failing.
            logging.info(e)
    return snapshot
|
||||
|
||||
|
||||
def do_copy_volume(backup_dict, snapshot):
    """
    Create a new volume from a snapshot and wait until it is usable.

    Polls every 5 seconds until the new volume reaches the "available"
    state.

    :param backup_dict: Configuration dictionary
        (must carry an authenticated cinder client)
    :param snapshot: provided snapshot
    :return: created volume, in "available" state
    """
    volume = backup_dict.cinder.volumes.create(
        size=snapshot.size,
        snapshot_id=snapshot.id)

    # Poll until cinder reports the copy as usable.
    while volume.status != "available":
        try:
            logging.info("[*] Volume copy status: " + volume.status)
            volume = backup_dict.cinder.volumes.get(volume.id)
            time.sleep(5)
        except Exception as e:
            # NOTE(review): transient API failures are only logged and the
            # loop retries; there is no timeout or error-state check here.
            logging.info(e)
            logging.info("[*] Exception getting volume status")
    return volume
|
||||
|
||||
|
||||
def make_glance_image(backup_dict, image_volume_name, copy_volume):
    """
    Upload a cinder volume to glance as a raw, bare-format image.

    :param backup_dict: Configuration dictionary
        (must carry an authenticated cinder client)
    :param image_volume_name: name to give the new image
    :param copy_volume: volume to turn into an image
    :return: result of cinder's upload_to_image call
    """
    return backup_dict.cinder.volumes.upload_to_image(
        volume=copy_volume,
        force=True,
        image_name=image_volume_name,
        container_format="bare",
        disk_format="raw")
|
||||
|
||||
|
||||
def clean_snapshot(backup_dict, snapshot):
    """
    Delete a snapshot through the cinder client.

    :param backup_dict: Configuration dictionary
        (must carry an authenticated cinder client)
    :param snapshot: snapshot object to delete
    """
    snapshots_api = backup_dict.cinder.volume_snapshots
    logging.info("[*] Deleting existed snapshot: " + snapshot.id)
    snapshots_api.delete(snapshot)
|
||||
|
||||
|
||||
def download_image(backup_dict, image):
    """
    Open a re-chunked stream over the raw image data.

    :param backup_dict: Configuration dictionary
        (must carry an authenticated glance client)
    :param image: Image object whose data will be streamed
    :return: ReSizeStream yielding chunks of up to 1000000 bytes
    """
    raw_stream = backup_dict.glance.images.data(image)
    return ReSizeStream(raw_stream, len(raw_stream), 1000000)
|
109
freezer/glance.py
Normal file
109
freezer/glance.py
Normal file
@ -0,0 +1,109 @@
|
||||
"""
|
||||
Copyright 2014 Hewlett-Packard
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
This product includes cryptographic software written by Eric Young
|
||||
(eay@cryptsoft.com). This product includes software written by Tim
|
||||
Hudson (tjh@cryptsoft.com).
|
||||
========================================================================
|
||||
|
||||
Freezer functions to interact with OpenStack Swift client and server
|
||||
"""
|
||||
|
||||
import logging
|
||||
from glanceclient.v1 import client as glclient
|
||||
from glanceclient.shell import OpenStackImagesShell
|
||||
from freezer.bandwidth import monkeypatch_socket_bandwidth
|
||||
|
||||
|
||||
class Bunch:
    """Lightweight attribute container built from keyword arguments.

    Missing attributes resolve to None instead of raising
    AttributeError, which lets an instance stand in for an
    argparse-style options namespace.
    """

    def __init__(self, **attributes):
        for key, value in attributes.items():
            setattr(self, key, value)

    def __getattr__(self, name):
        # Invoked only for names absent from __dict__; fall back to None.
        return self.__dict__.get(name)
|
||||
|
||||
|
||||
def glance(backup_opt_dict):
    """
    Create a glance client and attach it to the configuration dictionary.

    :param backup_opt_dict: Dictionary with configuration
    :return: the same dictionary with a ``glance`` client attached
    """

    options = backup_opt_dict.options

    # Install socket throttling before the client opens any connection.
    monkeypatch_socket_bandwidth(backup_opt_dict)

    # NOTE(review): relies on glanceclient's private shell helper to
    # resolve the image endpoint and auth token from the credentials;
    # may break with glanceclient upgrades.
    endpoint, token = OpenStackImagesShell()._get_endpoint_and_token(
        Bunch(os_username=options.user_name,
              os_password=options.password,
              os_tenant_name=options.tenant_name,
              os_auth_url=options.auth_url,
              os_region_name=options.region_name,
              force_auth=True))

    backup_opt_dict.glance = glclient.Client(endpoint=endpoint, token=token)
    return backup_opt_dict
|
||||
|
||||
|
||||
class ReSizeStream:
    """
    Iterator/File-like object for changing size of chunk in stream.

    Consumes chunks of arbitrary size from ``stream`` and emits chunks of
    exactly ``chunk_size`` (the final chunk may be shorter). Progress is
    logged as data is transmitted.
    """

    def __init__(self, stream, length, chunk_size):
        # stream: iterable yielding string chunks of arbitrary size
        # length: total size reported by __len__
        # chunk_size: size of the chunks produced by next()/read()
        self.stream = stream
        self.length = length
        self.chunk_size = chunk_size
        # Data already consumed from the stream but not yet emitted.
        self.reminder = ""
        self.transmitted = 0

    def __len__(self):
        return self.length

    def __iter__(self):
        return self

    def next(self):
        """Return the next chunk of at most ``chunk_size`` characters.

        Raises StopIteration when both the underlying stream and the
        internal buffer are exhausted.
        """
        logging.info("Transmitted (%s) of (%s)" % (self.transmitted,
                                                   self.length))
        chunk_size = self.chunk_size
        if len(self.reminder) > chunk_size:
            result = self.reminder[:chunk_size]
            self.reminder = self.reminder[chunk_size:]
            self.transmitted += len(result)
            return result
        else:
            stop = False
            while not stop and len(self.reminder) < chunk_size:
                try:
                    self.reminder += next(self.stream)
                except StopIteration:
                    stop = True
            if stop:
                result = self.reminder
                if len(self.reminder) == 0:
                    raise StopIteration()
                # BUG FIX: reset to an empty string — the original reset
                # to a list, which breaks later "+=" concatenation with
                # string data.
                self.reminder = ""
                self.transmitted += len(result)
                return result
            else:
                result = self.reminder[:chunk_size]
                self.reminder = self.reminder[chunk_size:]
                self.transmitted += len(result)
                return result

    # Python 3 iterator protocol: without this alias the class is only
    # iterable on Python 2. Backward compatible — ``next()`` is kept.
    __next__ = next

    def read(self, chunk_size):
        """File-like read: emit up to ``chunk_size`` characters."""
        self.chunk_size = chunk_size
        return self.next()
|
@ -105,8 +105,10 @@ class BackupJob(Job):
|
||||
self.conf, manifest_meta_dict)
|
||||
|
||||
self.conf.manifest_meta_dict = manifest_meta_dict
|
||||
|
||||
if self.conf.mode == 'fs':
|
||||
if self.conf.volume_id:
|
||||
backup.backup_mode_cinder(
|
||||
self.conf, self.start_time.timestamp)
|
||||
elif self.conf.mode == 'fs':
|
||||
backup.backup_mode_fs(
|
||||
self.conf, self.start_time.timestamp, manifest_meta_dict)
|
||||
elif self.conf.mode == 'mongo':
|
||||
@ -137,7 +139,10 @@ class RestoreJob(Job):
|
||||
# Get the object list of the remote containers and store it in the
|
||||
# same dict passes as argument under the dict.remote_obj_list namespace
|
||||
self.conf = swift.get_container_content(self.conf)
|
||||
restore.restore_fs(self.conf)
|
||||
if self.conf.volume_id:
|
||||
restore.restore_cinder(self.conf)
|
||||
else:
|
||||
restore.restore_fs(self.conf)
|
||||
|
||||
|
||||
class AdminJob(Job):
|
||||
|
@ -21,11 +21,6 @@ Hudson (tjh@cryptsoft.com).
|
||||
Freezer restore modes related functions
|
||||
'''
|
||||
|
||||
from freezer.tar import tar_restore
|
||||
from freezer.swift import object_to_stream
|
||||
from freezer.utils import (
|
||||
validate_all_args, get_match_backup, sort_backup_list)
|
||||
|
||||
import multiprocessing
|
||||
import os
|
||||
import logging
|
||||
@ -33,6 +28,14 @@ import re
|
||||
import datetime
|
||||
import time
|
||||
|
||||
from freezer.tar import tar_restore
|
||||
from freezer.swift import object_to_stream
|
||||
from freezer.glance import glance
|
||||
from freezer.cinder import cinder
|
||||
from freezer.glance import ReSizeStream
|
||||
from freezer.utils import (
|
||||
validate_all_args, get_match_backup, sort_backup_list)
|
||||
|
||||
|
||||
def restore_fs(backup_opt_dict):
|
||||
'''
|
||||
@ -159,3 +162,45 @@ def restore_fs_sort_obj(backup_opt_dict):
|
||||
from container {1}, into directory {2}'.format(
|
||||
backup_opt_dict.backup_name, backup_opt_dict.container,
|
||||
backup_opt_dict.restore_abs_path))
|
||||
|
||||
|
||||
def restore_cinder(backup_opt_dict, create_clients=True):
    """
    Restore a cinder volume from its most recent swift backup.

    1) Define swift directory
    2) Download and upload to glance
    3) Create volume from glance
    4) Delete the temporary glance image

    :param backup_opt_dict: global dictionary with params
    :param create_clients: if set to True -
        recreates cinder and glance clients,
        False - uses existing from backup_opt_dict
    :raises Exception: when no backup exists for the requested volume
    """
    if create_clients:
        backup_opt_dict = cinder(backup_opt_dict)
        backup_opt_dict = glance(backup_opt_dict)
    volume_id = backup_opt_dict.volume_id
    container = backup_opt_dict.container
    connector = backup_opt_dict.sw_connector
    info, backups = connector.get_container(container, path=volume_id)
    # Object names are "<volume_id>/<timestamp>"; sorting the timestamp
    # suffixes puts the most recent backup last.
    backups = sorted(map(lambda x: x["name"].rsplit("/", 1)[-1], backups))
    if not backups:
        msg = "Cannot find backups for volume: %s" % volume_id
        logging.error(msg)
        # BUG FIX: raise Exception rather than BaseException so callers
        # using "except Exception" can handle the failure (still caught
        # by any pre-existing "except BaseException" handler).
        raise Exception(msg)
    backup = backups[-1]
    stream = connector.get_object(
        backup_opt_dict.container, "%s/%s" % (volume_id, backup),
        resp_chunk_size=10000000)
    length = int(stream[0]["x-object-meta-length"])
    stream = stream[1]
    images = backup_opt_dict.glance.images
    image = images.create(data=ReSizeStream(stream, length, 1),
                          container_format="bare",
                          disk_format="raw")
    gb = 1073741824
    # Round the byte length up to whole gigabytes. BUG FIX: use floor
    # division so the size stays an int on Python 3 ("/" would yield a
    # float and produce an invalid volume size).
    size = length // gb
    if length % gb > 0:
        size += 1

    backup_opt_dict.cinder.volumes.create(size, imageRef=image.id)
    images.delete(image)
|
||||
|
@ -23,8 +23,8 @@ Freezer functions to interact with OpenStack Swift client and server
|
||||
|
||||
from freezer.utils import (
|
||||
validate_all_args, get_match_backup,
|
||||
sort_backup_list, DateTime, OpenstackOptions)
|
||||
|
||||
sort_backup_list, DateTime)
|
||||
from freezer.bandwidth import monkeypatch_socket_bandwidth
|
||||
import os
|
||||
import swiftclient
|
||||
import json
|
||||
@ -303,15 +303,9 @@ def get_client(backup_opt_dict):
|
||||
backup_opt_dict
|
||||
"""
|
||||
|
||||
options = OpenstackOptions.create_from_dict(os.environ)
|
||||
options = backup_opt_dict.options
|
||||
|
||||
download_limit = backup_opt_dict.download_limit
|
||||
upload_limit = backup_opt_dict.upload_limit
|
||||
|
||||
if upload_limit > -1 or download_limit > - 1:
|
||||
from bandwidth import monkeypatch_socket_bandwidth
|
||||
|
||||
monkeypatch_socket_bandwidth(download_limit, upload_limit)
|
||||
monkeypatch_socket_bandwidth(backup_opt_dict)
|
||||
|
||||
backup_opt_dict.sw_connector = swiftclient.client.Connection(
|
||||
authurl=options.auth_url,
|
||||
@ -352,6 +346,57 @@ def manifest_upload(
|
||||
logging.info('[*] Manifest successfully uploaded!')
|
||||
|
||||
|
||||
def add_stream(backup_opt_dict, stream, package_name):
    """Upload an in-memory stream as zero-padded swift segments + manifest."""
    # Width of the zero-padded segment index, derived from the chunk count.
    pad_width = len(str(len(stream))) or 10

    for index, chunk in enumerate(stream):
        segment_name = "{0}/{1}".format(package_name,
                                        str(index).zfill(pad_width))
        add_chunk(backup_opt_dict, segment_name, chunk)

    manifest_headers = {
        'X-Object-Manifest': u'{0}/{1}/'.format(
            backup_opt_dict.container_segments, package_name),
        'x-object-meta-length': len(stream)}
    # Zero-byte manifest object ties the uploaded segments back together and
    # records the total stream length for the restore path.
    backup_opt_dict.sw_connector.put_object(
        backup_opt_dict.container, package_name, "", headers=manifest_headers)
|
||||
|
||||
|
||||
def add_chunk(backup_opt_dict, package_name, content):
    """
    Upload one segment to the segments container, retrying on failure.

    If for some reason the swift client object is not available anymore
    an exception is generated and a new client object is initialized.
    If the exception happens for 10 consecutive times for a total of
    1 hour, then the program will exit.

    :param backup_opt_dict: global dictionary with params
    :param package_name: swift object name of the segment
    :param content: raw segment payload
    """
    sw_connector = backup_opt_dict.sw_connector
    count = 0
    while True:
        try:
            logging.info(
                '[*] Uploading file chunk index: {0}'.format(
                    package_name))
            sw_connector.put_object(
                backup_opt_dict.container_segments,
                package_name, content,
                content_type='application/octet-stream',
                content_length=len(content))
            logging.info('[*] Data successfully uploaded!')
            break
        except Exception as error:
            logging.info('[*] Retrying to upload file chunk index: {0}'.format(
                package_name))
            time.sleep(60)
            backup_opt_dict = get_client(backup_opt_dict)
            # Rebind to the freshly created connection; without this the
            # retry would keep using the stale client that just failed.
            sw_connector = backup_opt_dict.sw_connector
            count += 1
            if count == 10:
                logging.critical('[*] Error: add_chunk: {0}'
                                 .format(error))
                sys.exit(1)
|
||||
|
||||
|
||||
def add_object(
|
||||
backup_opt_dict, backup_queue, absolute_file_path=None,
|
||||
time_stamp=None):
|
||||
@ -371,7 +416,6 @@ def add_object(
|
||||
logging.exception(err_msg)
|
||||
sys.exit(1)
|
||||
|
||||
sw_connector = backup_opt_dict.sw_connector
|
||||
while True:
|
||||
package_name = absolute_file_path.split('/')[-1]
|
||||
file_chunk_index, file_chunk = backup_queue.get().popitem()
|
||||
@ -380,34 +424,7 @@ def add_object(
|
||||
package_name = u'{0}/{1}/{2}/{3}'.format(
|
||||
package_name, time_stamp,
|
||||
backup_opt_dict.max_seg_size, file_chunk_index)
|
||||
# If for some reason the swift client object is not available anymore
|
||||
# an exception is generated and a new client object is initialized/
|
||||
# If the exception happens for 10 consecutive times for a total of
|
||||
# 1 hour, then the program will exit with an Exception.
|
||||
count = 0
|
||||
while True:
|
||||
try:
|
||||
logging.info(
|
||||
'[*] Uploading file chunk index: {0}'.format(
|
||||
package_name))
|
||||
sw_connector.put_object(
|
||||
backup_opt_dict.container_segments,
|
||||
package_name, file_chunk,
|
||||
content_type='application/octet-stream',
|
||||
content_length=len(file_chunk))
|
||||
logging.info('[*] Data successfully uploaded!')
|
||||
break
|
||||
except Exception as error:
|
||||
time.sleep(60)
|
||||
logging.info(
|
||||
'[*] Retrying to upload file chunk index: {0}'.format(
|
||||
package_name))
|
||||
backup_opt_dict = get_client(backup_opt_dict)
|
||||
count += 1
|
||||
if count == 10:
|
||||
logging.critical('[*] Error: add_object: {0}'
|
||||
.format(error))
|
||||
sys.exit(1)
|
||||
add_chunk(backup_opt_dict, package_name, file_chunk)
|
||||
|
||||
|
||||
def get_containers_list(backup_opt_dict):
|
||||
|
@ -1,5 +1,8 @@
|
||||
python-swiftclient>=1.6.0
|
||||
python-keystoneclient>=0.8.0
|
||||
python-cinderclient
|
||||
python-glanceclient
|
||||
|
||||
docutils>=0.8.1
|
||||
pymysql
|
||||
pymongo
|
||||
|
@ -12,6 +12,8 @@ import pymongo
|
||||
import re
|
||||
from collections import OrderedDict
|
||||
import __builtin__
|
||||
from glanceclient.common.utils import IterableWithLength
|
||||
from freezer.utils import OpenstackOptions
|
||||
|
||||
os.environ['OS_REGION_NAME'] = 'testregion'
|
||||
os.environ['OS_TENANT_ID'] = '0123456789'
|
||||
@ -504,6 +506,74 @@ class Lvm:
|
||||
return False
|
||||
|
||||
|
||||
class FakeIdObject:
    """Minimal stand-in for any OpenStack resource that exposes an id."""

    def __init__(self, id):
        self.id = id
        # Every fake resource pretends to be a ready 10 GB volume/image.
        self.status, self.size, self.min_disk = "available", 10, 10
|
||||
|
||||
|
||||
class FakeCinderClient:
    """Test double for cinderclient: canned volume and snapshot managers."""

    def __init__(self):
        self.volumes = FakeCinderClient.Volumes()
        # NOTE(review): the bare class (not an instance) mirrors the original
        # fake; only staticmethods are ever called on it, so both forms
        # behave identically at the call sites.
        self.volume_snapshots = FakeCinderClient.VolumeSnapshot

    class Volumes:
        """Fake volume manager returning fixed FakeIdObject results."""

        @staticmethod
        def get(id):
            return FakeIdObject("5")

        @staticmethod
        def create(size, snapshot_id=None, imageRef=None):
            return FakeIdObject("2")

        @staticmethod
        def upload_to_image(volume, force, image_name,
                            container_format, disk_format):
            pass

        @staticmethod
        def delete(volume):
            pass

    class VolumeSnapshot:
        """Fake snapshot manager; exposed as a bare class by the client."""

        @staticmethod
        def create(volume_id, display_name, force):
            return FakeIdObject("10")

        @staticmethod
        def delete(snapshot):
            pass
|
||||
|
||||
|
||||
class FakeGlanceClient:
    """Test double for glanceclient exposing a canned images manager."""

    def __init__(self):
        self.images = FakeGlanceClient.Images()

    class Images:
        """Fake image manager with fixed data/delete/create behavior."""

        @staticmethod
        def data(image):
            # Three bytes of payload, wrapped the way the real client does.
            return IterableWithLength(iter("abc"), 3)

        @staticmethod
        def delete(image):
            pass

        @staticmethod
        def create(data, container_format, disk_format):
            return FakeIdObject("10")
|
||||
|
||||
|
||||
class FakeSwiftClient:
|
||||
|
||||
def __init__(self):
|
||||
@ -516,7 +586,6 @@ class FakeSwiftClient:
|
||||
class Connection:
|
||||
def __init__(self, key=True, os_options=True, auth_version=True, user=True, authurl=True, tenant_name=True, retries=True, insecure=True):
|
||||
self.num_try = 0
|
||||
return None
|
||||
|
||||
def put_object(self, opt1=True, opt2=True, opt3=True, opt4=True, opt5=True, headers=True, content_length=True, content_type=True):
|
||||
return True
|
||||
@ -543,6 +612,11 @@ class FakeSwiftClient:
|
||||
{'bytes': 251, 'last_modified': '2015-03-09T10:37:01.701170', 'hash': '9a8cbdb30c226d11bf7849f3d48831b9', 'name': 'hostname_backup_name_1234567890_0/1234567890/67108864/00000000', 'content_type': 'application/octet-stream'},
|
||||
{'bytes': 632, 'last_modified': '2015-03-09T11:54:27.860730', 'hash': 'd657a4035d0dcc18deaf9bfd2a3d0ebf', 'name': 'hostname_backup_name_1234567891_1/1234567891/67108864/00000000', 'content_type': 'application/octet-stream'}
|
||||
])
|
||||
elif container == "test-container" and 'path' in kwargs:
|
||||
return ({'container_metadata': True}, [
|
||||
{'bytes': 251, 'last_modified': '2015-03-09T10:37:01.701170', 'hash': '9a8cbdb30c226d11bf7849f3d48831b9', 'name': 'hostname_backup_name_1234567890_0/1234567890/67108864/00000000', 'content_type': 'application/octet-stream'},
|
||||
{'bytes': 632, 'last_modified': '2015-03-09T11:54:27.860730', 'hash': 'd657a4035d0dcc18deaf9bfd2a3d0ebf', 'name': 'hostname_backup_name_1234567891_1/1234567891/67108864/00000000', 'content_type': 'application/octet-stream'}
|
||||
])
|
||||
else:
|
||||
return [{}, []]
|
||||
|
||||
@ -550,7 +624,7 @@ class FakeSwiftClient:
|
||||
return True, [{'name': 'test-container'}, {'name': 'test-container-segments'}]
|
||||
|
||||
def get_object(self, *args, **kwargs):
|
||||
return ['abcdef', 'hijlmno']
|
||||
return [{'x-object-meta-length': "123"}, "abc"]
|
||||
|
||||
|
||||
class FakeSwiftClient1:
|
||||
@ -705,6 +779,8 @@ class BackupOpt1:
|
||||
self.upload_limit = -1
|
||||
self.download_limit = -1
|
||||
self.sql_server_instance = 'Sql Server'
|
||||
self.volume_id = ''
|
||||
self.options = OpenstackOptions.create_from_dict(os.environ)
|
||||
|
||||
|
||||
class FakeMySQLdb:
|
||||
|
@ -1,7 +1,10 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
from freezer.backup import backup_mode_mysql, backup_mode_fs, backup_mode_mongo
|
||||
from freezer.backup import backup_mode_cinder
|
||||
import freezer
|
||||
from freezer import cinder
|
||||
from freezer import glance
|
||||
import swiftclient
|
||||
import multiprocessing
|
||||
import subprocess
|
||||
@ -189,3 +192,14 @@ class TestBackUP:
|
||||
monkeypatch.setattr(pymongo, 'MongoClient', fakemongo2)
|
||||
assert backup_mode_mongo(
|
||||
backup_opt, 123456789, test_meta) is True
|
||||
|
||||
def test_backup_mode_cinder(self, monkeypatch):
    """Smoke-test backup_mode_cinder against fake cinder/glance/swift."""
    opt = BackupOpt1()
    opt.volume_id = 34
    opt.glance = FakeGlanceClient()
    opt.cinder = FakeCinderClient()
    # Route every swiftclient call through the in-memory fake.
    monkeypatch.setattr(swiftclient, 'client', FakeSwiftClient().client)

    backup_mode_cinder(opt, 123456789, False)
|
||||
|
@ -1,4 +1,4 @@
|
||||
from freezer.bandwidth import ThrottledSocket, monkeypatch_socket_bandwidth
|
||||
from freezer.bandwidth import ThrottledSocket, monkeypatch_bandwidth
|
||||
from commons import FakeSocket
|
||||
import pytest
|
||||
|
||||
@ -27,7 +27,7 @@ class TestBandwidth:
|
||||
ThrottledSocket._sleep(10, 5, 5, 7)
|
||||
|
||||
def test_monkeypatch(self):
|
||||
monkeypatch_socket_bandwidth(100, 100)
|
||||
monkeypatch_bandwidth(100, 100)
|
||||
|
||||
def test_set(self):
|
||||
fake = FakeSocket()
|
||||
|
@ -23,7 +23,7 @@ Hudson (tjh@cryptsoft.com).
|
||||
|
||||
from commons import *
|
||||
from freezer.restore import (
|
||||
restore_fs, restore_fs_sort_obj)
|
||||
restore_fs, restore_fs_sort_obj, restore_cinder)
|
||||
import freezer
|
||||
import logging
|
||||
import pytest
|
||||
@ -66,7 +66,6 @@ class TestRestore:
|
||||
backup_opt.remote_match_backup = []
|
||||
pytest.raises(ValueError, restore_fs, backup_opt)
|
||||
|
||||
|
||||
def test_restore_fs_sort_obj(self, monkeypatch):
|
||||
|
||||
backup_opt = BackupOpt1()
|
||||
@ -82,3 +81,14 @@ class TestRestore:
|
||||
backup_opt = BackupOpt1()
|
||||
backup_opt.backup_name = 'abcdtest'
|
||||
pytest.raises(Exception, restore_fs_sort_obj, backup_opt)
|
||||
|
||||
def test_backup_mode_cinder(self, monkeypatch):
    """Smoke-test restore_cinder against fake cinder/glance/swift.

    NOTE(review): despite the name, this exercises the restore path and
    shadows the identically named backup test; consider renaming to
    test_restore_cinder in a follow-up.
    """
    opt = BackupOpt1()
    opt.volume_id = 34
    opt.glance = FakeGlanceClient()
    opt.cinder = FakeCinderClient()
    # Route every swiftclient call through the in-memory fake.
    monkeypatch.setattr(swiftclient, 'client', FakeSwiftClient().client)

    restore_cinder(opt, False)
|
||||
|
@ -27,7 +27,6 @@ from freezer.swift import (create_containers, show_containers,
|
||||
check_container_existance,
|
||||
get_client, manifest_upload, add_object, get_containers_list,
|
||||
object_to_file, object_to_stream, _remove_object, remove_object)
|
||||
from freezer.swift import OpenstackOptions
|
||||
import os
|
||||
import logging
|
||||
import subprocess
|
||||
|
Loading…
Reference in New Issue
Block a user