os-brick engine for volume backups

This is a completely new engine that allows backing up the content of a
Cinder volume by attaching it to localhost using os-brick functionality.

Implements: blueprint cinder-osbrick
Change-Id: I5ebde164424e80bde80350099d6f1a7ea0b30cbd
Signed-off-by: Ruslan Aliev <raliev@mirantis.com>
Ruslan Aliev 2017-03-10 21:46:19 +04:00
parent ad2e1f4cf5
commit 82ebec0bb5
10 changed files with 609 additions and 2 deletions
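
For reviewers unfamiliar with os-brick, the minimal sketch below (not part of this change) shows the flow the new engine builds on: describe the local host to Cinder, ask Cinder to initialize a connection for that host, then let an os-brick connector attach the volume as a local block device. It assumes an already-authenticated cinderclient instance (cinder), a volume ID (vol_id), the host's IP and root privileges; the engine added here wraps the same calls with reserve/attach bookkeeping via a small client wrapper.

from os_brick.initiator import connector
from oslo_concurrency import processutils


def attach_volume_locally(cinder, vol_id, my_ip, root_helper='sudo'):
    # Describe this host (iSCSI initiator name, IP, multipath support, ...).
    conn_props = connector.get_connector_properties(
        root_helper, my_ip, multipath=False, enforce_multipath=False,
        execute=processutils.execute)
    # Ask Cinder to export the volume for this host.
    conn_info = cinder.volumes.initialize_connection(vol_id, conn_props)
    # Build the protocol-specific connector (iSCSI, RBD, ...) and attach.
    brick = connector.InitiatorConnector.factory(
        conn_info['driver_volume_type'], root_helper, do_local_attach=True)
    # device_info carries the local device path, e.g. {'path': '/dev/sdb'}.
    return brick.connect_volume(conn_info['data'])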


@@ -67,6 +67,7 @@ DEFAULT_PARAMS = {
'max_priority': None, 'max_level': False, 'path_to_backup': None,
'encrypt_pass_file': None, 'volume': None, 'proxy': None,
'cinder_vol_id': '', 'cindernative_vol_id': '',
'cinderbrick_vol_id': '',
'nova_inst_id': '', '__version__': FREEZER_VERSION,
'remove_older_than': None, 'restore_from_date': None,
'upload_limit': -1, 'always_level': False, 'version': None,
@@ -116,7 +117,7 @@ _COMMON = [
"nova(OpenStack Instance). Default set to fs"),
cfg.StrOpt('engine',
short='e',
choices=['tar', 'rsync', 'nova'],
choices=['tar', 'rsync', 'nova', 'osbrick'],
dest='engine_name',
default=DEFAULT_PARAMS['engine_name'],
help="Engine to be used for backup/restore. "
@@ -365,6 +366,11 @@ _COMMON = [
default=DEFAULT_PARAMS['cinder_vol_id'],
help="Id of cinder volume for backup"
),
cfg.StrOpt('cinderbrick-vol-id',
dest='cinderbrick_vol_id',
default=DEFAULT_PARAMS['cinderbrick_vol_id'],
help="Id of cinder volume for backup using os-brick"
),
cfg.StrOpt('cindernative-vol-id',
dest='cindernative_vol_id',
default=DEFAULT_PARAMS['cindernative_vol_id'],
@@ -614,6 +620,8 @@ def get_backup_args():
elif backup_args.engine_name == 'nova' and (backup_args.project_id or
backup_args.nova_inst_id):
backup_media = 'nova'
elif backup_args.cinderbrick_vol_id:
backup_media = 'cinderbrick'
backup_args.__dict__['backup_media'] = backup_media


@@ -0,0 +1,57 @@
# Copyright 2011-2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import socket
from cinderclient import exceptions
from oslo_concurrency import processutils
def get_my_ip():
try:
csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
csock.connect(('8.8.8.8', 80))
(addr, port) = csock.getsockname()
csock.close()
return addr
except socket.error:
return None
def get_root_helper():
# NOTE (e0ne): We don't use rootwrap now
return 'sudo'
def require_root(f):
def wrapper(*args, **kwargs):
if os.getuid() != 0:
raise exceptions.CommandError(
"This command requires root permissions.")
return f(*args, **kwargs)
return wrapper
def safe_execute(cmd):
try:
processutils.execute(*cmd, root_helper=get_root_helper(),
run_as_root=True)
except processutils.ProcessExecutionError as e:
print('Command "{0}" execution returned {1} exit code:'.format(
e.cmd, e.exit_code))
print('Stderr: {0}'.format(e.stderr))
print('Stdout: {0}'.format(e.stdout))
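
A hypothetical usage snippet for these helpers (not part of the change): require_root guards an entry point that must run as root, while safe_execute runs a privileged command through oslo.concurrency and prints the command, exit code and output instead of raising on failure.

from freezer.engine.osbrick import brick_utils


@brick_utils.require_root
def mount_backup_device(device_path, mountpoint):
    # Runs "sudo mount -t ext4 <device> <mountpoint>"; failures are printed,
    # not raised, because safe_execute swallows ProcessExecutionError.
    brick_utils.safe_execute(['mount', '-t', 'ext4', device_path, mountpoint])


# Example call (device path and mountpoint are placeholders):
# mount_backup_device('/dev/sdb', '/mnt/backup')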


@@ -0,0 +1,140 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
from cinderclient import exceptions
from os_brick.initiator import connector
from oslo_concurrency import processutils
from freezer.engine.osbrick import brick_utils
from freezer.engine.osbrick import volume_actions as actions
class Client(object):
version = '1.1.0'
def __init__(self, volumes_client=None):
self.volumes_client = volumes_client
def _brick_get_connector(self, protocol, driver=None,
execute=processutils.execute,
use_multipath=False,
device_scan_attempts=3,
*args, **kwargs):
"""Wrapper to get a brick connector object.
This automatically populates the required protocol as well
as the root_helper needed to execute commands.
"""
return connector.InitiatorConnector.factory(
protocol,
brick_utils.get_root_helper(),
driver=driver,
execute=execute,
use_multipath=use_multipath,
device_scan_attempts=device_scan_attempts,
*args, **kwargs)
def get_connector(self, multipath=False, enforce_multipath=False):
conn_prop = connector.get_connector_properties(
brick_utils.get_root_helper(),
brick_utils.get_my_ip(),
multipath=multipath,
enforce_multipath=(enforce_multipath),
execute=processutils.execute)
return conn_prop
def attach(self, volume_id, hostname, mountpoint=None, mode='rw',
multipath=False, enforce_multipath=False):
# Check protocol type of storage backend.
with actions.VerifyProtocol(self.volumes_client, volume_id) as cmd:
# Retrieve vol-host attribute of volume.
volume_info = self.volumes_client.volumes.get(volume_id)
volume_capabilities = self.volumes_client.capabilities.get(
volume_info.__dict__['os-vol-host-attr:host'])
# Retrieve storage_protocol from storage backend capabilities.
protocol = volume_capabilities.storage_protocol.upper()
cmd.verify(protocol)
# Reserve volume before attachment
with actions.Reserve(self.volumes_client, volume_id) as cmd:
cmd.reserve()
with actions.InitializeConnection(
self.volumes_client, volume_id) as cmd:
connection = cmd.initialize(self, multipath, enforce_multipath)
with actions.ConnectVolume(self.volumes_client, volume_id) as cmd:
brick_connector = self._brick_get_connector(
protocol, do_local_attach=True)
device_info = cmd.connect(brick_connector,
connection['data'],
mountpoint, mode, hostname)
return device_info
def detach(self, volume_id, attachment_uuid=None, multipath=False,
enforce_multipath=False, device_info=None):
with actions.BeginDetach(self.volumes_client, volume_id) as cmd:
cmd.reserve()
with actions.InitializeConnectionForDetach(
self.volumes_client, volume_id) as cmd:
connection = cmd.initialize(self, multipath, enforce_multipath)
brick_connector = self._brick_get_connector(
connection['driver_volume_type'], do_local_attach=True)
with actions.DisconnectVolume(self.volumes_client, volume_id) as cmd:
cmd.disconnect(brick_connector, connection['data'], device_info)
with actions.DetachVolume(self.volumes_client, volume_id) as cmd:
cmd.detach(self, attachment_uuid, multipath, enforce_multipath)
def get_volume_paths(self, volume_id, use_multipath=False):
"""Gets volume paths on the system for a specific volume."""
conn_props = self.get_connector(multipath=use_multipath)
vols = self.volumes_client.volumes.list()
vol_in_use = False
vol_found = False
for vol in vols:
if (volume_id == vol.id or volume_id == vol.name):
vol_found = True
if vol.status == "in-use":
vol_in_use = True
# Make sure the volume ID is used and not the name
volume_id = vol.id
break
if not vol_found:
msg = "No volume with a name or ID of '%s' exists." % volume_id
raise exceptions.CommandError(msg)
paths = []
if vol_in_use:
conn_info = self.volumes_client.volumes.initialize_connection(
volume_id, conn_props)
protocol = conn_info['driver_volume_type']
conn = self._brick_get_connector(protocol,
use_multipath=use_multipath)
paths = conn.get_volume_paths(conn_info['data'])
return paths
def get_all_volume_paths(self, protocol, use_multipath=False):
"""Gets all volume paths on the system for a given protocol."""
conn = self._brick_get_connector(protocol, use_multipath=use_multipath)
paths = conn.get_all_available_volumes()
return paths
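
A hedged usage sketch of this wrapper (not part of the change; the credentials, volume ID and mountpoint are placeholders and error handling is omitted): attach a volume to the local host, read the resulting block-device path, then detach it.

import socket

from cinderclient import client as cinder_client
from freezer.engine.osbrick import client as brick_client

# Placeholder credentials; in freezer these come from the parsed CLI/env opts.
cinder = cinder_client.Client('2', 'USER', 'PASSWORD', 'PROJECT',
                              auth_url='http://controller:5000/v2.0')
volume_id = 'VOLUME_UUID'  # placeholder

brick = brick_client.Client(volumes_client=cinder)

# Attach the volume to this host; device_info['path'] is the local device.
device_info = brick.attach(volume_id, socket.gethostname(), '/mnt/backup')
print(device_info.get('path'))

# ... read or write data on the device ...

brick.detach(volume_id, device_info=device_info)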


@@ -0,0 +1,260 @@
"""
(c) Copyright 2017 Mirantis, Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import shutil
import socket
import subprocess
import tempfile
from oslo_config import cfg
from oslo_log import log
from freezer.common import client_manager
from freezer.engine import engine
from freezer.engine.osbrick import client as brick_client
from freezer.engine.tar import tar
from freezer.utils import utils
from freezer.utils import winutils
LOG = log.getLogger(__name__)
CONF = cfg.CONF
class OsbrickEngine(engine.BackupEngine):
def __init__(self, storage, **kwargs):
super(OsbrickEngine, self).__init__(storage=storage)
self.client = client_manager.get_client_manager(CONF)
self.cinder = self.client.create_cinder()
self.volume_info = None
self.compression_algo = kwargs.get('compression')
self.encrypt_pass_file = kwargs.get('encrypt_key')
self.dereference_symlink = kwargs.get('symlinks')
self.exclude = kwargs.get('exclude')
self.storage = storage
self.is_windows = winutils.is_windows()
self.dry_run = kwargs.get('dry_run', False)
self.max_segment_size = kwargs.get('max_segment_size')
@property
def name(self):
return "osbrick"
def metadata(self, backup_resource):
"""Construct metadata"""
return {
"engine_name": self.name,
"volume_info": self.volume_info
}
@staticmethod
def is_active(client_manager, id):
get_res = client_manager.get(id)
return get_res.status == 'available'
def backup_data(self, backup_path, manifest_path):
LOG.info("Starting os-brick engine backup stream")
volume = self.cinder.volumes.get(backup_path)
self.volume_info = volume.to_dict()
snapshot = self.cinder.volume_snapshots.create(backup_path, force=True)
LOG.info("[*] Creating volume snapshot")
utils.wait_for(
OsbrickEngine.is_active,
1,
100,
message="Waiting for volume {0} snapshot to become "
"active".format(backup_path),
kwargs={"client_manager": self.cinder.volume_snapshots,
"id": snapshot.id}
)
LOG.info("[*] Converting snapshot to volume")
backup_volume = self.cinder.volumes.create(snapshot.size,
snapshot_id=snapshot.id)
utils.wait_for(
OsbrickEngine.is_active,
1,
100,
message="Waiting for backup volume {0} to become "
"active".format(backup_volume.id),
kwargs={"client_manager": self.cinder.volumes,
"id": backup_volume.id}
)
try:
tmpdir = tempfile.mkdtemp()
except Exception:
LOG.error("Unable to create a tmp directory")
raise
LOG.info("[*] Trying to attach the volume to localhost")
brickclient = brick_client.Client(volumes_client=self.cinder)
attach_info = brickclient.attach(backup_volume.id,
socket.gethostname(),
tmpdir)
if not os.path.ismount(tmpdir):
subprocess.check_output(['sudo', 'mount', '-t', 'ext4',
attach_info.get('path'), tmpdir])
cwd = os.getcwd()
os.chdir(tmpdir)
tar_engine = tar.TarEngine(self.compression_algo,
self.dereference_symlink,
self.exclude, self.storage,
self.max_segment_size,
self.encrypt_pass_file, self.dry_run)
for data_chunk in tar_engine.backup_data('.', manifest_path):
yield data_chunk
os.chdir(cwd)
LOG.info("[*] Detaching volume")
subprocess.check_output(['sudo', 'umount', tmpdir])
shutil.rmtree(tmpdir)
brickclient.detach(backup_volume.id)
utils.wait_for(
OsbrickEngine.is_active,
1,
100,
message="Waiting for backup volume {0} to become "
"active".format(backup_volume.id),
kwargs={"client_manager": self.cinder.volumes,
"id": backup_volume.id}
)
LOG.info("[*] Removing backup volume and snapshot")
self.cinder.volumes.delete(backup_volume.id)
self.cinder.volume_snapshots.delete(snapshot, force=True)
LOG.info('Backup process completed')
def restore_level(self, restore_path, read_pipe, backup, except_queue):
try:
LOG.info("Restoring volume {} using os-brick engine".format(
restore_path))
new_volume = False
metadata = backup.metadata()
volume_info = metadata.get("volume_info")
try:
backup_volume = self.cinder.volumes.get(restore_path)
except Exception:
new_volume = True
LOG.info("[*] Volume doesn't exists, creating a new one")
backup_volume = self.cinder.volumes.create(volume_info['size'])
utils.wait_for(
OsbrickEngine.is_active,
1,
100,
message="Waiting for backup volume {0} to become "
"active".format(backup_volume.id),
kwargs={"client_manager": self.cinder.volumes,
"id": backup_volume.id}
)
if backup_volume.attachments:
LOG.info('Volume is in use, creating a copy from snapshot')
snapshot = self.cinder.volume_snapshots.create(
backup_volume.id, force=True)
utils.wait_for(
OsbrickEngine.is_active,
1,
100,
message="Waiting for volume {0} snapshot to become "
"active".format(backup_volume.id),
kwargs={"client_manager": self.cinder.volume_snapshots,
"id": snapshot.id}
)
LOG.info("[*] Converting snapshot to volume")
backup_volume = self.cinder.volumes.create(
snapshot.size, snapshot_id=snapshot.id)
utils.wait_for(
OsbrickEngine.is_active,
1,
100,
message="Waiting for backup volume {0} to become "
"active".format(backup_volume.id),
kwargs={"client_manager": self.cinder.volumes,
"id": backup_volume.id}
)
backup_volume = self.cinder.volumes.get(backup_volume.id)
if backup_volume.status != 'available':
raise RuntimeError('Unable to use volume for restore data')
try:
tmpdir = tempfile.mkdtemp()
except Exception:
LOG.error("Unable to create a tmp directory")
raise
LOG.info("[*] Trying to attach the volume to localhost")
brickclient = brick_client.Client(volumes_client=self.cinder)
attach_info = brickclient.attach(backup_volume.id,
socket.gethostname(),
tmpdir)
if not os.path.ismount(tmpdir):
if new_volume:
subprocess.check_output(['sudo', 'mkfs.ext4',
attach_info.get('path')])
subprocess.check_output(['sudo', 'mount', '-t', 'ext4',
attach_info.get('path'),
tmpdir])
tar_engine = tar.TarEngine(self.compression_algo,
self.dereference_symlink,
self.exclude, self.storage,
self.max_segment_size,
self.encrypt_pass_file, self.dry_run)
tar_engine.restore_level(tmpdir, read_pipe, backup,
except_queue)
subprocess.check_output(['sudo', 'umount', tmpdir])
shutil.rmtree(tmpdir)
LOG.info("[*] Detaching volume")
brickclient.detach(backup_volume.id)
utils.wait_for(
OsbrickEngine.is_active,
1,
100,
message="Waiting for backup volume {0} to become "
"active".format(backup_volume.id),
kwargs={"client_manager": self.cinder.volumes,
"id": backup_volume.id}
)
LOG.info('Restore process completed')
except Exception as e:
LOG.exception(e)
except_queue.put(e)
raise
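
The engine is not normally constructed by hand; the job layer below resolves it from --engine osbrick and calls backup()/restore() with the Cinder volume ID as the resource. As a rough sketch of what that amounts to (assuming the module is importable as freezer.engine.osbrick.osbrick, that oslo.config CONF already holds valid OpenStack credentials for the client manager, and that storage is an initialized freezer storage backend):

from freezer.engine.osbrick import osbrick  # assumed module path

# kwargs mirror the common engine options parsed from the CLI.
engine = osbrick.OsbrickEngine(storage=storage,
                               compression='gzip',
                               encrypt_key=None,
                               symlinks=False,
                               exclude='',
                               dry_run=False,
                               max_segment_size=67108864)

# Same call the BackupJob makes when backup_media == 'cinderbrick'.
engine.backup(backup_resource='VOLUME_UUID',  # placeholder volume ID
              hostname_backup_name='myhost_volume_backup',
              no_incremental=True,
              max_level=False,
              always_level=False,
              restart_always_level=False)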


@@ -0,0 +1,101 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from os_brick import exception
from os_brick.initiator import connector
class VolumeAction(object):
def __init__(self, volumes_client, volume_id):
self.volumes_client = volumes_client
self.volume_id = volume_id
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if traceback:
self.volumes_client.volumes.unreserve(self.volume_id)
return False
return True
class Reserve(VolumeAction):
def reserve(self):
self.volumes_client.volumes.reserve(self.volume_id)
class InitializeConnection(VolumeAction):
def initialize(self, brick_client, multipath, enforce_multipath):
conn_prop = brick_client.get_connector(multipath, enforce_multipath)
return self.volumes_client.volumes.initialize_connection(
self.volume_id, conn_prop)
class VerifyProtocol(VolumeAction):
# NOTE(e0ne): Only iSCSI and RBD based drivers are supported. NFS doesn't
# work. Drivers with other protocols are not tested yet.
SUPPORTED_PROTOCOLS = [connector.ISCSI, connector.RBD]
def verify(self, protocol):
protocol = protocol.upper()
# NOTE(e0ne): iSCSI drivers work without issues; RBD and NFS don't
# work. Drivers with other protocols are not tested yet.
if protocol not in VerifyProtocol.SUPPORTED_PROTOCOLS:
raise exception.ProtocolNotSupported(protocol=protocol)
class ConnectVolume(VolumeAction):
def connect(self, brick_connector, connection_data,
mountpoint, mode, hostname):
device_info = brick_connector.connect_volume(connection_data)
self.volumes_client.volumes.attach(self.volume_id, instance_uuid=None,
mountpoint=mountpoint,
mode=mode,
host_name=hostname)
return device_info
class VolumeDetachAction(VolumeAction):
def __exit__(self, type, value, traceback):
if traceback:
self.volumes_client.volumes.roll_detaching(self.volume_id)
return False
return True
class BeginDetach(VolumeDetachAction):
def reserve(self):
self.volumes_client.volumes.begin_detaching(self.volume_id)
class InitializeConnectionForDetach(InitializeConnection, VolumeDetachAction):
pass
class DisconnectVolume(VolumeDetachAction):
def disconnect(self, brick_connector, connection_data, device_info):
device_info = device_info or {}
brick_connector.disconnect_volume(connection_data, device_info)
class DetachVolume(VolumeDetachAction):
def detach(self, brick_client,
attachment_uuid, multipath, enforce_multipath):
conn_prop = brick_client.get_connector(multipath, enforce_multipath)
self.volumes_client.volumes.terminate_connection(self.volume_id,
conn_prop)
self.volumes_client.volumes.detach(self.volume_id, attachment_uuid)
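
These context managers exist so that a failure part-way through an attach or detach leaves the volume in a sane Cinder state: the base class unreserves the volume when an exception escapes the with block, and the detach variants roll back the 'detaching' status instead. A hedged sketch of the attach-side pattern (cinder is an authenticated cinderclient, wrapper is the os-brick client wrapper from the previous file, and brick_connector is an os-brick InitiatorConnector; all three are assumptions, not defined here):

from freezer.engine.osbrick import volume_actions as actions

# Each action unreserves the volume if an exception escapes its block, so a
# half-finished attach does not leave the volume stuck in 'attaching'.
with actions.Reserve(cinder, volume_id) as cmd:
    cmd.reserve()

with actions.InitializeConnection(cinder, volume_id) as cmd:
    connection = cmd.initialize(wrapper, False, False)

with actions.ConnectVolume(cinder, volume_id) as cmd:
    device_info = cmd.connect(brick_connector, connection['data'],
                              '/mnt/backup', 'rw', 'myhost')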


@@ -272,6 +272,16 @@ class BackupJob(Job):
LOG.info('Executing cinder snapshot. Volume ID: {0}'.format(
self.conf.cinder_vol_id))
backup_os.backup_cinder_by_glance(self.conf.cinder_vol_id)
elif backup_media == 'cinderbrick':
LOG.info('Executing cinder volume backup using os-brick. '
'Volume ID: {0}'.format(self.conf.cinderbrick_vol_id))
return self.engine.backup(
backup_resource=self.conf.cinderbrick_vol_id,
hostname_backup_name=self.conf.hostname_backup_name,
no_incremental=self.conf.no_incremental,
max_level=self.conf.max_level,
always_level=self.conf.always_level,
restart_always_level=self.conf.restart_always_level)
else:
raise Exception('unknown parameter backup_media %s' % backup_media)
return None
@@ -284,6 +294,7 @@ class RestoreJob(Job):
self.conf.nova_inst_id,
self.conf.cinder_vol_id,
self.conf.cindernative_vol_id,
self.conf.cinderbrick_vol_id,
self.conf.project_id]):
raise ValueError("--restore-abs-path is required")
if not self.conf.container:
@@ -363,6 +374,15 @@ class RestoreJob(Job):
res.restore_cinder(conf.cindernative_vol_id,
conf.cindernative_backup_id,
restore_timestamp)
elif conf.backup_media == 'cinderbrick':
LOG.info("Restoring cinder backup using os-brick. Volume ID {0}, "
"timestamp: {1}".format(conf.cinderbrick_vol_id,
restore_timestamp))
self.engine.restore(
hostname_backup_name=self.conf.hostname_backup_name,
restore_resource=conf.cinderbrick_vol_id,
overwrite=conf.overwrite,
recent_to_date=restore_timestamp)
else:
raise Exception("unknown backup type: %s" % conf.backup_media)
return {}


@@ -57,7 +57,8 @@ def freezer_main(backup_args):
max_segment_size = backup_args.max_segment_size
if (backup_args.storage ==
'swift' or
backup_args.backup_media in ['nova', 'cinder', 'cindernative']):
backup_args.backup_media in ['nova', 'cinder', 'cindernative',
'cinderbrick']):
backup_args.client_manager = client_manager.get_client_manager(
backup_args.__dict__)


@@ -0,0 +1,19 @@
---
prelude: >
Currently Freezer provides only basic features for backing up Cinder volumes.
The current approach presents significant challenges, mainly due to the
difficulty of downloading Cinder volumes without passing through Glance.
This can be an issue for time and scalability reasons (e.g. volumes a few
hundred GB in size, a higher probability of errors as more services take
part in the process, unavailability of cinder-backup).
features:
- |
Added a new backup engine called 'osbrick', which allows backing up and
restoring the content of Cinder volumes by attaching them directly to
localhost using the os-brick library.
issues:
- |
There could be read/write permission issues if freezer-agent does not have
the appropriate rights to read/write files on the mounted filesystem.


@@ -12,6 +12,7 @@ oslo.utils>=3.20.0 # Apache-2.0
oslo.log>=3.22.0 # Apache-2.0
oslo.config>=3.22.0 # Apache-2.0
keystoneauth1>=2.18.0 # Apache-2.0
os-brick>=1.8.0 # Apache-2.0
pycrypto>=2.6 # Public Domain
PyMySQL>=0.7.6 # MIT License