Browse Source

Fix backup using temp snapshot code path

- Non-disruptive backup using a temp snapshot works in Liberty
but was broken in Mitaka.
- Backing up a snapshot directly, without creating a temp volume,
worked when the feature was first added in Mitaka but was broken
later in the Mitaka cycle.

This patch provides a fix as follows:
1. It checks an existing config option backup_use_same_host.
   By default, this option is False.
2. When the backup service starts, it checks the above option. If
   the option is True, the backup service will find the volume manager
   on the current node and get the volume backends.
3. If the option is True and backup_use_temp_snapshot returns True,
   volume service returns the snapshot to the backup service in
   get_backup_device and backup will be performed using the snapshot.
   Otherwise, the volume will be returned as the backup device and
   the backup will be performed using the volume.

This fix is a Mitaka backport candidate. After this is merged,
we will provide a more complete fix which allows backup using
temp snapshot to happen on a remote node as well and we will
also clean up the code to get volume backends on the backup node.

The unit test test_backup_volume_inuse_temp_snapshot in test_volume.py
is removed. This test was testing backup_volume in
cinder/volume/driver.py, but this code path is not used any more.
Backup now starts in create_backup in backup/manager.py which calls
_run_backup which calls _attach_snapshot in volume/driver.py. The new
test test_create_backup_with_temp_snapshot in test_backup.py tests
the new code path.

Change-Id: I2e0c115e1dacf9eea73803cdbb1452bfeb56d87c
Closes-Bug: #1578034
Closes-Bug: #1575886
changes/43/321943/7
xing-yang 5 years ago
parent
commit
763dd8cdc5
  1. 98
      cinder/backup/manager.py
  2. 52
      cinder/tests/unit/backup/test_backup.py
  3. 67
      cinder/tests/unit/test_volume.py
  4. 6
      cinder/volume/driver.py

98
cinder/backup/manager.py

@ -91,11 +91,85 @@ class BackupManager(manager.SchedulerDependentManager):
self.service = importutils.import_module(self.driver_name)
self.az = CONF.storage_availability_zone
self.volume_managers = {}
# TODO(xyang): If backup_use_same_host is True, we'll find
# the volume backend on the backup node. This allows us
# to use a temp snapshot to backup an in-use volume if the
# driver supports it. This code should go away when we add
# support for backing up in-use volume using a temp snapshot
# on a remote node.
if CONF.backup_use_same_host:
self._setup_volume_drivers()
self.backup_rpcapi = backup_rpcapi.BackupAPI()
self.volume_rpcapi = volume_rpcapi.VolumeAPI()
super(BackupManager, self).__init__(service_name='backup',
*args, **kwargs)
def _get_volume_backend(self, host=None, allow_null_host=False):
    """Resolve the volume backend name for a volume host string.

    :param host: volume host string, e.g. 'myhost@lvmdriver'; the
        backend is taken to be the part after '@'.
    :param allow_null_host: when False, a None host raises
        BackupFailedToGetVolumeBackend instead of falling through
        to the default lookup.
    :returns: a backend key registered in self.volume_managers.
    :raises: exception.BackupFailedToGetVolumeBackend
    """
    if host is None:
        if not allow_null_host:
            msg = _("NULL host not allowed for volume backend lookup.")
            raise exception.BackupFailedToGetVolumeBackend(msg)
    else:
        LOG.debug("Checking hostname '%s' for backend info.", host)
        # NOTE(xyang): If host='myhost@lvmdriver', backend='lvmdriver'
        # by the logic below. This is different from extract_host.
        # vol_utils.extract_host(host, 'backend')='myhost@lvmdriver'.
        part = host.partition('@')
        if (part[1] == '@') and (part[2] != ''):
            backend = part[2]
            LOG.debug("Got backend '%s'.", backend)
            return backend

    LOG.info(_LI("Backend not found in hostname (%s) so using default."),
             host)

    if 'default' not in self.volume_managers:
        # For multi-backend we just pick the top of the list.
        # NOTE: dict.keys() returns a non-indexable view on Python 3,
        # so ``self.volume_managers.keys()[0]`` would raise TypeError;
        # use the first key from the iterator instead.
        return next(iter(self.volume_managers))

    return 'default'
def _get_manager(self, backend):
    """Return the volume manager registered for *backend*.

    A None backend is first resolved via the default backend lookup.

    :raises: exception.BackupFailedToGetVolumeBackend when no manager
        is registered under the resolved backend name.
    """
    LOG.debug("Manager requested for volume_backend '%s'.",
              backend)
    if backend is None:
        LOG.debug("Fetching default backend.")
        backend = self._get_volume_backend(allow_null_host=True)
    # EAFP: a single lookup instead of membership test + subscript.
    try:
        return self.volume_managers[backend]
    except KeyError:
        msg = (_("Volume manager for backend '%s' does not exist.") %
               (backend))
        raise exception.BackupFailedToGetVolumeBackend(msg)
def _get_driver(self, backend=None):
    """Return the volume driver for *backend*, wired to our DB handle."""
    LOG.debug("Driver requested for volume_backend '%s'.",
              backend)
    if backend is None:
        LOG.debug("Fetching default backend.")
        backend = self._get_volume_backend(allow_null_host=True)
    driver = self._get_manager(backend).driver
    # Make sure the driver talks to the backup service's DB session.
    driver.db = self.db
    return driver
def _setup_volume_drivers(self):
    """Instantiate a volume manager for each configured backend.

    Populates ``self.volume_managers`` keyed by backend name, or by
    'default' when multi-backend is not configured.
    """
    if not CONF.enabled_backends:
        # Single-backend deployment: one manager under 'default'.
        default = importutils.import_object(CONF.volume_manager)
        LOG.debug("Registering default backend %s.", default)
        self.volume_managers['default'] = default
        return

    for backend in CONF.enabled_backends:
        host = "%s@%s" % (CONF.host, backend)
        mgr = importutils.import_object(CONF.volume_manager,
                                        host=host,
                                        service_name=backend)
        backend_name = mgr.configuration.safe_get('volume_backend_name')
        LOG.debug("Registering backend %(backend)s (host=%(host)s "
                  "backend_name=%(backend_name)s).",
                  {'backend': backend, 'host': host,
                   'backend_name': backend_name})
        self.volume_managers[backend] = mgr
@property
def driver_name(self):
"""This function maps old backup services to backup drivers."""
@ -812,8 +886,12 @@ class BackupManager(manager.SchedulerDependentManager):
if not is_snapshot:
return self._attach_volume(context, backup_device, properties)
else:
msg = _("Can't attach snapshot.")
raise NotImplementedError(msg)
volume = self.db.volume_get(context, backup_device.volume_id)
host = volume_utils.extract_host(volume['host'], 'backend')
backend = self._get_volume_backend(host=host)
rc = self._get_driver(backend)._attach_snapshot(
context, backup_device, properties)
return rc
def _attach_volume(self, context, volume, properties):
"""Attach a volume."""
@ -849,13 +927,21 @@ class BackupManager(manager.SchedulerDependentManager):
return {'conn': conn, 'device': vol_handle, 'connector': connector}
def _detach_device(self, context, attach_info, volume,
def _detach_device(self, context, attach_info, device,
properties, is_snapshot=False, force=False):
"""Disconnect the volume from the host. """
"""Disconnect the volume or snapshot from the host. """
connector = attach_info['connector']
connector.disconnect_volume(attach_info['conn']['data'],
attach_info['device'])
rpcapi = self.volume_rpcapi
rpcapi.terminate_connection(context, volume, properties, force=force)
rpcapi.remove_export(context, volume)
if not is_snapshot:
rpcapi.terminate_connection(context, device, properties,
force=force)
rpcapi.remove_export(context, device)
else:
volume = self.db.volume_get(context, device.volume_id)
host = volume_utils.extract_host(volume['host'], 'backend')
backend = self._get_volume_backend(host=host)
self._get_driver(backend)._detach_snapshot(
context, attach_info, device, properties, force)

52
cinder/tests/unit/backup/test_backup.py

@ -20,6 +20,7 @@ import tempfile
import uuid
import mock
import os_brick
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_utils import importutils
@ -35,6 +36,7 @@ from cinder.objects import fields
from cinder import test
from cinder.tests.unit.backup import fake_service_with_verify as fake_service
from cinder.tests.unit import utils
from cinder.volume import driver
CONF = cfg.CONF
@ -589,6 +591,56 @@ class BackupTestCase(BaseBackupTest):
self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status'])
self.assertEqual(vol_size, backup['size'])
@mock.patch('cinder.utils.brick_get_connector_properties')
@mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device')
@mock.patch('cinder.utils.temporary_chown')
@mock.patch('six.moves.builtins.open')
def test_create_backup_with_temp_snapshot(self, mock_open,
                                          mock_temporary_chown,
                                          mock_get_backup_device,
                                          mock_get_conn):
    """Test backup of an in-use volume using a temp snapshot."""
    self.override_config('backup_use_same_host', True)
    self.backup_mgr._setup_volume_drivers()
    vol_size = 1
    vol_id = self._create_volume_db_entry(size=vol_size,
                                          previous_status='in-use')
    backup = self._create_backup_db_entry(volume_id=vol_id)
    # PEP8 E251: no spaces around '=' in a keyword argument.
    snap = self._create_snapshot_db_entry(volume_id=vol_id)

    vol = objects.Volume.get_by_id(self.ctxt, vol_id)
    # get_backup_device reports a snapshot as the backup device, which
    # drives create_backup down the temp-snapshot attach path.
    mock_get_backup_device.return_value = {'backup_device': snap,
                                           'secure_enabled': False,
                                           'is_snapshot': True, }

    attach_info = {
        'device': {'path': '/dev/null'},
        'conn': {'data': {}},
        'connector': os_brick.initiator.connector.FakeConnector(None)}
    mock_detach_snapshot = self.mock_object(driver.BaseVD,
                                            '_detach_snapshot')
    mock_attach_snapshot = self.mock_object(driver.BaseVD,
                                            '_attach_snapshot')
    mock_attach_snapshot.return_value = attach_info
    properties = {}
    mock_get_conn.return_value = properties
    mock_open.return_value = open('/dev/null', 'rb')

    self.backup_mgr.create_backup(self.ctxt, backup)

    # The snapshot (not the volume) must be attached, backed up and
    # then detached.
    mock_temporary_chown.assert_called_once_with('/dev/null')
    mock_attach_snapshot.assert_called_once_with(self.ctxt, snap,
                                                 properties)
    mock_get_backup_device.assert_called_once_with(self.ctxt, backup, vol)
    mock_get_conn.assert_called_once_with()
    mock_detach_snapshot.assert_called_once_with(self.ctxt, attach_info,
                                                 snap, properties, False)

    vol = objects.Volume.get_by_id(self.ctxt, vol_id)
    self.assertEqual('in-use', vol['status'])
    self.assertEqual('backing-up', vol['previous_status'])
    backup = db.backup_get(self.ctxt, backup.id)
    self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status'])
    self.assertEqual(vol_size, backup['size'])
@mock.patch('cinder.volume.utils.notify_about_backup_usage')
def test_create_backup_with_notify(self, notify):
"""Test normal backup creation with notifications."""

67
cinder/tests/unit/test_volume.py

@ -6027,73 +6027,6 @@ class GenericVolumeDriverTestCase(DriverTestCase):
self.volume.driver._delete_temp_volume.assert_called_once_with(
self.context, temp_vol)
@mock.patch.object(cinder.volume.driver.VolumeDriver,
                   'backup_use_temp_snapshot',
                   return_value=True)
@mock.patch.object(utils, 'temporary_chown')
@mock.patch('six.moves.builtins.open')
@mock.patch.object(os_brick.initiator.connector.LocalConnector,
                   'connect_volume')
@mock.patch.object(os_brick.initiator.connector.LocalConnector,
                   'check_valid_device',
                   return_value=True)
@mock.patch.object(os_brick.initiator.connector,
                   'get_connector_properties',
                   return_value={})
@mock.patch.object(db.sqlalchemy.api, 'volume_get')
def test_backup_volume_inuse_temp_snapshot(self, mock_volume_get,
                                           mock_get_connector_properties,
                                           mock_check_device,
                                           mock_connect_volume,
                                           mock_file_open,
                                           mock_temporary_chown,
                                           mock_temp_snapshot):
    """Test driver.backup_volume() of an in-use volume via temp snapshot.

    With backup_use_temp_snapshot forced True, backup_volume() is
    expected to create a temp snapshot, export and attach it, run the
    backup, then tear everything down again.
    """
    vol = tests_utils.create_volume(self.context,
                                    status='backing-up',
                                    previous_status='in-use')
    self.context.user_id = fake.USER_ID
    self.context.project_id = fake.PROJECT_ID
    backup = tests_utils.create_backup(self.context,
                                       vol['id'])
    backup_obj = objects.Backup.get_by_id(self.context, backup.id)
    attach_info = {'device': {'path': '/dev/null'},
                   'driver_volume_type': 'LOCAL',
                   'data': {}}
    backup_service = mock.Mock()

    # Stub out every snapshot-related driver entry point so only the
    # backup_volume() orchestration itself is exercised.
    self.volume.driver.terminate_connection_snapshot = mock.MagicMock()
    self.volume.driver.initialize_connection_snapshot = mock.MagicMock()
    self.volume.driver.create_snapshot = mock.MagicMock()
    self.volume.driver.delete_snapshot = mock.MagicMock()
    self.volume.driver.create_export_snapshot = mock.MagicMock()
    self.volume.driver.remove_export_snapshot = mock.MagicMock()

    mock_volume_get.return_value = vol
    mock_connect_volume.return_value = {'type': 'local',
                                        'path': '/dev/null'}
    f = mock_file_open.return_value = open('/dev/null', 'rb')

    # NOTE(review): bare attribute access below has no effect — looks
    # like a leftover from an earlier edit; verify before relying on it.
    self.volume.driver._connect_device
    backup_service.backup(backup_obj, f, None)

    # NOTE(review): these return values are configured after the
    # backup() call above but before backup_volume() below, which is
    # the call that actually consumes them.
    self.volume.driver.initialize_connection_snapshot.return_value = (
        attach_info)
    self.volume.driver.create_export_snapshot.return_value = (
        {'provider_location': '/dev/null',
         'provider_auth': 'xxxxxxxx'})

    self.volume.driver.backup_volume(self.context, backup_obj,
                                     backup_service)

    mock_volume_get.assert_called_with(self.context, vol['id'])
    # The full temp-snapshot lifecycle should have run: create, export,
    # attach, then detach, unexport and delete.
    self.assertTrue(self.volume.driver.create_snapshot.called)
    self.assertTrue(self.volume.driver.create_export_snapshot.called)
    self.assertTrue(
        self.volume.driver.initialize_connection_snapshot.called)
    self.assertTrue(
        self.volume.driver.terminate_connection_snapshot.called)
    self.assertTrue(self.volume.driver.remove_export_snapshot.called)
    self.assertTrue(self.volume.driver.delete_snapshot.called)
@mock.patch.object(utils, 'temporary_chown')
@mock.patch.object(os_brick.initiator.connector,
'get_connector_properties')

6
cinder/volume/driver.py

@ -278,6 +278,7 @@ iser_opts = [
CONF = cfg.CONF
CONF.register_opts(volume_opts)
CONF.register_opts(iser_opts)
CONF.import_opt('backup_use_same_host', 'cinder.backup.api')
@six.add_metaclass(abc.ABCMeta)
@ -995,7 +996,7 @@ class BaseVD(object):
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=ex_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
return (self._connect_device(conn), snapshot)
return self._connect_device(conn)
def _connect_device(self, conn):
# Use Brick's code to do attach/detach
@ -1052,8 +1053,7 @@ class BaseVD(object):
"""
backup_device = None
is_snapshot = False
if (self.backup_use_temp_snapshot() and
self.snapshot_remote_attachable()):
if self.backup_use_temp_snapshot() and CONF.backup_use_same_host:
(backup_device, is_snapshot) = (
self._get_backup_volume_temp_snapshot(context, backup))
else:

Loading…
Cancel
Save