Merge "Make ZFSonLinux driver handle snapshots of replicated shares properly"

Jenkins 2016-03-19 13:54:55 +00:00 committed by Gerrit Code Review
commit bf24cdbae8
2 changed files with 477 additions and 52 deletions
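For context, the driver now keeps snapshot metadata in its private storage under two keys: the snapshot tag is stored under the snapshot id and the dataset name under the share instance id, and the full ZFS snapshot name is rebuilt as '<dataset>@<tag>' when needed. A minimal sketch of that naming scheme, with plain dicts standing in for the driver's private_storage and purely illustrative ids:

    # Plain dicts stand in for the driver's private_storage; the ids and
    # names below are illustrative only.
    private_storage = {
        'fake_snapshot_id': {'snapshot_tag': 'prefix_fake_snapshot_instance_id'},
        'fake_share_instance_id': {'dataset_name': 'pool/fake_dataset'},
    }

    def saved_snapshot_name(snapshot_instance):
        # Mirrors _get_saved_snapshot_name(): the tag is keyed by snapshot_id,
        # the dataset by share_instance_id, and the full name is '<dataset>@<tag>'.
        tag = private_storage[snapshot_instance['snapshot_id']]['snapshot_tag']
        dataset = private_storage[
            snapshot_instance['share_instance_id']]['dataset_name']
        return dataset + '@' + tag

    print(saved_snapshot_name({
        'snapshot_id': 'fake_snapshot_id',
        'share_instance_id': 'fake_share_instance_id',
    }))  # pool/fake_dataset@prefix_fake_snapshot_instance_id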


@@ -480,13 +480,13 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
     def create_snapshot(self, context, snapshot, share_server=None):
         """Is called to create a snapshot."""
         dataset_name = self.private_storage.get(
-            snapshot['share_id'], 'dataset_name')
-        snapshot_name = self._get_snapshot_name(snapshot['id'])
-        snapshot_name = dataset_name + '@' + snapshot_name
+            snapshot['share_instance_id'], 'dataset_name')
+        snapshot_tag = self._get_snapshot_name(snapshot['id'])
+        snapshot_name = dataset_name + '@' + snapshot_tag
         self.private_storage.update(
-            snapshot['id'], {
+            snapshot['snapshot_id'], {
                 'entity_type': 'snapshot',
-                'snapshot_name': snapshot_name,
+                'snapshot_tag': snapshot_tag,
             }
         )
         self.zfs('snapshot', snapshot_name)
@@ -494,11 +494,19 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
     @ensure_share_server_not_provided
     def delete_snapshot(self, context, snapshot, share_server=None):
         """Is called to remove a snapshot."""
-        snapshot_name = self.private_storage.get(
-            snapshot['id'], 'snapshot_name')
-        pool_name = snapshot_name.split('/')[0]
+        return self._delete_snapshot(context, snapshot)

-        out, err = self.zfs('list', '-r', '-t', 'snapshot', pool_name)
+    def _get_saved_snapshot_name(self, snapshot_instance):
+        snapshot_tag = self.private_storage.get(
+            snapshot_instance['snapshot_id'], 'snapshot_tag')
+        dataset_name = self.private_storage.get(
+            snapshot_instance['share_instance_id'], 'dataset_name')
+        snapshot_name = dataset_name + '@' + snapshot_tag
+        return snapshot_name
+
+    def _delete_snapshot(self, context, snapshot):
+        snapshot_name = self._get_saved_snapshot_name(snapshot)
+        out, err = self.zfs('list', '-r', '-t', 'snapshot', snapshot_name)
         data = self.parse_zfs_answer(out)
         for datum in data:
             if datum['NAME'] == snapshot_name:
@@ -531,13 +539,24 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
                 'used_options': options,
             }
         )
-        snapshot_name = self.private_storage.get(
-            snapshot['id'], 'snapshot_name')
-        cmd = ['clone', snapshot_name, dataset_name]
+        snapshot_name = self._get_saved_snapshot_name(snapshot)
+        self.execute(
+            # NOTE(vponomaryov): SSH is used as workaround for 'execute'
+            # implementation restriction that does not support usage of '|'.
+            'ssh', ssh_cmd,
+            'sudo', 'zfs', 'send', '-vDp', snapshot_name, '|',
+            'sudo', 'zfs', 'receive', '-v', dataset_name,
+        )
+
         # Apply options based on used share type that may differ from
         # one used for original share.
         for option in options:
-            cmd.extend(['-o', option])
-        self.zfs(*cmd)
+            self.zfs('set', option, dataset_name)
+
+        # Delete with retry as right after creation it may be temporary busy.
+        self.execute_with_retry(
+            'sudo', 'zfs', 'destroy',
+            dataset_name + '@' + snapshot_name.split('@')[-1])
+
         return self._get_share_helper(
             share['share_proto']).create_exports(dataset_name)
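With this hunk, creating a share from a snapshot copies the data with a 'zfs send | zfs receive' pipe over SSH instead of 'zfs clone', applies the share-type options with 'zfs set', and destroys the snapshot that 'zfs receive' leaves behind on the new dataset. A rough stand-alone sketch of an equivalent pipeline; subprocess, the host, and the dataset names are assumptions of the sketch, not the driver's execute() plumbing:

    import subprocess

    src_snapshot = 'pool/manila_src_dataset@manila_snapshot_tag'  # placeholder
    dst_dataset = 'pool/manila_new_share_dataset'                 # placeholder

    # 'zfs send -vDp' produces a verbose, deduplicated stream that carries the
    # dataset properties; piping it into 'zfs receive -v' materializes an
    # independent dataset on the destination.
    subprocess.run(
        'ssh user@service-host '
        '"sudo zfs send -vDp %s | sudo zfs receive -v %s"'
        % (src_snapshot, dst_dataset),
        shell=True, check=True)

    # The receive leaves a snapshot on the new dataset; drop it once the copy
    # exists, mirroring the execute_with_retry() destroy above.
    subprocess.run(
        'ssh user@service-host "sudo zfs destroy %s@%s"'
        % (dst_dataset, src_snapshot.split('@')[-1]),
        shell=True, check=True)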
@@ -740,6 +759,11 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
                              access_rules, replica_snapshots,
                              share_server=None):
         """Syncs replica and updates its 'replica_state'."""
+        return self._update_replica_state(
+            context, replica_list, replica, replica_snapshots, access_rules)
+
+    def _update_replica_state(self, context, replica_list, replica,
+                              replica_snapshots=None, access_rules=None):
         active_replica = self._get_active_replica(replica_list)
         src_dataset_name = self.private_storage.get(
             active_replica['id'], 'dataset_name')
@@ -793,6 +817,7 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
         data = self.parse_zfs_answer(out)
         for datum in data:
             if (dst_dataset_name in datum['NAME'] and
+                    '@' + self.replica_snapshot_prefix in datum['NAME'] and
                     datum['NAME'].split('@')[-1] not in snap_references):
                 self._delete_dataset_or_snapshot_with_retry(datum['NAME'])
@@ -814,14 +839,15 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
                     'sudo', 'zfs', 'destroy', '-f', datum['NAME'],
                 )

-        # Apply access rules from original share
-        # TODO(vponomaryov): we should remove somehow rules that were
-        # deleted on active replica after creation of secondary replica.
-        # For the moment there will be difference and it can be considered
-        # as a bug.
-        self._get_share_helper(replica['share_proto']).update_access(
-            dst_dataset_name, access_rules, add_rules=[], delete_rules=[],
-            make_all_ro=True)
+        if access_rules:
+            # Apply access rules from original share
+            # TODO(vponomaryov): we should remove somehow rules that were
+            # deleted on active replica after creation of secondary replica.
+            # For the moment there will be difference and it can be considered
+            # as a bug.
+            self._get_share_helper(replica['share_proto']).update_access(
+                dst_dataset_name, access_rules, add_rules=[], delete_rules=[],
+                make_all_ro=True)

         # Return results
         return constants.REPLICA_STATE_IN_SYNC
@@ -966,3 +992,145 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
         self.zfs('set', 'readonly=off', dst_dataset_name)
         return list(replica_dict.values())
+
+    @ensure_share_server_not_provided
+    def create_replicated_snapshot(self, context, replica_list,
+                                   replica_snapshots, share_server=None):
+        """Create a snapshot and update across the replicas."""
+        active_replica = self._get_active_replica(replica_list)
+        src_dataset_name = self.private_storage.get(
+            active_replica['id'], 'dataset_name')
+        ssh_to_src_cmd = self.private_storage.get(
+            active_replica['id'], 'ssh_cmd')
+        replica_snapshots_dict = {
+            si['id']: {'id': si['id']} for si in replica_snapshots}
+
+        active_snapshot_instance_id = [
+            si['id'] for si in replica_snapshots
+            if si['share_instance_id'] == active_replica['id']][0]
+        snapshot_tag = self._get_snapshot_name(active_snapshot_instance_id)
+        # Replication should not be dependent on manually created snapshots
+        # so, create additional one, newer, that will be used for replication
+        # synchronizations.
+        repl_snapshot_tag = self._get_replication_snapshot_tag(active_replica)
+        src_snapshot_name = src_dataset_name + '@' + repl_snapshot_tag
+
+        self.private_storage.update(
+            replica_snapshots[0]['snapshot_id'], {
+                'entity_type': 'snapshot',
+                'snapshot_tag': snapshot_tag,
+            }
+        )
+
+        for tag in (snapshot_tag, repl_snapshot_tag):
+            self.execute(
+                'ssh', ssh_to_src_cmd,
+                'sudo', 'zfs', 'snapshot', src_dataset_name + '@' + tag,
+            )
+
+        # Populate snapshot to all replicas
+        for replica_snapshot in replica_snapshots:
+            replica_id = replica_snapshot['share_instance_id']
+            if replica_id == active_replica['id']:
+                replica_snapshots_dict[replica_snapshot['id']]['status'] = (
+                    constants.STATUS_AVAILABLE)
+                continue
+            previous_snapshot_tag = self.private_storage.get(
+                replica_id, 'repl_snapshot_tag')
+            dst_dataset_name = self.private_storage.get(
+                replica_id, 'dataset_name')
+            ssh_to_dst_cmd = self.private_storage.get(replica_id, 'ssh_cmd')
+
+            try:
+                # Send/receive diff between previous snapshot and last one
+                out, err = self.execute(
+                    'ssh', ssh_to_src_cmd,
+                    'sudo', 'zfs', 'send', '-vDRI',
+                    previous_snapshot_tag, src_snapshot_name, '|',
+                    'ssh', ssh_to_dst_cmd,
+                    'sudo', 'zfs', 'receive', '-vF', dst_dataset_name,
+                )
+            except exception.ProcessExecutionError as e:
+                LOG.warning(
+                    _LW("Failed to sync snapshot instance %(id)s. %(e)s"),
+                    {'id': replica_snapshot['id'], 'e': e})
+                replica_snapshots_dict[replica_snapshot['id']]['status'] = (
+                    constants.STATUS_ERROR)
+                continue
+
+            replica_snapshots_dict[replica_snapshot['id']]['status'] = (
+                constants.STATUS_AVAILABLE)
+
+            msg = ("Info about last replica '%(replica_id)s' "
+                   "sync is following: \n%(out)s")
+            LOG.debug(msg, {'replica_id': replica_id, 'out': out})
+
+            # Update latest replication snapshot for replica
+            self.private_storage.update(
+                replica_id, {'repl_snapshot_tag': repl_snapshot_tag})
+
+        # Update latest replication snapshot for currently active replica
+        self.private_storage.update(
+            active_replica['id'], {'repl_snapshot_tag': repl_snapshot_tag})
+
+        return list(replica_snapshots_dict.values())
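The sync above sends only the increment between the previously recorded replication tag and the freshly taken snapshot, and receives it with '-F' so the destination is first rolled back to the last common snapshot. A small sketch of how such an argument list could be composed; the helper name, hosts, and tag values are illustrative, not part of the change:

    def incremental_sync_cmd(src_ssh, dst_ssh, dst_dataset, prev_tag, new_snapshot):
        # '-I <prev_tag> <snapshot>' sends every snapshot between the two
        # points, '-R' includes the whole dataset tree, '-D' deduplicates the
        # stream; '-F' on receive rolls the destination back to the most
        # recent common snapshot before applying the increment.
        return [
            'ssh', src_ssh,
            'sudo', 'zfs', 'send', '-vDRI', prev_tag, new_snapshot, '|',
            'ssh', dst_ssh,
            'sudo', 'zfs', 'receive', '-vF', dst_dataset,
        ]

    print(incremental_sync_cmd(
        'user@src-host', 'user@dst-host', 'pool/replica_dataset',
        'repl_tag_old', 'pool/active_dataset@repl_tag_new'))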
+
+    @ensure_share_server_not_provided
+    def delete_replicated_snapshot(self, context, replica_list,
+                                   replica_snapshots, share_server=None):
+        """Delete a snapshot by deleting its instances across the replicas."""
+        active_replica = self._get_active_replica(replica_list)
+        replica_snapshots_dict = {
+            si['id']: {'id': si['id']} for si in replica_snapshots}
+
+        for replica_snapshot in replica_snapshots:
+            replica_id = replica_snapshot['share_instance_id']
+            snapshot_name = self._get_saved_snapshot_name(replica_snapshot)
+            if active_replica['id'] == replica_id:
+                self._delete_snapshot(context, replica_snapshot)
+                replica_snapshots_dict[replica_snapshot['id']]['status'] = (
+                    constants.STATUS_DELETED)
+                continue
+            ssh_cmd = self.private_storage.get(replica_id, 'ssh_cmd')
+            out, err = self.execute(
+                'ssh', ssh_cmd,
+                'sudo', 'zfs', 'list', '-r', '-t', 'snapshot', snapshot_name,
+            )
+            data = self.parse_zfs_answer(out)
+            for datum in data:
+                if datum['NAME'] != snapshot_name:
+                    continue
+                self.execute_with_retry(
+                    'ssh', ssh_cmd,
+                    'sudo', 'zfs', 'destroy', '-f', datum['NAME'],
+                )
+
+            self.private_storage.delete(replica_snapshot['id'])
+            replica_snapshots_dict[replica_snapshot['id']]['status'] = (
+                constants.STATUS_DELETED)
+
+        return list(replica_snapshots_dict.values())
+
+    @ensure_share_server_not_provided
+    def update_replicated_snapshot(self, context, replica_list,
+                                   share_replica, replica_snapshots,
+                                   replica_snapshot, share_server=None):
+        """Update the status of a snapshot instance that lives on a replica."""
+        self._update_replica_state(context, replica_list, share_replica)
+        snapshot_name = self._get_saved_snapshot_name(replica_snapshot)
+
+        out, err = self.zfs('list', '-r', '-t', 'snapshot', snapshot_name)
+        data = self.parse_zfs_answer(out)
+        snapshot_found = False
+        for datum in data:
+            if datum['NAME'] == snapshot_name:
+                snapshot_found = True
+                break
+
+        return_dict = {'id': replica_snapshot['id']}
+        if snapshot_found:
+            return_dict.update({'status': constants.STATUS_AVAILABLE})
+        else:
+            return_dict.update({'status': constants.STATUS_ERROR})
+
+        return return_dict


@@ -617,26 +617,38 @@ class ZFSonLinuxShareDriverTestCase(test.TestCase):
         self.configuration.zfs_dataset_snapshot_name_prefix = 'prefx_'
         self.mock_object(self.driver, 'zfs')
         snapshot = {
-            'id': 'fake_snapshot_id',
+            'id': 'fake_snapshot_instance_id',
+            'snapshot_id': 'fake_snapshot_id',
             'host': 'hostname@backend_name#bar',
             'size': 4,
-            'share_id': 'fake_share_id'
+            'share_instance_id': 'fake_share_id'
         }
-        snapshot_name = 'foo_data_set_name@prefx_fake_snapshot_id'
+        snapshot_name = 'foo_data_set_name@prefx_%s' % snapshot['id']
         self.driver.private_storage.update(
-            snapshot['share_id'], {'dataset_name': 'foo_data_set_name'})
+            snapshot['share_instance_id'],
+            {'dataset_name': 'foo_data_set_name'})

         self.driver.create_snapshot('fake_context', snapshot)

         self.driver.zfs.assert_called_once_with(
             'snapshot', snapshot_name)
         self.assertEqual(
-            snapshot_name,
+            snapshot_name.split('@')[-1],
             self.driver.private_storage.get(
-                snapshot['id'], 'snapshot_name'))
+                snapshot['snapshot_id'], 'snapshot_tag'))

     def test_delete_snapshot(self):
-        snap_name = 'foo_zpool/bar_dataset_name@prefix_fake_snapshot_id'
+        snapshot = {
+            'id': 'fake_snapshot_instance_id',
+            'snapshot_id': 'fake_snapshot_id',
+            'host': 'hostname@backend_name#bar',
+            'size': 4,
+            'share_instance_id': 'fake_share_id',
+        }
+        dataset_name = 'foo_zpool/bar_dataset_name'
+        snap_tag = 'prefix_%s' % snapshot['id']
+        snap_name = '%(dataset)s@%(tag)s' % {
+            'dataset': dataset_name, 'tag': snap_tag}
         mock_delete = self.mock_object(
             self.driver, '_delete_dataset_or_snapshot_with_retry')
         self.mock_object(zfs_driver.LOG, 'warning')
@@ -649,25 +661,33 @@ class ZFSonLinuxShareDriverTestCase(test.TestCase):
                 {'NAME': snap_name}],
                 []]))
         context = 'fake_context'
-        snapshot = {
-            'id': 'fake_snapshot_id',
-            'host': 'hostname@backend_name#bar',
-            'size': 4,
-            'share_id': 'fake_share_id',
-        }
         self.driver.private_storage.update(
-            snapshot['id'], {'snapshot_name': snap_name})
+            snapshot['snapshot_id'], {'snapshot_tag': snap_tag})
+        self.driver.private_storage.update(
+            snapshot['share_instance_id'], {'dataset_name': dataset_name})

         self.driver.delete_snapshot(context, snapshot, share_server=None)

         self.assertEqual(0, zfs_driver.LOG.warning.call_count)
         self.driver.zfs.assert_called_once_with(
-            'list', '-r', '-t', 'snapshot', 'foo_zpool')
+            'list', '-r', '-t', 'snapshot', snap_name)
         self.driver.parse_zfs_answer.assert_called_once_with('a')
         mock_delete.assert_called_once_with(snap_name)

     def test_delete_snapshot_absent(self):
-        snap_name = 'foo_zpool/bar_dataset_name@prefix_fake_snapshot_id'
+        snapshot = {
+            'id': 'fake_snapshot_instance_id',
+            'snapshot_id': 'fake_snapshot_id',
+            'host': 'hostname@backend_name#bar',
+            'size': 4,
+            'share_instance_id': 'fake_share_id',
+        }
+        dataset_name = 'foo_zpool/bar_dataset_name'
+        snap_tag = 'prefix_%s' % snapshot['id']
+        snap_name = '%(dataset)s@%(tag)s' % {
+            'dataset': dataset_name, 'tag': snap_tag}
         mock_delete = self.mock_object(
             self.driver, '_delete_dataset_or_snapshot_with_retry')
         self.mock_object(zfs_driver.LOG, 'warning')
@@ -677,20 +697,18 @@ class ZFSonLinuxShareDriverTestCase(test.TestCase):
             self.driver, 'parse_zfs_answer',
             mock.Mock(side_effect=[[], [{'NAME': snap_name}]]))
         context = 'fake_context'
-        snapshot = {
-            'id': 'fake_snapshot_id',
-            'host': 'hostname@backend_name#bar',
-            'size': 4,
-            'share_id': 'fake_share_id',
-        }
         self.driver.private_storage.update(
-            snapshot['id'], {'snapshot_name': snap_name})
+            snapshot['snapshot_id'], {'snapshot_tag': snap_tag})
+        self.driver.private_storage.update(
+            snapshot['share_instance_id'], {'dataset_name': dataset_name})

         self.driver.delete_snapshot(context, snapshot, share_server=None)

         self.assertEqual(0, mock_delete.call_count)
         self.driver.zfs.assert_called_once_with(
-            'list', '-r', '-t', 'snapshot', 'foo_zpool')
+            'list', '-r', '-t', 'snapshot', snap_name)
         self.driver.parse_zfs_answer.assert_called_once_with('a')
         zfs_driver.LOG.warning.assert_called_once_with(
             mock.ANY, {'id': snapshot['id'], 'name': snap_name})
@@ -706,6 +724,7 @@ class ZFSonLinuxShareDriverTestCase(test.TestCase):
     def test_create_share_from_snapshot(self):
         mock_get_helper = self.mock_object(self.driver, '_get_share_helper')
         self.mock_object(self.driver, 'zfs')
+        self.mock_object(self.driver, 'execute')
         mock_get_extra_specs_from_share = self.mock_object(
             zfs_driver.share_types,
             'get_extra_specs_from_share',
@@ -718,19 +737,26 @@ class ZFSonLinuxShareDriverTestCase(test.TestCase):
             'size': 4,
         }
         snapshot = {
-            'id': 'fake_snapshot_id',
+            'id': 'fake_snapshot_instance_id',
+            'snapshot_id': 'fake_snapshot_id',
             'host': 'hostname@backend_name#bar',
             'size': 4,
-            'share_id': share['id'],
+            'share_instance_id': share['id'],
         }
-        snap_name = 'foo_zpool/bar_dataset_name@prefix_fake_snapshot_id'
+        dataset_name = 'bar/subbar/some_prefix_%s' % share['id']
+        snap_tag = 'prefix_%s' % snapshot['id']
+        snap_name = '%(dataset)s@%(tag)s' % {
+            'dataset': dataset_name, 'tag': snap_tag}
         self.configuration.zfs_dataset_name_prefix = 'some_prefix_'
+        self.configuration.zfs_ssh_username = 'someuser'
         self.driver.share_export_ip = '1.1.1.1'
+        self.driver.service_ip = '2.2.2.2'
-        dataset_name = 'bar/subbar/some_prefix_fake_share_id'
         self.driver.private_storage.update(
-            snapshot['id'], {'snapshot_name': snap_name})
+            snapshot['snapshot_id'], {'snapshot_tag': snap_tag})
+        self.driver.private_storage.update(
+            snapshot['share_instance_id'], {'dataset_name': dataset_name})

         result = self.driver.create_share_from_snapshot(
             context, share, snapshot, share_server=None)
@@ -751,10 +777,20 @@ class ZFSonLinuxShareDriverTestCase(test.TestCase):
         self.assertEqual(
             'bar',
             self.driver.private_storage.get(share['id'], 'pool_name'))
-        self.driver.zfs.assert_called_once_with(
-            'clone', snap_name, 'bar/subbar/some_prefix_fake_share_id',
-            '-o', 'quota=4G', '-o', 'fook=foov', '-o', 'bark=barv',
-            '-o', 'readonly=off')
+        self.driver.execute.assert_has_calls([
+            mock.call(
+                'ssh', 'someuser@2.2.2.2',
+                'sudo', 'zfs', 'send', '-vDp', snap_name, '|',
+                'sudo', 'zfs', 'receive', '-v',
+                'bar/subbar/some_prefix_fake_share_id'),
+            mock.call(
+                'sudo', 'zfs', 'destroy',
+                'bar/subbar/some_prefix_fake_share_id@%s' % snap_tag),
+        ])
+        self.driver.zfs.assert_has_calls([
+            mock.call('set', opt, 'bar/subbar/some_prefix_fake_share_id')
+            for opt in ('quota=4G', 'bark=barv', 'readonly=off', 'fook=foov')
+        ], any_order=True)
         mock_get_helper.assert_has_calls([
             mock.call('NFS'), mock.call().create_exports(dataset_name)
         ])
@@ -1538,3 +1574,224 @@ class ZFSonLinuxShareDriverTestCase(test.TestCase):
                 old_repl_snapshot_tag,
                 self.driver.private_storage.get(
                     repl['id'], 'repl_snapshot_tag'))
+
+    def test_create_replicated_snapshot(self):
+        active_replica = {
+            'id': 'fake_active_replica_id',
+            'replica_state': zfs_driver.constants.REPLICA_STATE_ACTIVE,
+        }
+        replica = {
+            'id': 'fake_first_replica_id',
+            'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC,
+        }
+        second_replica = {
+            'id': 'fake_second_replica_id',
+            'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC,
+        }
+        replica_list = [replica, active_replica, second_replica]
+        snapshot_instances = [
+            {'id': 'si_%s' % r['id'], 'share_instance_id': r['id'],
+             'snapshot_id': 'some_snapshot_id'}
+            for r in replica_list
+        ]
+        src_dataset_name = (
+            'bar/subbar/fake_dataset_name_prefix%s' % active_replica['id'])
+        old_repl_snapshot_tag = (
+            self.driver._get_replication_snapshot_prefix(
+                active_replica) + 'foo')
+        self.driver.private_storage.update(
+            active_replica['id'],
+            {'dataset_name': src_dataset_name,
+             'ssh_cmd': 'fake_src_ssh_cmd',
+             'repl_snapshot_tag': old_repl_snapshot_tag}
+        )
+        for repl in (replica, second_replica):
+            self.driver.private_storage.update(
+                repl['id'],
+                {'dataset_name': (
+                    'bar/subbar/fake_dataset_name_prefix%s' % repl['id']),
+                 'ssh_cmd': 'fake_dst_ssh_cmd',
+                 'repl_snapshot_tag': old_repl_snapshot_tag}
+            )
+        self.mock_object(
+            self.driver, 'execute', mock.Mock(side_effect=[
+                ('a', 'b'),
+                ('c', 'd'),
+                ('e', 'f'),
+                exception.ProcessExecutionError('Second replica sync failure'),
+            ]))
+        self.configuration.zfs_dataset_name_prefix = 'fake_dataset_name_prefix'
+        self.configuration.zfs_dataset_snapshot_name_prefix = (
+            'fake_dataset_snapshot_name_prefix')
+        snap_tag_prefix = (
+            self.configuration.zfs_dataset_snapshot_name_prefix +
+            'si_%s' % active_replica['id'])
+        repl_snap_tag = 'fake_repl_tag'
+        self.mock_object(
+            self.driver, '_get_replication_snapshot_tag',
+            mock.Mock(return_value=repl_snap_tag))
+
+        result = self.driver.create_replicated_snapshot(
+            'fake_context', replica_list, snapshot_instances)
+
+        expected = [
+            {'id': 'si_fake_active_replica_id',
+             'status': zfs_driver.constants.STATUS_AVAILABLE},
+            {'id': 'si_fake_first_replica_id',
+             'status': zfs_driver.constants.STATUS_AVAILABLE},
+            {'id': 'si_fake_second_replica_id',
+             'status': zfs_driver.constants.STATUS_ERROR},
+        ]
+        for repl in expected:
+            self.assertIn(repl, result)
+        self.assertEqual(3, len(result))
+        for repl in (active_replica, replica):
+            self.assertEqual(
+                repl_snap_tag,
+                self.driver.private_storage.get(
+                    repl['id'], 'repl_snapshot_tag'))
+        self.assertEqual(
+            old_repl_snapshot_tag,
+            self.driver.private_storage.get(
+                second_replica['id'], 'repl_snapshot_tag'))
+        self.assertEqual(
+            snap_tag_prefix,
+            self.driver.private_storage.get(
+                snapshot_instances[0]['snapshot_id'], 'snapshot_tag'))
+        self.driver._get_replication_snapshot_tag.assert_called_once_with(
+            active_replica)
+
+    def test_delete_replicated_snapshot(self):
+        active_replica = {
+            'id': 'fake_active_replica_id',
+            'replica_state': zfs_driver.constants.REPLICA_STATE_ACTIVE,
+        }
+        replica = {
+            'id': 'fake_first_replica_id',
+            'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC,
+        }
+        second_replica = {
+            'id': 'fake_second_replica_id',
+            'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC,
+        }
+        replica_list = [replica, active_replica, second_replica]
+        active_snapshot_instance = {
+            'id': 'si_%s' % active_replica['id'],
+            'share_instance_id': active_replica['id'],
+            'snapshot_id': 'some_snapshot_id',
+            'share_id': 'some_share_id',
+        }
+        snapshot_instances = [
+            {'id': 'si_%s' % r['id'], 'share_instance_id': r['id'],
+             'snapshot_id': active_snapshot_instance['snapshot_id'],
+             'share_id': active_snapshot_instance['share_id']}
+            for r in (replica, second_replica)
+        ]
+        snapshot_instances.append(active_snapshot_instance)
+        for si in snapshot_instances:
+            self.driver.private_storage.update(
+                si['id'], {'snapshot_name': 'fake_snap_name_%s' % si['id']})
+        src_dataset_name = (
+            'bar/subbar/fake_dataset_name_prefix%s' % active_replica['id'])
+        old_repl_snapshot_tag = (
+            self.driver._get_replication_snapshot_prefix(
+                active_replica) + 'foo')
+        self.driver.private_storage.update(
+            active_replica['id'],
+            {'dataset_name': src_dataset_name,
+             'ssh_cmd': 'fake_src_ssh_cmd',
+             'repl_snapshot_tag': old_repl_snapshot_tag}
+        )
+        for replica in (replica, second_replica):
+            self.driver.private_storage.update(
+                replica['id'],
+                {'dataset_name': 'some_dataset_name',
+                 'ssh_cmd': 'fake_ssh_cmd'}
+            )
+        self.driver.private_storage.update(
+            snapshot_instances[0]['snapshot_id'],
+            {'snapshot_tag': 'foo_snapshot_tag'}
+        )
+        snap_name = 'fake_snap_name'
+        self.mock_object(self.driver, '_delete_snapshot')
+        self.mock_object(
+            self.driver, '_get_saved_snapshot_name',
+            mock.Mock(return_value=snap_name))
+        self.mock_object(self.driver, 'execute_with_retry')
+        self.mock_object(
+            self.driver, 'execute', mock.Mock(side_effect=[
+                ('a', 'b'),
+                ('c', 'd'),
+                exception.ProcessExecutionError('Second replica sync failure'),
+            ]))
+        self.mock_object(
+            self.driver, 'parse_zfs_answer', mock.Mock(side_effect=[
+                ({'NAME': 'foo'}, {'NAME': snap_name}),
+                ({'NAME': 'bar'}, {'NAME': snap_name}),
+            ]))
+        expected = sorted([
+            {'id': si['id'], 'status': 'deleted'} for si in snapshot_instances
+        ], key=lambda item: item['id'])
+
+        result = self.driver.delete_replicated_snapshot(
+            'fake_context', replica_list, snapshot_instances)
+
+        self.driver._get_saved_snapshot_name.assert_has_calls([
+            mock.call(si) for si in snapshot_instances
+        ])
+        self.driver._delete_snapshot.assert_called_once_with(
+            'fake_context', active_snapshot_instance)
+        self.driver.execute.assert_has_calls([
+            mock.call('ssh', 'fake_ssh_cmd', 'sudo', 'zfs', 'list', '-r', '-t',
+                      'snapshot', snap_name) for i in (0, 1)
+        ])
+        self.driver.execute_with_retry.assert_has_calls([
+            mock.call('ssh', 'fake_ssh_cmd', 'sudo', 'zfs', 'destroy',
+                      '-f', snap_name) for i in (0, 1)
+        ])
+        self.assertIsInstance(result, list)
+        self.assertEqual(3, len(result))
+        self.assertEqual(expected, sorted(result, key=lambda item: item['id']))
+
+    @ddt.data(
+        ({'NAME': 'fake'}, zfs_driver.constants.STATUS_ERROR),
+        ({'NAME': 'fake_snap_name'}, zfs_driver.constants.STATUS_AVAILABLE),
+    )
+    @ddt.unpack
+    def test_update_replicated_snapshot(self, parse_answer, expected_status):
+        snap_name = 'fake_snap_name'
+        self.mock_object(self.driver, '_update_replica_state')
+        self.mock_object(
+            self.driver, '_get_saved_snapshot_name',
+            mock.Mock(return_value=snap_name))
+        self.mock_object(
+            self.driver, 'zfs', mock.Mock(side_effect=[('a', 'b')]))
+        self.mock_object(
+            self.driver, 'parse_zfs_answer', mock.Mock(side_effect=[
+                [parse_answer]
+            ]))
+        fake_context = 'fake_context'
+        replica_list = ['foo', 'bar']
+        share_replica = 'quuz'
+        snapshot_instance = {'id': 'fake_snapshot_instance_id'}
+        snapshot_instances = ['q', 'w', 'e', 'r', 't', 'y']
+
+        result = self.driver.update_replicated_snapshot(
+            fake_context, replica_list, share_replica, snapshot_instances,
+            snapshot_instance)
+
+        self.driver._update_replica_state.assert_called_once_with(
+            fake_context, replica_list, share_replica)
+        self.driver._get_saved_snapshot_name.assert_called_once_with(
+            snapshot_instance)
+        self.driver.zfs.assert_called_once_with(
+            'list', '-r', '-t', 'snapshot', snap_name)
+        self.driver.parse_zfs_answer.assert_called_once_with('a')
+        self.assertIsInstance(result, dict)
+        self.assertEqual(2, len(result))
+        self.assertIn('status', result)
+        self.assertIn('id', result)
+        self.assertEqual(expected_status, result['status'])
+        self.assertEqual(snapshot_instance['id'], result['id'])