[SVF]: HyperSwap volume service status update
[Spectrum Virtualize Family] During HyperSwap volume service
initialization, the replication status was incorrectly reported as
disabled. With this patch, the replication status is updated based on
the status of the volume nodes.

Closes-Bug: #1931968
Change-Id: I0db4a3ff34ddd87f2e51cb7095952a7f61b0c326
parent 8930fbac3c
commit 8a240585b5
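Before the diff: the core of the fix is a per-site aggregation of node status, with replication reported as disabled only when every node of a site is neither online nor degraded. Below is a minimal standalone sketch of that rule, for orientation only; the function and variable names are illustrative, plain strings stand in for fields.ReplicationStatus values, and the node layout is made up. The actual driver code is in the diff that follows.

# Standalone illustration of the per-site aggregation rule; not driver code.
# 'enabled'/'disabled' stand in for fields.ReplicationStatus values.
def hyperswap_state(nodes):
    sites = {}
    for info in nodes.values():
        if info['site_id']:
            sites.setdefault(info['site_id'], []).append(info)
    for site_nodes in sites.values():
        # A site counts as down only when none of its nodes is usable.
        if all(n['status'] not in ('online', 'degraded') for n in site_nodes):
            return 'disabled', '{0} is down'.format(site_nodes[0]['site_name'])
    return 'enabled', None

# Example: both nodes of site1 offline -> ('disabled', 'site1 is down')
nodes = {'7': {'site_id': '1', 'site_name': 'site1', 'status': 'offline'},
         '8': {'site_id': '1', 'site_name': 'site1', 'status': 'offline'},
         '9': {'site_id': '2', 'site_name': 'site2', 'status': 'online'},
         '10': {'site_id': '2', 'site_name': 'site2', 'status': 'online'}}
print(hyperswap_state(nodes))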
@@ -4830,6 +4830,111 @@ class StorwizeSVCCommonDriverTestCase(test.TestCase):
             self.assertRaises(exception.InvalidInput,
                               self._driver._validate_pools_exist)

+    @ddt.data({'node1': 'online', 'node2': 'online', 'node3': 'online',
+               'node4': 'online', 'state': 'enabled', 'reason': None},
+              {'node1': 'online', 'node2': 'online', 'node3': 'offline',
+               'node4': 'offline', 'state': 'disabled',
+               'reason': 'site2 is down'},
+              {'node1': 'offline', 'node2': 'offline', 'node3': 'online',
+               'node4': 'online', 'state': 'disabled',
+               'reason': 'site1 is down'},
+              {'node1': 'offline', 'node2': 'offline', 'node3': 'offline',
+               'node4': 'offline', 'state': 'disabled',
+               'reason': 'site1 is down'})
+    @mock.patch.object(storwize_svc_common.StorwizeHelpers,
+                       'get_node_info')
+    def test_get_hyperswap_storage_state(self, node_data, get_node_info):
+        get_node_info.return_value = {'7': {'id': '7', 'name': 'node1',
+                                            'status': node_data['node1'],
+                                            'site_id': '1',
+                                            'site_name': 'site1'},
+                                      '8': {'id': '8', 'name': 'node2',
+                                            'status': node_data['node2'],
+                                            'site_id': '1',
+                                            'site_name': 'site1'},
+                                      '9': {'id': '9', 'name': 'node3',
+                                            'status': node_data['node3'],
+                                            'site_id': '2',
+                                            'site_name': 'site2'},
+                                      '10': {'id': '10', 'name': 'node4',
+                                             'status': node_data['node4'],
+                                             'site_id': '2',
+                                             'site_name': 'site2'}}
+
+        state, reason = self.driver.get_hyperswap_storage_state()
+
+        self.assertEqual(state, node_data['state'])
+        self.assertEqual(reason, node_data['reason'])
+
+    @ddt.data((True, 'online'),
+              (True, 'offline'),
+              (False, 'online'),
+              (False, 'offline'))
+    @mock.patch.object(storwize_svc_common.StorwizeSSH,
+                       'lsnode')
+    @ddt.unpack
+    def test_get_node_info(self, online_node, node_status, lsnode):
+        empty_nodes_info = {}
+        fake_lsnode_info = [{
+            'id': 1,
+            'name': 'test',
+            'IO_group_id': 'test_io_group',
+            'iscsi_name': 'test_iscsi',
+            'WWNN': '123456',
+            'status': node_status,
+            'WWPN': '8999',
+            'ipv4': '192.9.123.1',
+            'ipv6': '1.2.3.4',
+            'enabled_protocols': 'ipv6',
+            'site_id': '1783',
+            'site_name': 'test-sitename'
+        }]
+        lsnode.return_value = fake_lsnode_info
+        nodes = self.driver._helpers.get_node_info(online_node)
+        if not online_node or online_node and node_status == 'online':
+            self.assertIsNotNone(nodes)
+        elif online_node and node_status == 'offline':
+            self.assertEqual(nodes, empty_nodes_info)
+
+    @ddt.data((False, 'enabled', ''),
+              (False, 'disabled', 'site 2 down'),
+              (True, '', ''))
+    @mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver,
+                       'get_hyperswap_storage_state')
+    @ddt.unpack
+    def test_update_volume_stats(self, is_replica_enabled,
+                                 replication_status,
+                                 reason, get_hs_storage_state):
+        self._replica_enabled = is_replica_enabled
+        self.driver._update_volume_stats()
+        if not self._replica_enabled:
+            with mock.patch.object(
+                    storwize_svc_common.StorwizeHelpers,
+                    'is_system_topology_hyperswap') as is_hyperswap:
+                with mock.patch.object(
+                        storwize_svc_common.StorwizeHelpers,
+                        'get_node_info') as get_node_info:
+                    is_hyperswap.return_value = is_hyperswap
+                    if is_hyperswap:
+                        get_node_info.return_value = None
+                        get_hs_storage_state.side_effect = \
+                            exception.VolumeBackendAPIException(data='')
+                        self.assertRaises(exception.VolumeBackendAPIException,
+                                          get_hs_storage_state)
+                        get_hs_storage_state.return_value = (
+                            replication_status, reason)
+                        if replication_status != 'enabled':
+                            self.assertNotEqual(
+                                fields.ReplicationStatus.ENABLED,
+                                replication_status)
+                            self.assertIsNotNone(reason)
+                        else:
+                            self.assertEqual(fields.ReplicationStatus.ENABLED,
+                                             replication_status)
+                            self.assertEqual(reason, '')
+        else:
+            self.assertFalse(get_hs_storage_state.called)
+
     def _get_pool_volumes(self, pool):
         vdisks = self.sim._cmd_lsvdisks_from_filter('mdisk_grp_name', pool)
         return vdisks
@@ -1026,13 +1026,13 @@ class StorwizeHelpers(object):
                 return int(iogrp['id'])
         return None

-    def get_node_info(self):
+    def get_node_info(self, online_node=True):
         """Return dictionary containing information on system's nodes."""
         nodes = {}
         resp = self.ssh.lsnode()
         for node_data in resp:
             try:
-                if node_data['status'] != 'online':
+                if online_node and node_data['status'] != 'online':
                     continue
                 node = {}
                 node['id'] = node_data['id']
@@ -1048,6 +1048,8 @@ class StorwizeHelpers(object):
                 nodes[node['id']] = node
                 node['site_id'] = (node_data['site_id']
                                    if 'site_id' in node_data else None)
+                node['site_name'] = (node_data['site_name']
+                                     if 'site_name' in node_data else None)
             except KeyError:
                 self.handle_keyerror('lsnode', node_data)
         return nodes
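The get_node_info() change above amounts to an optional filter: with online_node=True (the default) offline nodes are skipped as before, while online_node=False returns every node so callers can inspect down sites. A toy sketch of just that filtering behaviour, with made-up lsnode rows and a hypothetical helper name:

# Toy filter mirroring the online_node flag added above; rows are made up
# and filter_nodes() is a hypothetical stand-in, not the driver helper.
rows = [{'id': '1', 'status': 'online'}, {'id': '2', 'status': 'offline'}]

def filter_nodes(rows, online_node=True):
    return {r['id']: r for r in rows
            if not online_node or r['status'] == 'online'}

print(sorted(filter_nodes(rows, online_node=True)))   # ['1']
print(sorted(filter_nodes(rows, online_node=False)))  # ['1', '2']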
@@ -6080,6 +6082,34 @@ class StorwizeSVCCommonDriver(san.SanDriver,

         return attr['mdisk_grp_name']

+    def get_hyperswap_storage_state(self):
+        storage_state = fields.ReplicationStatus.ENABLED
+        disabled_reason = None
+        site_node_info = {}
+        site_node_down_info = {}
+
+        storage_nodes = self._helpers.get_node_info(online_node=False)
+
+        for node, node_info in storage_nodes.items():
+            if node_info['site_id']:
+                site = node_info['site_id']
+                if site not in site_node_info:
+                    site_node_info[site] = []
+                    site_node_down_info[site] = {'nodes_down': 0}
+
+                site_node_info[site].append(node_info)
+                if node_info['status'] not in ['online', 'degraded']:
+                    site_node_down_info[site]['nodes_down'] += 1
+
+        for site, site_info in site_node_down_info.items():
+            if len(site_node_info[site]) == site_info['nodes_down']:
+                storage_state = fields.ReplicationStatus.DISABLED
+                site_name = site_node_info[site][0]['site_name']
+                disabled_reason = "{0} is down".format(site_name)
+                break
+
+        return storage_state, disabled_reason
+
     def _update_volume_stats(self):
         """Retrieve stats info from volume group."""

@@ -6109,6 +6139,18 @@ class StorwizeSVCCommonDriver(san.SanDriver,
         data['replication_enabled'] = self._replica_enabled
         data['replication_targets'] = self._get_replication_targets()
         data['consistent_group_replication_enabled'] = True
+
+        if self._helpers.is_system_topology_hyperswap(self._state):
+            data['replication_enabled'] = True
+            try:
+                state, reason = self.get_hyperswap_storage_state()
+                if state != fields.ReplicationStatus.ENABLED:
+                    data['replication_enabled'] = False
+                    data['disabled_reason'] = reason
+            except exception.VolumeBackendAPIException as exc:
+                LOG.warning("Failed to get node info. "
+                            "Exception: %(ex)s.", {'ex': exc.msg})
+
         self._stats = data

     def _build_pool_stats(self, pool):
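For a sense of how the published capabilities read after this change, here is a small hedged illustration; the dict below is hand-written to mimic the keys set in _update_volume_stats above (normally it would come from the driver's stats report), and the values are invented:

# Illustrative consumer-side check; the dict mimics the keys set above
# and the values are made up for the example.
def hyperswap_replication_summary(stats):
    if stats.get('replication_enabled'):
        return 'HyperSwap replication enabled'
    return 'HyperSwap replication disabled: {0}'.format(
        stats.get('disabled_reason', 'unknown'))

print(hyperswap_replication_summary(
    {'replication_enabled': False, 'disabled_reason': 'site1 is down'}))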
@@ -0,0 +1,8 @@
+---
+fixes:
+  - |
+    IBM Spectrum Virtualize Family driver `Bug #1931968
+    <https://bugs.launchpad.net/cinder/+bug/1931968>`_: Fixed an issue
+    with updating the replication status of the HyperSwap volume
+    service based on node status during initialization and periodic
+    calls.