Hitachi: Fix key error when backend is down

This patch fixes the cause of a KeyError in the Cinder scheduler when a
backend is down: the pool stats reported while the backend is unreachable
were missing capability keys that the scheduler reads.
The same fix applies to the OEM drivers (NEC V and HPE XP).

Closes-Bug: #2004140
Change-Id: I2735d902af256f979fc75a697f605b7a8ae65178
Atsushi Kawai 2023-02-27 03:31:07 +00:00
parent e096b2db0e
commit 3eb8bb739b
6 changed files with 66 additions and 7 deletions
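
For context, a minimal sketch of the failure mode (the helper and the literal values below are illustrative, not the actual scheduler code): before this patch, the pool stats reported while the backend was unreachable lacked capability keys such as 'thin_provisioning_support' and 'max_over_subscription_ratio', so scheduler-side code that indexes them raised KeyError. After the fix those keys are always set, and a down backend reports zero capacities with backend_state 'down'.

# Illustrative sketch only; pick_pool() is a stand-in for scheduler-side
# filtering logic, and the literal values are made up for the example.
def pick_pool(pool_stats):
    # Before the fix, these lookups raised KeyError for a down backend,
    # because the capability keys were only set when pool data was fetched.
    if pool_stats['thin_provisioning_support']:
        ratio = pool_stats['max_over_subscription_ratio']
        return pool_stats['free_capacity_gb'] * ratio
    return pool_stats['free_capacity_gb']

pool_while_backend_down = {
    'pool_name': 'VSP_pool0',             # illustrative name
    'thin_provisioning_support': True,    # now always present
    'thick_provisioning_support': False,
    'multiattach': True,
    'max_over_subscription_ratio': 20.0,  # normally derived from config
    'total_capacity_gb': 0,
    'free_capacity_gb': 0,
    'provisioned_capacity_gb': 0,
    'backend_state': 'down',
}

print(pick_pool(pool_while_backend_down))  # 0.0 rather than KeyError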


@@ -18,6 +18,7 @@ import functools
from unittest import mock
from oslo_config import cfg
from oslo_utils import units
import requests
from requests import models
@@ -877,11 +878,59 @@ class HBSDRESTFCDriverTest(test.TestCase):
        get_goodness_function.return_value = None
        stats = self.driver.get_volume_stats(True)
        self.assertEqual('Hitachi', stats['vendor_name'])
        self.assertEqual(self.configuration.volume_backend_name,
                         stats["pools"][0]['pool_name'])
        self.assertEqual(self.configuration.reserved_percentage,
                         stats["pools"][0]['reserved_percentage'])
        self.assertTrue(stats["pools"][0]['thin_provisioning_support'])
        self.assertFalse(stats["pools"][0]['thick_provisioning_support'])
        self.assertTrue(stats["pools"][0]['multiattach'])
        self.assertTrue(stats["pools"][0]['consistencygroup_support'])
        self.assertTrue(stats["pools"][0]['consistent_group_snapshot_enabled'])
        self.assertEqual(self.configuration.max_over_subscription_ratio,
                         stats["pools"][0]['max_over_subscription_ratio'])
        self.assertEqual(
            GET_POOL_RESULT['totalPoolCapacity'] // units.Ki,
            stats["pools"][0]['total_capacity_gb'])
        self.assertEqual(
            GET_POOL_RESULT['availableVolumeCapacity'] // units.Ki,
            stats["pools"][0]['free_capacity_gb'])
        self.assertEqual(
            GET_POOL_RESULT['totalLocatedCapacity'] // units.Ki,
            stats["pools"][0]['provisioned_capacity_gb'])
        self.assertEqual('up', stats["pools"][0]['backend_state'])
        self.assertEqual(1, request.call_count)
        self.assertEqual(1, get_filter_function.call_count)
        self.assertEqual(1, get_goodness_function.call_count)

    @mock.patch.object(driver.FibreChannelDriver, "get_goodness_function")
    @mock.patch.object(driver.FibreChannelDriver, "get_filter_function")
    @mock.patch.object(hbsd_rest.HBSDREST, "get_pool_info")
    def test_get_volume_stats_error(
            self, get_pool_info, get_filter_function, get_goodness_function):
        get_pool_info.side_effect = exception.VolumeDriverException(data='')
        get_filter_function.return_value = None
        get_goodness_function.return_value = None
        stats = self.driver.get_volume_stats(True)
        self.assertEqual('Hitachi', stats['vendor_name'])
        self.assertEqual(self.configuration.volume_backend_name,
                         stats["pools"][0]['pool_name'])
        self.assertEqual(self.configuration.reserved_percentage,
                         stats["pools"][0]['reserved_percentage'])
        self.assertTrue(stats["pools"][0]['thin_provisioning_support'])
        self.assertFalse(stats["pools"][0]['thick_provisioning_support'])
        self.assertTrue(stats["pools"][0]['multiattach'])
        self.assertTrue(stats["pools"][0]['consistencygroup_support'])
        self.assertTrue(stats["pools"][0]['consistent_group_snapshot_enabled'])
        self.assertEqual(self.configuration.max_over_subscription_ratio,
                         stats["pools"][0]['max_over_subscription_ratio'])
        self.assertEqual(0, stats["pools"][0]['total_capacity_gb'])
        self.assertEqual(0, stats["pools"][0]['free_capacity_gb'])
        self.assertEqual(0, stats["pools"][0]['provisioned_capacity_gb'])
        self.assertEqual('down', stats["pools"][0]['backend_state'])
        self.assertEqual(1, get_filter_function.call_count)
        self.assertEqual(1, get_goodness_function.call_count)

    @mock.patch.object(requests.Session, "request")
    @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
    @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get)


@@ -462,14 +462,21 @@ class HBSDCommon():
            pool_name=pool_name,
            reserved_percentage=self.conf.safe_get('reserved_percentage'),
            QoS_support=False,
            thin_provisioning_support=True,
            thick_provisioning_support=False,
            multiattach=True,
            consistencygroup_support=True,
            consistent_group_snapshot_enabled=True,
            max_over_subscription_ratio=(
                volume_utils.get_max_over_subscription_ratio(
                    self.conf.safe_get('max_over_subscription_ratio'),
                    True)),
            location_info=location_info
        ))
        if cap_data is None:
            single_pool.update(dict(
                total_capacity_gb=0,
                free_capacity_gb=0,
                provisioned_capacity_gb=0,
                backend_state='down'))
            self.output_log(MSG.POOL_INFO_RETRIEVAL_FAILED, pool=pool_name)
@@ -478,12 +485,7 @@ class HBSDCommon():
         single_pool.update(dict(
             total_capacity_gb=total_capacity,
             free_capacity_gb=free_capacity,
-            provisioned_capacity_gb=provisioned_capacity,
-            max_over_subscription_ratio=(
-                volume_utils.get_max_over_subscription_ratio(
-                    self.conf.safe_get('max_over_subscription_ratio'),
-                    True)),
-            thin_provisioning_support=True
+            provisioned_capacity_gb=provisioned_capacity
         ))
         single_pool.update(dict(backend_state='up'))
         return single_pool
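
Taken together, the two hunks above leave the stats-building path roughly in this shape (a condensed sketch, not the full driver method; how cap_data is obtained and unpacked is abbreviated): capability keys are set unconditionally, and only the capacity figures and backend_state depend on whether pool data could be retrieved.

# Condensed sketch of the resulting control flow (abbreviated; the real code
# also builds location_info and reads reserved_percentage and the
# over-subscription ratio from configuration).
def build_pool_stats(pool_name, cap_data):
    single_pool = dict(
        pool_name=pool_name,
        # Capability keys are now always present, even when the backend is down.
        thin_provisioning_support=True,
        thick_provisioning_support=False,
        multiattach=True,
        consistencygroup_support=True,
        consistent_group_snapshot_enabled=True,
        max_over_subscription_ratio=20.0,  # illustrative; taken from config
    )
    if cap_data is None:
        # Backend unreachable: zero capacities, pool marked down.
        single_pool.update(total_capacity_gb=0, free_capacity_gb=0,
                           provisioned_capacity_gb=0, backend_state='down')
    else:
        total, free, provisioned = cap_data
        single_pool.update(total_capacity_gb=total, free_capacity_gb=free,
                           provisioned_capacity_gb=provisioned,
                           backend_state='up')
    return single_pool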


@@ -79,6 +79,7 @@ class HBSDFCDriver(driver.FibreChannelDriver):
2.3.2 - Add specifies format of the names HostGroups/iSCSI Targets.
2.3.3 - Add GAD volume support.
2.3.4 - Support data deduplication and compression.
2.3.5 - Fix key error when backend is down.
"""


@@ -79,6 +79,7 @@ class HBSDISCSIDriver(driver.ISCSIDriver):
2.3.2 - Add specifies format of the names HostGroups/iSCSI Targets.
2.3.3 - Add GAD volume support.
2.3.4 - Support data deduplication and compression.
2.3.5 - Fix key error when backend is down.
"""


@@ -25,7 +25,7 @@ from oslo_utils import units
from cinder import exception
from cinder import utils as cinder_utils
-VERSION = '2.3.4'
+VERSION = '2.3.5'
CI_WIKI_NAME = 'Hitachi_VSP_CI'
PARAM_PREFIX = 'hitachi'
VENDOR_NAME = 'Hitachi'


@@ -0,0 +1,6 @@
---
fixes:
  - |
    Hitachi, NEC V, HPE XP drivers `bug #2004140
    <https://bugs.launchpad.net/cinder/+bug/2004140>`_: Fixed
    ``KeyError`` when a backend is down.