diff --git a/cinder/scheduler/host_manager.py b/cinder/scheduler/host_manager.py index 5c72ba8db35..fc219f4167b 100644 --- a/cinder/scheduler/host_manager.py +++ b/cinder/scheduler/host_manager.py @@ -335,6 +335,7 @@ class PoolState(BackendState): def update_from_volume_capability(self, capability, service=None): """Update information about a pool from its volume_node info.""" + LOG.debug("Updating capabilities for %s: %s", self.host, capability) self.update_capabilities(capability, service) if capability: if self.updated and self.updated > capability['timestamp']: @@ -355,13 +356,15 @@ class PoolState(BackendState): # provisioned_capacity_gb if it is not set. self.provisioned_capacity_gb = capability.get( 'provisioned_capacity_gb', self.allocated_capacity_gb) - self.max_over_subscription_ratio = capability.get( - 'max_over_subscription_ratio', - CONF.max_over_subscription_ratio) self.thin_provisioning_support = capability.get( 'thin_provisioning_support', False) self.thick_provisioning_support = capability.get( 'thick_provisioning_support', False) + + self.max_over_subscription_ratio = ( + utils.calculate_max_over_subscription_ratio( + capability, CONF.max_over_subscription_ratio)) + self.multiattach = capability.get('multiattach', False) def update_pools(self, capability): @@ -756,7 +759,8 @@ class HostManager(object): allocated = pool["allocated_capacity_gb"] provisioned = pool["provisioned_capacity_gb"] reserved = pool["reserved_percentage"] - ratio = pool["max_over_subscription_ratio"] + ratio = utils.calculate_max_over_subscription_ratio( + pool, CONF.max_over_subscription_ratio) support = pool["thin_provisioning_support"] virtual_free = utils.calculate_virtual_free_capacity( diff --git a/cinder/tests/unit/scheduler/fakes.py b/cinder/tests/unit/scheduler/fakes.py index ebb61194896..a90dd19f4c4 100644 --- a/cinder/tests/unit/scheduler/fakes.py +++ b/cinder/tests/unit/scheduler/fakes.py @@ -32,7 +32,7 @@ SERVICE_STATES = { 'free_capacity_gb': 1024, 
'allocated_capacity_gb': 0, 'provisioned_capacity_gb': 0, - 'max_over_subscription_ratio': 1.0, + 'max_over_subscription_ratio': '1.0', 'thin_provisioning_support': False, 'thick_provisioning_support': True, 'reserved_percentage': 10, @@ -44,7 +44,7 @@ SERVICE_STATES = { 'free_capacity_gb': 300, 'allocated_capacity_gb': 1748, 'provisioned_capacity_gb': 1748, - 'max_over_subscription_ratio': 1.5, + 'max_over_subscription_ratio': '1.5', 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'reserved_percentage': 10, @@ -55,7 +55,7 @@ SERVICE_STATES = { 'free_capacity_gb': 256, 'allocated_capacity_gb': 256, 'provisioned_capacity_gb': 256, - 'max_over_subscription_ratio': 2.0, + 'max_over_subscription_ratio': '2.0', 'thin_provisioning_support': False, 'thick_provisioning_support': True, 'reserved_percentage': 0, @@ -66,7 +66,7 @@ SERVICE_STATES = { 'free_capacity_gb': 200, 'allocated_capacity_gb': 1848, 'provisioned_capacity_gb': 2047, - 'max_over_subscription_ratio': 1.0, + 'max_over_subscription_ratio': '1.0', 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'reserved_percentage': 5, @@ -78,7 +78,7 @@ SERVICE_STATES = { 'free_capacity_gb': 'unknown', 'allocated_capacity_gb': 1548, 'provisioned_capacity_gb': 1548, - 'max_over_subscription_ratio': 1.0, + 'max_over_subscription_ratio': '1.0', 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'reserved_percentage': 5, @@ -98,7 +98,7 @@ SERVICE_STATES_WITH_POOLS = { 'free_capacity_gb': 1024, 'allocated_capacity_gb': 0, 'provisioned_capacity_gb': 0, - 'max_over_subscription_ratio': 1.0, + 'max_over_subscription_ratio': '1.0', 'thin_provisioning_support': False, 'thick_provisioning_support': True, 'reserved_percentage': 15, @@ -109,7 +109,7 @@ SERVICE_STATES_WITH_POOLS = { 'free_capacity_gb': 1008, 'allocated_capacity_gb': 0, 'provisioned_capacity_gb': 0, - 'max_over_subscription_ratio': 1.0, + 'max_over_subscription_ratio': '1.0', 'thin_provisioning_support': 
True, 'thick_provisioning_support': False, 'reserved_percentage': 15, @@ -131,7 +131,7 @@ SERVICE_STATES_WITH_POOLS = { 'free_capacity_gb': 300, 'allocated_capacity_gb': 1748, 'provisioned_capacity_gb': 1748, - 'max_over_subscription_ratio': 1.5, + 'max_over_subscription_ratio': '1.5', 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'reserved_percentage': 10, @@ -142,7 +142,7 @@ SERVICE_STATES_WITH_POOLS = { 'free_capacity_gb': 256, 'allocated_capacity_gb': 256, 'provisioned_capacity_gb': 256, - 'max_over_subscription_ratio': 2.0, + 'max_over_subscription_ratio': '2.0', 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'reserved_percentage': 10, @@ -160,7 +160,7 @@ SERVICE_STATES_WITH_POOLS = { 'free_capacity_gb': 256, 'allocated_capacity_gb': 256, 'provisioned_capacity_gb': 256, - 'max_over_subscription_ratio': 2.0, + 'max_over_subscription_ratio': '2.0', 'thin_provisioning_support': False, 'thick_provisioning_support': True, 'reserved_percentage': 0, @@ -180,7 +180,7 @@ SERVICE_STATES_WITH_POOLS = { 'free_capacity_gb': 'unknown', 'allocated_capacity_gb': 170, 'provisioned_capacity_gb': 170, - 'max_over_subscription_ratio': 1.0, + 'max_over_subscription_ratio': '1.0', 'thin_provisioning_support': False, 'thick_provisioning_support': True, 'QoS_support': True, @@ -192,7 +192,7 @@ SERVICE_STATES_WITH_POOLS = { 'free_capacity_gb': 'unknown', 'allocated_capacity_gb': 1548, 'provisioned_capacity_gb': 1548, - 'max_over_subscription_ratio': 1.0, + 'max_over_subscription_ratio': '1.0', 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'QoS_support': True, diff --git a/cinder/tests/unit/scheduler/test_capacity_weigher.py b/cinder/tests/unit/scheduler/test_capacity_weigher.py index b7474a2f4db..ac0c7c101fa 100644 --- a/cinder/tests/unit/scheduler/test_capacity_weigher.py +++ b/cinder/tests/unit/scheduler/test_capacity_weigher.py @@ -155,7 +155,8 @@ class CapacityWeigherTestCase(test.TestCase): } 
weighed_host = self._get_weighed_hosts( backend_info_list, - weight_properties=weight_properties)[0] + weight_properties=weight_properties) + weighed_host = weighed_host[0] self.assertEqual(0.0, weighed_host.weight) self.assertEqual(winner, utils.extract_host(weighed_host.obj.host)) diff --git a/cinder/tests/unit/scheduler/test_host_manager.py b/cinder/tests/unit/scheduler/test_host_manager.py index 3a5cf24d285..67c3474deb5 100644 --- a/cinder/tests/unit/scheduler/test_host_manager.py +++ b/cinder/tests/unit/scheduler/test_host_manager.py @@ -158,7 +158,7 @@ class HostManagerTestCase(test.TestCase): capab1 = {'pools': [{ 'pool_name': 'pool1', 'thick_provisioning_support': True, 'thin_provisioning_support': False, 'total_capacity_gb': 10, - 'free_capacity_gb': 10, 'max_over_subscription_ratio': 1, + 'free_capacity_gb': 10, 'max_over_subscription_ratio': '1', 'provisioned_capacity_gb': 0, 'allocated_capacity_gb': 0, 'reserved_percentage': 0}]} @@ -210,7 +210,7 @@ class HostManagerTestCase(test.TestCase): capab1 = {'pools': [{ 'pool_name': 'pool1', 'thick_provisioning_support': True, 'thin_provisioning_support': False, 'total_capacity_gb': 10, - 'free_capacity_gb': 10, 'max_over_subscription_ratio': 1, + 'free_capacity_gb': 10, 'max_over_subscription_ratio': '1', 'provisioned_capacity_gb': 0, 'allocated_capacity_gb': 0, 'reserved_percentage': 0}]} @@ -279,7 +279,7 @@ class HostManagerTestCase(test.TestCase): capab1 = {'pools': [{ 'pool_name': 'pool1', 'thick_provisioning_support': True, 'thin_provisioning_support': False, 'total_capacity_gb': 10, - 'free_capacity_gb': 10, 'max_over_subscription_ratio': 1, + 'free_capacity_gb': 10, 'max_over_subscription_ratio': '1', 'provisioned_capacity_gb': 0, 'allocated_capacity_gb': 0, 'reserved_percentage': 0}]} @@ -348,7 +348,7 @@ class HostManagerTestCase(test.TestCase): capab1 = {'pools': [{ 'pool_name': 'pool1', 'thick_provisioning_support': True, 'thin_provisioning_support': False, 'total_capacity_gb': 10, - 
'free_capacity_gb': 10, 'max_over_subscription_ratio': 1, + 'free_capacity_gb': 10, 'max_over_subscription_ratio': '1', 'provisioned_capacity_gb': 0, 'allocated_capacity_gb': 0, 'reserved_percentage': 0}]} @@ -364,7 +364,7 @@ class HostManagerTestCase(test.TestCase): capab2 = {'pools': [{ 'pool_name': 'pool1', 'thick_provisioning_support': True, 'thin_provisioning_support': False, 'total_capacity_gb': 10, - 'free_capacity_gb': 9, 'max_over_subscription_ratio': 1, + 'free_capacity_gb': 9, 'max_over_subscription_ratio': '1', 'provisioned_capacity_gb': 1, 'allocated_capacity_gb': 1, 'reserved_percentage': 0}]} @@ -423,14 +423,14 @@ class HostManagerTestCase(test.TestCase): capab1 = {'pools': [{ 'pool_name': 'pool1', 'thick_provisioning_support': True, 'thin_provisioning_support': False, 'total_capacity_gb': 10, - 'free_capacity_gb': 10, 'max_over_subscription_ratio': 1, + 'free_capacity_gb': 10, 'max_over_subscription_ratio': '1', 'provisioned_capacity_gb': 0, 'allocated_capacity_gb': 0, 'reserved_percentage': 0}]} capab2 = {'pools': [{ 'pool_name': 'pool1', 'thick_provisioning_support': True, 'thin_provisioning_support': False, 'total_capacity_gb': 10, - 'free_capacity_gb': 9, 'max_over_subscription_ratio': 1, + 'free_capacity_gb': 9, 'max_over_subscription_ratio': '1', 'provisioned_capacity_gb': 1, 'allocated_capacity_gb': 1, 'reserved_percentage': 0}]} @@ -862,7 +862,7 @@ class HostManagerTestCase(test.TestCase): 'free_capacity_gb': 28.01, 'allocated_capacity_gb': 2.0, 'provisioned_capacity_gb': 2.0, - 'max_over_subscription_ratio': 1.0, + 'max_over_subscription_ratio': '1.0', 'thin_provisioning_support': False, 'thick_provisioning_support': True, 'reserved_percentage': 5}, @@ -871,7 +871,7 @@ class HostManagerTestCase(test.TestCase): 'free_capacity_gb': 18.01, 'allocated_capacity_gb': 2.0, 'provisioned_capacity_gb': 2.0, - 'max_over_subscription_ratio': 2.0, + 'max_over_subscription_ratio': '2.0', 'thin_provisioning_support': True, 'thick_provisioning_support': 
False, 'reserved_percentage': 5}]} @@ -881,7 +881,7 @@ class HostManagerTestCase(test.TestCase): 'free_capacity_gb': 28.01, 'allocated_capacity_gb': 2.0, 'provisioned_capacity_gb': 2.0, - 'max_over_subscription_ratio': 1.0, + 'max_over_subscription_ratio': '1.0', 'thin_provisioning_support': False, 'thick_provisioning_support': True, 'reserved_percentage': 5}, @@ -890,7 +890,7 @@ class HostManagerTestCase(test.TestCase): 'free_capacity_gb': 18.01, 'allocated_capacity_gb': 2.0, 'provisioned_capacity_gb': 2.0, - 'max_over_subscription_ratio': 2.0, + 'max_over_subscription_ratio': '2.0', 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'reserved_percentage': 5}] @@ -901,7 +901,7 @@ class HostManagerTestCase(test.TestCase): 'free_capacity_gb': 28.01, 'allocated_capacity_gb': 2.0, 'provisioned_capacity_gb': 2.0, - 'max_over_subscription_ratio': 2.0, + 'max_over_subscription_ratio': '2.0', 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'reserved_percentage': 0}, @@ -910,7 +910,7 @@ class HostManagerTestCase(test.TestCase): 'free_capacity_gb': 18.01, 'allocated_capacity_gb': 2.0, 'provisioned_capacity_gb': 2.0, - 'max_over_subscription_ratio': 2.0, + 'max_over_subscription_ratio': '2.0', 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'reserved_percentage': 5}]} @@ -920,7 +920,7 @@ class HostManagerTestCase(test.TestCase): 'free_capacity_gb': 28.01, 'allocated_capacity_gb': 2.0, 'provisioned_capacity_gb': 2.0, - 'max_over_subscription_ratio': 2.0, + 'max_over_subscription_ratio': '2.0', 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'reserved_percentage': 0}] diff --git a/cinder/tests/unit/test_utils.py b/cinder/tests/unit/test_utils.py index f7ab2dd35d1..0591e8d8b8d 100644 --- a/cinder/tests/unit/test_utils.py +++ b/cinder/tests/unit/test_utils.py @@ -34,6 +34,19 @@ from cinder import test from cinder.tests.unit import fake_constants as fake from cinder import utils 
+POOL_CAPS = {'total_capacity_gb': 0, + 'free_capacity_gb': 0, + 'allocated_capacity_gb': 0, + 'provisioned_capacity_gb': 0, + 'max_over_subscription_ratio': '1.0', + 'thin_provisioning_support': False, + 'thick_provisioning_support': True, + 'reserved_percentage': 0, + 'volume_backend_name': 'lvm1', + 'timestamp': timeutils.utcnow(), + 'multiattach': True, + 'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'} + class ExecuteTestCase(test.TestCase): @mock.patch('cinder.utils.processutils.execute') @@ -1532,3 +1545,82 @@ class TestCheckMetadataProperties(test.TestCase): self.assertRaises(exception.InvalidInput, utils.check_metadata_properties, meta) + + +POOL_CAP1 = {'allocated_capacity_gb': 10, 'provisioned_capacity_gb': 10, + 'thin_provisioning_support': False, 'total_capacity_gb': 10, + 'free_capacity_gb': 10, 'max_over_subscription_ratio': 1.0} +POOL_CAP2 = {'allocated_capacity_gb': 10, 'provisioned_capacity_gb': 10, + 'thin_provisioning_support': True, 'total_capacity_gb': 100, + 'free_capacity_gb': 95, 'max_over_subscription_ratio': None} +POOL_CAP3 = {'allocated_capacity_gb': 0, 'provisioned_capacity_gb': 0, + 'thin_provisioning_support': True, 'total_capacity_gb': 100, + 'free_capacity_gb': 100, 'max_over_subscription_ratio': 'auto'} +POOL_CAP4 = {'allocated_capacity_gb': 100, + 'thin_provisioning_support': True, 'total_capacity_gb': 2500, + 'free_capacity_gb': 500, 'max_over_subscription_ratio': 'auto'} +POOL_CAP5 = {'allocated_capacity_gb': 10000, + 'thin_provisioning_support': True, 'total_capacity_gb': 2500, + 'free_capacity_gb': 0.1, 'max_over_subscription_ratio': 'auto'} +POOL_CAP6 = {'allocated_capacity_gb': 1000, 'provisioned_capacity_gb': 1010, + 'thin_provisioning_support': True, 'total_capacity_gb': 2500, + 'free_capacity_gb': 2500, 'max_over_subscription_ratio': 'auto'} +POOL_CAP7 = {'allocated_capacity_gb': 10, 'provisioned_capacity_gb': 10, + 'thin_provisioning_support': True, 'total_capacity_gb': 10, + 'free_capacity_gb': 10} +POOL_CAP8 = 
{'allocated_capacity_gb': 10, 'provisioned_capacity_gb': 10, + 'thin_provisioning_support': True, 'total_capacity_gb': 10, + 'free_capacity_gb': 10, 'max_over_subscription_ratio': '15.5'} +POOL_CAP9 = {'allocated_capacity_gb': 10, 'provisioned_capacity_gb': 10, + 'thin_provisioning_support': True, 'total_capacity_gb': 10, + 'free_capacity_gb': 'unknown', + 'max_over_subscription_ratio': '15.5'} +POOL_CAP10 = {'allocated_capacity_gb': 10, 'provisioned_capacity_gb': 10, + 'thin_provisioning_support': True, + 'total_capacity_gb': 'infinite', 'free_capacity_gb': 10, + 'max_over_subscription_ratio': '15.5'} + + +@ddt.ddt +class TestAutoMaxOversubscriptionRatio(test.TestCase): + @ddt.data({'data': POOL_CAP1, + 'global_max_over_subscription_ratio': 'auto', + 'expected_result': 1.0}, + {'data': POOL_CAP2, + 'global_max_over_subscription_ratio': 'auto', + 'expected_result': 2.67}, + {'data': POOL_CAP3, + 'global_max_over_subscription_ratio': '20.0', + 'expected_result': 20}, + {'data': POOL_CAP4, + 'global_max_over_subscription_ratio': '20.0', + 'expected_result': 1.05}, + {'data': POOL_CAP5, + 'global_max_over_subscription_ratio': '10.0', + 'expected_result': 5.0}, + {'data': POOL_CAP6, + 'global_max_over_subscription_ratio': '20.0', + 'expected_result': 1011.0}, + {'data': POOL_CAP7, + 'global_max_over_subscription_ratio': 'auto', + 'expected_result': 11.0}, + {'data': POOL_CAP8, + 'global_max_over_subscription_ratio': '20.0', + 'expected_result': 15.5}, + {'data': POOL_CAP9, + 'global_max_over_subscription_ratio': '20.0', + 'expected_result': 1.0}, + {'data': POOL_CAP10, + 'global_max_over_subscription_ratio': '20.0', + 'expected_result': 1.0}, + ) + @ddt.unpack + def test_calculate_max_over_subscription_ratio( + self, data, expected_result, global_max_over_subscription_ratio): + + result = utils.calculate_max_over_subscription_ratio( + data, global_max_over_subscription_ratio) + # Just for sake of testing we reduce the float precision + if result is not None: + result = 
round(result, 2) + self.assertEqual(expected_result, result) diff --git a/cinder/tests/unit/test_volume_utils.py b/cinder/tests/unit/test_volume_utils.py index 045046b7f10..632cc32df22 100644 --- a/cinder/tests/unit/test_volume_utils.py +++ b/cinder/tests/unit/test_volume_utils.py @@ -1055,3 +1055,28 @@ class VolumeUtilsTestCase(test.TestCase): group = fake_group.fake_group_obj( None, group_type_id=fake.GROUP_TYPE_ID) self.assertTrue(volume_utils.is_group_a_cg_snapshot_type(group)) + + @ddt.data({'max_over_subscription_ratio': '10', 'supports_auto': True}, + {'max_over_subscription_ratio': 'auto', 'supports_auto': True}, + {'max_over_subscription_ratio': 'auto', 'supports_auto': False}, + {'max_over_subscription_ratio': '1.2', 'supports_auto': False},) + @ddt.unpack + def test_get_max_over_subscription_ratio(self, + max_over_subscription_ratio, + supports_auto): + + if not supports_auto and max_over_subscription_ratio == 'auto': + self.assertRaises(exception.VolumeDriverException, + volume_utils.get_max_over_subscription_ratio, + max_over_subscription_ratio, supports_auto) + elif not supports_auto: + mosr = volume_utils.get_max_over_subscription_ratio( + max_over_subscription_ratio, supports_auto) + self.assertEqual(float(max_over_subscription_ratio), mosr) + else: # supports_auto + mosr = volume_utils.get_max_over_subscription_ratio( + max_over_subscription_ratio, supports_auto) + if max_over_subscription_ratio == 'auto': + self.assertEqual(max_over_subscription_ratio, mosr) + else: + self.assertEqual(float(max_over_subscription_ratio), mosr) diff --git a/cinder/tests/unit/volume/drivers/dell_emc/vmax/test_vmax.py b/cinder/tests/unit/volume/drivers/dell_emc/vmax/test_vmax.py index 966a19a6d5f..f62f8b6cfe0 100644 --- a/cinder/tests/unit/volume/drivers/dell_emc/vmax/test_vmax.py +++ b/cinder/tests/unit/volume/drivers/dell_emc/vmax/test_vmax.py @@ -1007,7 +1007,7 @@ class FakeXML(object): class VMAXUtilsTest(test.TestCase): def setUp(self): self.data = 
VMAXCommonData() - + volume_utils.get_max_over_subscription_ratio = mock.Mock() super(VMAXUtilsTest, self).setUp() config_group = 'UtilsTests' fake_xml = FakeXML().create_fake_config_file( @@ -1487,6 +1487,7 @@ class VMAXRestTest(test.TestCase): self.data = VMAXCommonData() super(VMAXRestTest, self).setUp() + volume_utils.get_max_over_subscription_ratio = mock.Mock() config_group = 'RestTests' fake_xml = FakeXML().create_fake_config_file( config_group, self.data.port_group_name_f) @@ -2865,6 +2866,7 @@ class VMAXProvisionTest(test.TestCase): self.data = VMAXCommonData() super(VMAXProvisionTest, self).setUp() + volume_utils.get_max_over_subscription_ratio = mock.Mock() config_group = 'ProvisionTests' self.fake_xml = FakeXML().create_fake_config_file( config_group, self.data.port_group_name_i) @@ -3367,6 +3369,8 @@ class VMAXCommonTest(test.TestCase): self.data = VMAXCommonData() super(VMAXCommonTest, self).setUp() + self.mock_object(volume_utils, 'get_max_over_subscription_ratio', + return_value=1.0) config_group = 'CommonTests' self.fake_xml = FakeXML().create_fake_config_file( config_group, self.data.port_group_name_f) @@ -5075,6 +5079,7 @@ class VMAXFCTest(test.TestCase): super(VMAXFCTest, self).setUp() config_group = 'FCTests' + volume_utils.get_max_over_subscription_ratio = mock.Mock() self.fake_xml = FakeXML().create_fake_config_file( config_group, self.data.port_group_name_f) self.configuration = FakeConfiguration(self.fake_xml, config_group) @@ -5334,6 +5339,7 @@ class VMAXISCSITest(test.TestCase): config_group = 'ISCSITests' self.fake_xml = FakeXML().create_fake_config_file( config_group, self.data.port_group_name_i) + volume_utils.get_max_over_subscription_ratio = mock.Mock() configuration = FakeConfiguration(self.fake_xml, config_group) rest.VMAXRest._establish_rest_session = mock.Mock( return_value=FakeRequestsSession()) @@ -5639,6 +5645,7 @@ class VMAXMaskingTest(test.TestCase): super(VMAXMaskingTest, self).setUp() + 
volume_utils.get_max_over_subscription_ratio = mock.Mock() configuration = mock.Mock() configuration.safe_get.return_value = 'MaskingTests' configuration.config_group = 'MaskingTests' @@ -6568,6 +6575,7 @@ class VMAXCommonReplicationTest(test.TestCase): 'remote_pool': self.data.srp2, 'rdf_group_label': self.data.rdf_group_name, 'allow_extend': 'True'} + volume_utils.get_max_over_subscription_ratio = mock.Mock() configuration = FakeConfiguration( self.fake_xml, config_group, replication_device=self.replication_device) diff --git a/cinder/tests/unit/volume/drivers/test_nfs.py b/cinder/tests/unit/volume/drivers/test_nfs.py index c4f0247791b..da76418bc61 100644 --- a/cinder/tests/unit/volume/drivers/test_nfs.py +++ b/cinder/tests/unit/volume/drivers/test_nfs.py @@ -33,6 +33,7 @@ from cinder.tests.unit import fake_volume from cinder.volume import configuration as conf from cinder.volume.drivers import nfs from cinder.volume.drivers import remotefs +from cinder.volume import utils as vutils class RemoteFsDriverTestCase(test.TestCase): @@ -414,6 +415,9 @@ class NfsDriverTestCase(test.TestCase): self.configuration.nas_mount_options = None self.configuration.volume_dd_blocksize = '1M' + self.mock_object(vutils, 'get_max_over_subscription_ratio', + return_value=1) + self.context = context.get_admin_context() def _set_driver(self, extra_confs=None): diff --git a/cinder/tests/unit/volume/drivers/test_pure.py b/cinder/tests/unit/volume/drivers/test_pure.py index 56d58f76d94..b1bb42ffa75 100644 --- a/cinder/tests/unit/volume/drivers/test_pure.py +++ b/cinder/tests/unit/volume/drivers/test_pure.py @@ -27,6 +27,7 @@ from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_group from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume +from cinder.volume import utils as volume_utis def fake_retry(exceptions, interval=1, retries=3, backoff_rate=2): @@ -2688,6 +2689,8 @@ class 
PureVolumeUpdateStatsTestCase(PureBaseSharedDriverTestCase): config_ratio, expected_ratio, auto): + volume_utis.get_max_over_subscription_ratio = mock.Mock( + return_value=expected_ratio) self.mock_config.pure_automatic_max_oversubscription_ratio = auto self.mock_config.max_over_subscription_ratio = config_ratio actual_ratio = self.driver._get_thin_provisioning(provisioned, used) diff --git a/cinder/tests/unit/volume/drivers/test_zfssa.py b/cinder/tests/unit/volume/drivers/test_zfssa.py index 915a63dfb75..af5b41544f8 100644 --- a/cinder/tests/unit/volume/drivers/test_zfssa.py +++ b/cinder/tests/unit/volume/drivers/test_zfssa.py @@ -37,6 +37,7 @@ from cinder.volume.drivers.zfssa import webdavclient from cinder.volume.drivers.zfssa import zfssaiscsi as iscsi from cinder.volume.drivers.zfssa import zfssanfs from cinder.volume.drivers.zfssa import zfssarest as rest +from cinder.volume import utils as volume_utils nfs_logbias = 'latency' @@ -137,6 +138,8 @@ class TestZFSSAISCSIDriver(test.TestCase): def setUp(self, _factory_zfssa): super(TestZFSSAISCSIDriver, self).setUp() self._create_fake_config() + self.mock_object(volume_utils, 'get_max_over_subscription_ratio', + return_value=1.0) _factory_zfssa.return_value = mock.MagicMock(spec=rest.ZFSSAApi) iscsi.ZFSSAISCSIDriver._execute = fake_utils.fake_execute self.drv = iscsi.ZFSSAISCSIDriver(configuration=self.configuration) @@ -1038,6 +1041,8 @@ class TestZFSSANFSDriver(test.TestCase): super(TestZFSSANFSDriver, self).setUp() self._create_fake_config() _factory_zfssa.return_value = mock.MagicMock(spec=rest.ZFSSANfsApi) + self.mock_object(volume_utils, 'get_max_over_subscription_ratio', + return_value=1.0) self.drv = zfssanfs.ZFSSANFSDriver(configuration=self.configuration) self.drv._execute = fake_utils.fake_execute self.drv.do_setup({}) diff --git a/cinder/tests/unit/volume/test_driver.py b/cinder/tests/unit/volume/test_driver.py index 643186e6d82..d411de31302 100644 --- a/cinder/tests/unit/volume/test_driver.py +++ 
b/cinder/tests/unit/volume/test_driver.py @@ -562,6 +562,34 @@ class GenericVolumeDriverTestCase(BaseDriverTestCase): self.assertTrue(terminate_mock.called) self.assertEqual(3, exc.context.call_count) + @ddt.data({'cfg_value': '10', 'valid': True}, + {'cfg_value': 'auto', 'valid': True}, + {'cfg_value': '1', 'valid': True}, + {'cfg_value': '1.2', 'valid': True}, + {'cfg_value': '100', 'valid': True}, + {'cfg_value': '20.15', 'valid': True}, + {'cfg_value': 'True', 'valid': False}, + {'cfg_value': 'False', 'valid': False}, + {'cfg_value': '10.0.0', 'valid': False}, + {'cfg_value': '0.00', 'valid': True}, + {'cfg_value': 'anything', 'valid': False},) + @ddt.unpack + def test_auto_max_subscription_ratio_options(self, cfg_value, valid): + # This tests the max_over_subscription_ratio option as it is now + # checked by a regex + def _set_conf(config, value): + config.set_override('max_over_subscription_ratio', value) + + config = conf.Configuration(None) + config.append_config_values(driver.volume_opts) + + if valid: + _set_conf(config, cfg_value) + self.assertEqual(cfg_value, config.safe_get( + 'max_over_subscription_ratio')) + else: + self.assertRaises(ValueError, _set_conf, config, cfg_value) + class FibreChannelTestCase(BaseDriverTestCase): """Test Case for FibreChannelDriver.""" diff --git a/cinder/utils.py b/cinder/utils.py index 4e941144180..3f77fd23ad8 100644 --- a/cinder/utils.py +++ b/cinder/utils.py @@ -64,6 +64,9 @@ PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f" VALID_TRACE_FLAGS = {'method', 'api'} TRACE_METHOD = False TRACE_API = False +INITIAL_AUTO_MOSR = 20 +INFINITE_UNKNOWN_VALUES = ('infinite', 'unknown') + synchronized = lockutils.synchronized_with_prefix('cinder-') @@ -1044,6 +1047,65 @@ def calculate_virtual_free_capacity(total_capacity, return free +def calculate_max_over_subscription_ratio(capability, + global_max_over_subscription_ratio): + # provisioned_capacity_gb is the apparent total capacity of + # all the volumes created on a backend, which is 
greater than + # or equal to allocated_capacity_gb, which is the apparent + # total capacity of all the volumes created on a backend + # in Cinder. Using allocated_capacity_gb as the default of + # provisioned_capacity_gb if it is not set. + allocated_capacity_gb = capability.get('allocated_capacity_gb', 0) + provisioned_capacity_gb = capability.get('provisioned_capacity_gb', + allocated_capacity_gb) + thin_provisioning_support = capability.get('thin_provisioning_support', + False) + total_capacity_gb = capability.get('total_capacity_gb', 0) + free_capacity_gb = capability.get('free_capacity_gb', 0) + pool_name = capability.get('pool_name', + capability.get('volume_backend_name')) + + # If thin provisioning is not supported the capacity filter will not use + # the value we return, no matter what it is. + if not thin_provisioning_support: + LOG.debug("Trying to retrieve max_over_subscription_ratio from a " + "service that does not support thin provisioning") + return 1.0 + + # Again, if total or free capacity is infinite or unknown, the capacity + # filter will not use the max_over_subscription_ratio at all. So, does + # not matter what we return here. + if ((total_capacity_gb in INFINITE_UNKNOWN_VALUES) or + (free_capacity_gb in INFINITE_UNKNOWN_VALUES)): + return 1.0 + + max_over_subscription_ratio = (capability.get( + 'max_over_subscription_ratio') or global_max_over_subscription_ratio) + + # We only calculate the automatic max_over_subscription_ratio (mosr) + # when the global or driver conf is set auto and while + # provisioned_capacity_gb is not 0. When auto is set and + # provisioned_capacity_gb is 0, we use the default value 20.0. 
+ if max_over_subscription_ratio == 'auto': + if provisioned_capacity_gb != 0: + used_capacity = total_capacity_gb - free_capacity_gb + LOG.debug("Calculating max_over_subscription_ratio for " + "pool %s: provisioned_capacity_gb=%s, " + "used_capacity=%s", + pool_name, provisioned_capacity_gb, used_capacity) + max_over_subscription_ratio = 1 + ( + float(provisioned_capacity_gb) / (used_capacity + 1)) + else: + max_over_subscription_ratio = INITIAL_AUTO_MOSR + + LOG.info("Auto max_over_subscription_ratio for pool %s is " + "%s", pool_name, max_over_subscription_ratio) + else: + max_over_subscription_ratio = float(max_over_subscription_ratio) + + return max_over_subscription_ratio + + def validate_integer(value, name, min_value=None, max_value=None): """Make sure that value is a valid integer, potentially within range. diff --git a/cinder/volume/driver.py b/cinder/volume/driver.py index 3ef1904d67c..65ebffc96e6 100644 --- a/cinder/volume/driver.py +++ b/cinder/volume/driver.py @@ -162,17 +162,20 @@ volume_opts = [ default=False, help='Tell driver to use SSL for connection to backend ' 'storage if the driver supports it.'), - cfg.FloatOpt('max_over_subscription_ratio', - default=20.0, - min=1, - help='Float representation of the over subscription ratio ' - 'when thin provisioning is involved. Default ratio is ' - '20.0, meaning provisioned capacity can be 20 times of ' - 'the total physical capacity. If the ratio is 10.5, it ' - 'means provisioned capacity can be 10.5 times of the ' - 'total physical capacity. A ratio of 1.0 means ' - 'provisioned capacity cannot exceed the total physical ' - 'capacity. The ratio has to be a minimum of 1.0.'), + cfg.StrOpt('max_over_subscription_ratio', + default='20.0', + regex='^(auto|\d*\.\d+|\d+)$', + help='Representation of the over subscription ratio ' + 'when thin provisioning is enabled. Default ratio is ' + '20.0, meaning provisioned capacity can be 20 times of ' + 'the total physical capacity. 
If the ratio is 10.5, it ' + 'means provisioned capacity can be 10.5 times of the ' + 'total physical capacity. A ratio of 1.0 means ' + 'provisioned capacity cannot exceed the total physical ' + 'capacity. If ratio is \'auto\', Cinder will ' + 'automatically calculate the ratio based on the ' + 'provisioned capacity and the used space. If not set to ' + 'auto, the ratio has to be a minimum of 1.0.'), cfg.StrOpt('scst_target_iqn_name', help='Certain ISCSI targets have predefined target names, ' 'SCST target driver uses this name.'), diff --git a/cinder/volume/drivers/dell_emc/vmax/common.py b/cinder/volume/drivers/dell_emc/vmax/common.py index 4525db880eb..23e9136cc26 100644 --- a/cinder/volume/drivers/dell_emc/vmax/common.py +++ b/cinder/volume/drivers/dell_emc/vmax/common.py @@ -149,8 +149,9 @@ class VMAXCommon(object): self.retries = self.configuration.safe_get('retries') self.pool_info['backend_name'] = ( self.configuration.safe_get('volume_backend_name')) - self.pool_info['max_over_subscription_ratio'] = ( - self.configuration.safe_get('max_over_subscription_ratio')) + mosr = volume_utils.get_max_over_subscription_ratio( + self.configuration.safe_get('max_over_subscription_ratio'), True) + self.pool_info['max_over_subscription_ratio'] = mosr self.pool_info['reserved_percentage'] = ( self.configuration.safe_get('reserved_percentage')) LOG.debug( @@ -888,11 +889,6 @@ class VMAXCommon(object): else: pool['reserved_percentage'] = array_reserve_percent - if max_oversubscription_ratio and ( - 0.0 < max_oversubscription_ratio < 1): - pool['max_over_subscription_ratio'] = ( - self.utils.get_default_oversubscription_ratio( - max_oversubscription_ratio)) pools.append(pool) pools = self.utils.add_legacy_pools(pools) data = {'vendor_name': "Dell EMC", diff --git a/cinder/volume/drivers/dell_emc/xtremio.py b/cinder/volume/drivers/dell_emc/xtremio.py index ab2a701b1a1..c95be560231 100644 --- a/cinder/volume/drivers/dell_emc/xtremio.py +++ 
b/cinder/volume/drivers/dell_emc/xtremio.py @@ -54,6 +54,7 @@ from cinder import utils from cinder.volume import configuration from cinder.volume import driver from cinder.volume.drivers.san import san +from cinder.volume import utils as vutils from cinder.zonemanager import utils as fczm_utils @@ -424,9 +425,10 @@ class XtremIOVolumeDriver(san.SanDriver): or self.driver_name) self.cluster_id = (self.configuration.safe_get('xtremio_cluster_name') or '') - self.provisioning_factor = (self.configuration. - safe_get('max_over_subscription_ratio') - or DEFAULT_PROVISIONING_FACTOR) + self.provisioning_factor = vutils.get_max_over_subscription_ratio( + self.configuration.max_over_subscription_ratio, + supports_auto=False) + self.clean_ig = (self.configuration.safe_get('xtremio_clean_unused_ig') or False) self._stats = {} diff --git a/cinder/volume/drivers/kaminario/kaminario_common.py b/cinder/volume/drivers/kaminario/kaminario_common.py index 943beccbe78..b1105f4dd2d 100644 --- a/cinder/volume/drivers/kaminario/kaminario_common.py +++ b/cinder/volume/drivers/kaminario/kaminario_common.py @@ -798,11 +798,14 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): LOG.debug("Searching total volumes in K2 for updating stats.") total_volumes = self.client.search("volumes").total - 1 provisioned_vol = cap.provisioned_volumes + if (conf.auto_calc_max_oversubscription_ratio and cap.provisioned and (cap.total - cap.free) != 0): ratio = provisioned_vol / float(cap.total - cap.free) else: - ratio = conf.max_over_subscription_ratio + ratio = vol_utils.get_max_over_subscription_ratio( + conf.max_over_subscription_ratio, supports_auto=True) + self.stats = {'QoS_support': False, 'free_capacity_gb': cap.free / units.Mi, 'total_capacity_gb': cap.total / units.Mi, diff --git a/cinder/volume/drivers/netapp/dataontap/block_base.py b/cinder/volume/drivers/netapp/dataontap/block_base.py index 9d9f8fe282b..821de9e4664 100644 --- a/cinder/volume/drivers/netapp/dataontap/block_base.py 
+++ b/cinder/volume/drivers/netapp/dataontap/block_base.py @@ -115,7 +115,9 @@ class NetAppBlockStorageLibrary(object): na_opts.netapp_provisioning_opts) self.configuration.append_config_values(na_opts.netapp_san_opts) self.max_over_subscription_ratio = ( - self.configuration.max_over_subscription_ratio) + volume_utils.get_max_over_subscription_ratio( + self.configuration.max_over_subscription_ratio, + supports_auto=False)) self.reserved_percentage = self._get_reserved_percentage() self.loopingcalls = loopingcalls.LoopingCalls() diff --git a/cinder/volume/drivers/nfs.py b/cinder/volume/drivers/nfs.py index cc7b0fbe883..344f92bb4b6 100644 --- a/cinder/volume/drivers/nfs.py +++ b/cinder/volume/drivers/nfs.py @@ -33,6 +33,7 @@ from cinder import interface from cinder import utils from cinder.volume import configuration from cinder.volume.drivers import remotefs +from cinder.volume import utils as vutils VERSION = '1.4.0' @@ -116,7 +117,9 @@ class NfsDriver(remotefs.RemoteFSSnapDriverDistributed): self._sparse_copy_volume_data = True self.reserved_percentage = self.configuration.reserved_percentage self.max_over_subscription_ratio = ( - self.configuration.max_over_subscription_ratio) + vutils.get_max_over_subscription_ratio( + self.configuration.max_over_subscription_ratio, + supports_auto=False)) def initialize_connection(self, volume, connector): diff --git a/cinder/volume/drivers/pure.py b/cinder/volume/drivers/pure.py index f1f7d44bb76..a8fc248df2a 100644 --- a/cinder/volume/drivers/pure.py +++ b/cinder/volume/drivers/pure.py @@ -613,6 +613,7 @@ class PureBaseVolumeDriver(san.SanDriver): a value, if not we will respect the configuration option for the max_over_subscription_ratio. """ + if (self.configuration.pure_automatic_max_oversubscription_ratio and used_space != 0 and provisioned_space != 0): # If array is empty we can not calculate a max oversubscription @@ -622,7 +623,9 @@ class PureBaseVolumeDriver(san.SanDriver): # presented based on current usage. 
thin_provisioning = provisioned_space / used_space else: - thin_provisioning = self.configuration.max_over_subscription_ratio + thin_provisioning = volume_utils.get_max_over_subscription_ratio( + self.configuration.max_over_subscription_ratio, + supports_auto=True) return thin_provisioning diff --git a/cinder/volume/drivers/zfssa/zfssanfs.py b/cinder/volume/drivers/zfssa/zfssanfs.py index 8e7d46d90ff..96b4beccff6 100644 --- a/cinder/volume/drivers/zfssa/zfssanfs.py +++ b/cinder/volume/drivers/zfssa/zfssanfs.py @@ -36,6 +36,7 @@ from cinder.volume import configuration from cinder.volume.drivers import nfs from cinder.volume.drivers.san import san from cinder.volume.drivers.zfssa import zfssarest +from cinder.volume import utils as vutils ZFSSA_OPTS = [ @@ -108,6 +109,11 @@ class ZFSSANFSDriver(nfs.NfsDriver): self._stats = None def do_setup(self, context): + self.configuration.max_over_subscription_ratio = ( + vutils.get_max_over_subscription_ratio( + self.configuration.max_over_subscription_ratio, + supports_auto=False)) + if not self.configuration.max_over_subscription_ratio > 0: msg = _("Config 'max_over_subscription_ratio' invalid. Must be > " "0: %s") % self.configuration.max_over_subscription_ratio diff --git a/cinder/volume/utils.py b/cinder/volume/utils.py index 7ba26bc279d..e1354c86df3 100644 --- a/cinder/volume/utils.py +++ b/cinder/volume/utils.py @@ -980,3 +980,35 @@ def is_group_a_type(group, key): ) return spec == " True" return False + + +def get_max_over_subscription_ratio(str_value, supports_auto=False): + """Get the max_over_subscription_ratio from a string + + As some drivers need to do some calculations with the value and we are now + receiving a string value in the conf, this converts the value to float + when appropriate. + + :param str_value: String value of max_over_subscription_ratio from the + configuration (a float as a string, or 'auto') + :param supports_auto: Tell if the calling driver supports auto MOSR.
+ :raises VolumeDriverException: if 'auto' is requested but supports_auto + is False + :return: mosr as a float, or the string 'auto' + """ + + if not supports_auto and str_value == "auto": + msg = _("This driver does not support automatic " + "max_over_subscription_ratio calculation. Please use a " + "valid float value.") + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + + if str_value == 'auto': + return str_value + + mosr = float(str_value) + if mosr < 1: + msg = _("The value of max_over_subscription_ratio must be " + "greater than 1.") + LOG.error(msg) + raise exception.InvalidParameterValue(message=msg) + return mosr diff --git a/doc/source/admin/blockstorage-over-subscription.rst b/doc/source/admin/blockstorage-over-subscription.rst index 5606e499f6b..e6af996fb3c 100644 --- a/doc/source/admin/blockstorage-over-subscription.rst +++ b/doc/source/admin/blockstorage-over-subscription.rst @@ -23,6 +23,13 @@ A ratio of 1.0 means provisioned capacity cannot exceed the total physical capacity. A ratio lower than 1.0 is ignored and the default value is used instead. +This parameter can also be set as ``max_over_subscription_ratio=auto``. When +using auto, Cinder will automatically calculate the +``max_over_subscription_ratio`` based on the provisioned capacity and the used +space. This allows the creation of a larger number of volumes at the +beginning of the pool's life, and starts to restrict creation as the free +space approaches 0 or the reserved limit. + .. note:: ``max_over_subscription_ratio`` can be configured for each back end when @@ -34,6 +41,10 @@ instead. driver that supports multiple pools per back end, it can report this ratio for each pool. The LVM driver does not support multiple pools. + When setting this value to 'auto', the values calculated by Cinder can + dynamically vary according to the pool's provisioned capacity and consumed + space. + The existing ``reserved_percentage`` flag is used to prevent over provisioning.
This flag represents the percentage of the back-end capacity that is reserved. diff --git a/releasenotes/notes/bp-provisioning-improvements-bb7e28896e2a2539.yaml b/releasenotes/notes/bp-provisioning-improvements-bb7e28896e2a2539.yaml new file mode 100644 index 00000000000..b9936c9794d --- /dev/null +++ b/releasenotes/notes/bp-provisioning-improvements-bb7e28896e2a2539.yaml @@ -0,0 +1,5 @@ +--- +features: + - Cinder now supports the use of 'max_over_subscription_ratio = auto' which + automatically calculates the value for max_over_subscription_ratio in the + scheduler.