From cd9292b931a0f74079512cd43652129a9c595ee8 Mon Sep 17 00:00:00 2001
From: Jose Castro Leon
Date: Tue, 31 Mar 2020 13:33:07 +0200
Subject: [PATCH] Remove provisioned_capacity_gb calculation on
 non-thin-provisioned backends

On such backends there is no need to calculate provisioned_capacity_gb,
as it is not used during scheduling. The calculation also did not scale
in large environments, since estimating it issues a database query per
pool that fetches every share instance on the host.

Change-Id: If1ea4ceedc495dc6c6e247feccfbdc4899ad725c
Closes-Bug: #1869712
---
 manila/scheduler/host_manager.py              |  15 ++-
 manila/tests/scheduler/test_host_manager.py   | 102 +++++++++++++-----
 ...provisioned-backends-1da2cc33d365ba4f.yaml |   7 ++
 3 files changed, 92 insertions(+), 32 deletions(-)
 create mode 100644 releasenotes/notes/bug-1869712-fix-increased-scheduled-time-for-non-thin-provisioned-backends-1da2cc33d365ba4f.yaml

diff --git a/manila/scheduler/host_manager.py b/manila/scheduler/host_manager.py
index 2c7cacab1d..6ac60f5071 100644
--- a/manila/scheduler/host_manager.py
+++ b/manila/scheduler/host_manager.py
@@ -422,6 +422,8 @@ class PoolState(HostState):
             'allocated_capacity_gb', 0)
         self.qos = capability.get('qos', False)
         self.reserved_percentage = capability['reserved_percentage']
+        self.thin_provisioning = capability.get(
+            'thin_provisioning', False)
         # NOTE(xyang): provisioned_capacity_gb is the apparent total
         # capacity of all the shares created on a backend, which is
         # greater than or equal to allocated_capacity_gb, which is the
@@ -430,16 +432,19 @@ class PoolState(HostState):
         # NOTE(nidhimittalhada): If 'provisioned_capacity_gb' is not set,
         # then calculating 'provisioned_capacity_gb' from share sizes
         # on host, as per information available in manila database.
+        # NOTE(jose-castro-leon): Only calculate provisioned_capacity_gb
+        # on thin-provisioned pools.
         self.provisioned_capacity_gb = capability.get(
-            'provisioned_capacity_gb') or (
-                self._estimate_provisioned_capacity(self.host,
-                                                    context=context))
+            'provisioned_capacity_gb', 0)
+
+        if self.thin_provisioning and not self.provisioned_capacity_gb:
+            self.provisioned_capacity_gb = (
+                self._estimate_provisioned_capacity(self.host,
+                                                    context=context))
         self.max_over_subscription_ratio = capability.get(
             'max_over_subscription_ratio',
             CONF.max_over_subscription_ratio)
-        self.thin_provisioning = capability.get(
-            'thin_provisioning', False)
         self.dedupe = capability.get(
             'dedupe', False)
         self.compression = capability.get(
diff --git a/manila/tests/scheduler/test_host_manager.py b/manila/tests/scheduler/test_host_manager.py
index 2eb56f0534..d06f809cde 100644
--- a/manila/tests/scheduler/test_host_manager.py
+++ b/manila/tests/scheduler/test_host_manager.py
@@ -867,7 +867,7 @@ class PoolStateTestCase(test.TestCase):
             'share_capability':
                 {'total_capacity_gb': 1024, 'free_capacity_gb': 512,
                  'reserved_percentage': 0, 'timestamp': None,
-                 'cap1': 'val1', 'cap2': 'val2'},
+                 'thin_provisioning': True, 'cap1': 'val1', 'cap2': 'val2'},
             'instances':
                 [
                     {
                         'id': 1, 'host': 'host1',
                         'status': 'available',
                         'share_id': 11, 'size': 1,
                         'updated_at': timeutils.utcnow()
                     },
                     {
                         'id': 2, 'host': 'host1',
                         'status': 'available',
                         'share_id': 12, 'size': None,
                         'updated_at': timeutils.utcnow()
                     },
                 ]
         },
+        {
+            'share_capability':
+                {'total_capacity_gb': 1024, 'free_capacity_gb': 512,
+                 'reserved_percentage': 0, 'timestamp': None,
+                 'thin_provisioning': False, 'cap1': 'val1', 'cap2': 'val2'},
+            'instances':
+                [
+                    {
+                        'id': 1, 'host': 'host1',
+                        'status': 'available',
+                        'share_id': 11, 'size': 1,
+                        'updated_at': timeutils.utcnow()
+                    },
+                    {
+                        'id': 2, 'host': 'host1',
+                        'status': 'available',
+                        'share_id': 12, 'size': None,
+                        'updated_at': timeutils.utcnow()
+                    },
+                ]
+        },
         {
             'share_capability':
                 {'total_capacity_gb': 1024, 'free_capacity_gb': 512,
@@ -892,6 +913,14 @@ class PoolStateTestCase(test.TestCase):
                  'ipv6_support': False},
             'instances': []
         },
+        {
+            'share_capability':
+                {'total_capacity_gb': 1024, 'free_capacity_gb': 512,
+                 'reserved_percentage': 0, 'timestamp': None,
+                 'thin_provisioning': True, 'cap1': 'val1', 'cap2': 'val2',
+                 'ipv4_support': True, 'ipv6_support': False},
+            'instances': []
+        },
         {
             'share_capability':
                 {'total_capacity_gb': 1024, 'free_capacity_gb': 512,
@@ -934,12 +963,28 @@ class PoolStateTestCase(test.TestCase):
                     },
                 ]
         },
+        {
+            'share_capability':
+                {'total_capacity_gb': 1024, 'free_capacity_gb': 512,
+                 'allocated_capacity_gb': 256, 'provisioned_capacity_gb': 1,
+                 'thin_provisioning': True, 'reserved_percentage': 0,
+                 'timestamp': None, 'cap1': 'val1', 'cap2': 'val2'},
+            'instances':
+                [
+                    {
+                        'id': 1, 'host': 'host1',
+                        'status': 'available',
+                        'share_id': 11, 'size': 1,
+                        'updated_at': timeutils.utcnow()
+                    },
+                ]
+        },
         {
             'share_capability':
                 {'total_capacity_gb': 1024, 'free_capacity_gb': 512,
                  'allocated_capacity_gb': 256, 'provisioned_capacity_gb': 256,
-                 'reserved_percentage': 0, 'timestamp': None, 'cap1': 'val1',
-                 'cap2': 'val2'},
+                 'thin_provisioning': False, 'reserved_percentage': 0,
+                 'timestamp': None, 'cap1': 'val1', 'cap2': 'val2'},
             'instances':
                 [
                     {
@@ -969,35 +1014,38 @@
         self.assertEqual(512, fake_pool.free_capacity_gb)
         self.assertDictMatch(share_capability, fake_pool.capabilities)
 
-        if 'provisioned_capacity_gb' not in share_capability:
-            db.share_instances_get_all_by_host.assert_called_once_with(
-                fake_context, fake_pool.host, with_share_data=True)
-
-            if len(instances) > 0:
-                self.assertEqual(4, fake_pool.provisioned_capacity_gb)
+        if 'thin_provisioning' in share_capability and (
+                share_capability['thin_provisioning']):
+            self.assertEqual(share_capability['thin_provisioning'],
+                             fake_pool.thin_provisioning)
+            if 'provisioned_capacity_gb' not in share_capability or (
+                    not share_capability['provisioned_capacity_gb']):
+                db.share_instances_get_all_by_host.assert_called_once_with(
+                    fake_context, fake_pool.host, with_share_data=True)
+                if len(instances) > 0:
+                    self.assertEqual(4, fake_pool.provisioned_capacity_gb)
+                else:
+                    self.assertEqual(0, fake_pool.provisioned_capacity_gb)
+            else:
+                self.assertFalse(db.share_instances_get_all_by_host.called)
+                self.assertEqual(share_capability['provisioned_capacity_gb'],
+                                 fake_pool.provisioned_capacity_gb)
         else:
+            self.assertFalse(fake_pool.thin_provisioning)
+            self.assertFalse(db.share_instances_get_all_by_host.called)
+            if 'provisioned_capacity_gb' not in share_capability or (
+                    not share_capability['provisioned_capacity_gb']):
                 self.assertEqual(0, fake_pool.provisioned_capacity_gb)
+            else:
+                self.assertEqual(share_capability['provisioned_capacity_gb'],
+                                 fake_pool.provisioned_capacity_gb)
 
-        if 'allocated_capacity_gb' in share_capability:
-            self.assertEqual(share_capability['allocated_capacity_gb'],
-                             fake_pool.allocated_capacity_gb)
-        elif 'allocated_capacity_gb' not in share_capability:
-            self.assertEqual(0, fake_pool.allocated_capacity_gb)
-        elif 'provisioned_capacity_gb' in share_capability and (
-                'allocated_capacity_gb' not in share_capability):
-            self.assertFalse(db.share_instances_get_all_by_host.called)
-
-            self.assertEqual(0, fake_pool.allocated_capacity_gb)
-            self.assertEqual(share_capability['provisioned_capacity_gb'],
-                             fake_pool.provisioned_capacity_gb)
-        elif 'provisioned_capacity_gb' in share_capability and (
-                'allocated_capacity_gb' in share_capability):
-            self.assertFalse(db.share_instances_get_all_by_host.called)
-
+        if 'allocated_capacity_gb' in share_capability:
             self.assertEqual(share_capability['allocated_capacity_gb'],
                              fake_pool.allocated_capacity_gb)
-            self.assertEqual(share_capability['provisioned_capacity_gb'],
-                             fake_pool.provisioned_capacity_gb)
+        else:
+            self.assertEqual(0, fake_pool.allocated_capacity_gb)
+
         if 'ipv4_support' in share_capability:
             self.assertEqual(share_capability['ipv4_support'],
                              fake_pool.ipv4_support)
diff --git a/releasenotes/notes/bug-1869712-fix-increased-scheduled-time-for-non-thin-provisioned-backends-1da2cc33d365ba4f.yaml b/releasenotes/notes/bug-1869712-fix-increased-scheduled-time-for-non-thin-provisioned-backends-1da2cc33d365ba4f.yaml
new file mode 100644
index 0000000000..2e9d15f960
--- /dev/null
+++ b/releasenotes/notes/bug-1869712-fix-increased-scheduled-time-for-non-thin-provisioned-backends-1da2cc33d365ba4f.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+  - |
+    Fixed increased scheduling time for backends that are not thin
+    provisioned. On such backends there is no need to calculate
+    provisioned_capacity_gb, since it is not used during scheduling and
+    its calculation issues many database queries in large environments.
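
Reviewer note (not part of the patch): the sketch below is a minimal,
self-contained model of the new control flow in
PoolState.update_from_share_capability. The names update_provisioned()
and estimate() are hypothetical; estimate() stands in for
_estimate_provisioned_capacity(), whose per-host database query this
change skips on non-thin-provisioned pools.

    def update_provisioned(capability, estimate):
        """Return (thin_provisioning, provisioned_capacity_gb) for a pool."""
        thin = capability.get('thin_provisioning', False)
        provisioned = capability.get('provisioned_capacity_gb', 0)
        # Fall back to the expensive estimate only for thin-provisioned
        # pools that did not report a value themselves.
        if thin and not provisioned:
            provisioned = estimate()
        return thin, provisioned

    calls = []

    def estimate():
        # Stand-in for the per-host database scan.
        calls.append(1)
        return 4

    # Thick pool: nothing reported and, with this patch, no database query.
    assert update_provisioned({}, estimate) == (False, 0)
    assert calls == []
    # Thin pool without a reported value: the estimator runs once.
    assert update_provisioned({'thin_provisioning': True}, estimate) == (True, 4)
    assert calls == [1]
    # Thin pool that reports a value: the estimator is skipped again.
    assert update_provisioned(
        {'thin_provisioning': True, 'provisioned_capacity_gb': 256},
        estimate) == (True, 256)
    assert calls == [1]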