Revert volume consumed capacity when rescheduling
The Cinder scheduler consumes pool capacity repeatedly when rescheduling a create request; revert the previously consumed capacity before retrying. Closes-Bug: #1741833 Change-Id: Ib11bda868085dfed0ab0e9018b8ed35956dbdf10
This commit is contained in:
parent
6dd3776723
commit
4bc11505be
|
@ -316,6 +316,12 @@ class FilterScheduler(driver.Scheduler):
|
|||
resource_type['extra_specs'].update(
|
||||
multiattach='<is> True')
|
||||
|
||||
# Revert volume consumed capacity if it's a rescheduled request
|
||||
retry = filter_properties.get('retry', {})
|
||||
if retry.get('backends', []):
|
||||
self.host_manager.revert_volume_consumed_capacity(
|
||||
retry['backends'][-1],
|
||||
request_spec['volume_properties']['size'])
|
||||
# Find our local list of acceptable backends by filtering and
|
||||
# weighing our options. we virtually consume resources on
|
||||
# it so subsequent selections can adjust accordingly.
|
||||
|
|
|
@ -298,7 +298,7 @@ class BackendState(object):
|
|||
self.storage_protocol = capability.get('storage_protocol', None)
|
||||
self.updated = capability['timestamp']
|
||||
|
||||
def consume_from_volume(self, volume):
|
||||
def consume_from_volume(self, volume, update_time=True):
|
||||
"""Incrementally update host state from a volume."""
|
||||
volume_gb = volume['size']
|
||||
self.allocated_capacity_gb += volume_gb
|
||||
|
@ -311,7 +311,8 @@ class BackendState(object):
|
|||
pass
|
||||
else:
|
||||
self.free_capacity_gb -= volume_gb
|
||||
self.updated = timeutils.utcnow()
|
||||
if update_time:
|
||||
self.updated = timeutils.utcnow()
|
||||
|
||||
def __repr__(self):
|
||||
# FIXME(zhiteng) backend level free_capacity_gb isn't as
|
||||
|
@ -614,6 +615,14 @@ class HostManager(object):
|
|||
"scheduler cache.", {'backend': backend_key})
|
||||
del self.backend_state_map[backend_key]
|
||||
|
||||
def revert_volume_consumed_capacity(self, pool_name, size):
    """Give back capacity previously consumed on a pool.

    When a create request is rescheduled, the capacity that was
    virtually consumed on the previously tried pool must be returned,
    otherwise each retry shrinks the pool's free capacity again in the
    scheduler's view.

    :param pool_name: full pool name in ``<backend_key>#<pool>`` form
                      (the entry recorded in ``retry['backends']``)
    :param size: volume size in GB to return to the pool
    """
    for backend_key, state in self.backend_state_map.items():
        # Iterate pool states directly instead of indexing the dict
        # by key on every pass.
        for pool_state in state.pools.values():
            if pool_name == '#'.join([backend_key,
                                      pool_state.pool_name]):
                # A negative size undoes the earlier consumption;
                # update_time=False avoids bumping the state's
                # 'updated' timestamp, so a fresh driver capability
                # report still supersedes this adjustment.
                pool_state.consume_from_volume({'size': -size},
                                               update_time=False)
                # Pool names include the backend key, so the match
                # is unique — stop searching.
                return
|
||||
|
||||
def get_all_backend_states(self, context):
|
||||
"""Returns a dict of all the backends the HostManager knows about.
|
||||
|
||||
|
|
|
@ -311,6 +311,22 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
|
|||
self.context, request_spec,
|
||||
filter_properties=filter_properties)
|
||||
|
||||
def test_retry_revert_consumed_capacity(self):
    """Rescheduling reverts capacity consumed on the last tried backend."""
    scheduler = fakes.FakeFilterScheduler()
    spec_dict = {
        'volume_type': {'name': 'LVM_iSCSI'},
        'volume_properties': {'project_id': 1, 'size': 2},
    }
    spec = objects.RequestSpec.from_primitives(spec_dict)
    props = {'retry': {'num_attempts': 1,
                       'backends': ['fake_backend_name']}}

    patcher = mock.patch.object(scheduler.host_manager,
                                'revert_volume_consumed_capacity')
    with patcher as revert_mock:
        scheduler._schedule(self.context, spec,
                            filter_properties=props)
        # The last tried backend and the requested size must be
        # passed through to the revert call.
        revert_mock.assert_called_once_with('fake_backend_name', 2)
|
||||
|
||||
def test_add_retry_backend(self):
|
||||
retry = dict(num_attempts=1, backends=[])
|
||||
filter_properties = dict(retry=retry)
|
||||
|
|
Loading…
Reference in New Issue