Route extend_volume calls to scheduler

When extending a volume in a THIN pool, Cinder does not check whether
the backend can actually provision the newly allocated space. This
allows a tenant with enough quota to extend a volume to a size much
larger than the provisionable space reported by the backend.

By routing the extend request through the scheduler, we ensure that
the host can actually provision the requested space.

Closes-bug: #1600295
Closes-bug: #1539112
Change-Id: I4a4ee8acac258d773b99e2767f1c75faa909813b
Erlon R. Cruz 2016-11-29 09:20:42 -02:00
parent 0e47f3ce3a
commit c5afe768cc
7 changed files with 267 additions and 56 deletions
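The core sizing rule this change introduces, shown as a standalone
sketch (hypothetical helper and names, not the patched CapacityFilter
itself): when 'new_size' is present, the filter only needs room for the
growth, not for the whole volume.

    def requested_gb(filter_properties):
        """Space (GB) a request actually needs from the backend.

        Hypothetical sketch of the sizing logic this patch adds to
        CapacityFilter.host_passes.
        """
        if filter_properties.get('new_size'):
            # Extend: only the additional space must be provisionable.
            return (int(filter_properties['new_size']) -
                    int(filter_properties['size']))
        # Create: the full volume size must fit.
        return int(filter_properties['size'])

    # Extending a 50 GB volume to 100 GB asks the backend for 50 GB more:
    assert requested_gb({'size': 50, 'new_size': 100}) == 50
    # Creating a 100 GB volume asks for the full 100 GB:
    assert requested_gb({'size': 100}) == 100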

View File

@ -39,7 +39,23 @@ class CapacityFilter(filters.BaseHostFilter):
        if host_state.host == filter_properties.get('vol_exists_on'):
            return True
        volume_size = filter_properties.get('size')

        spec = filter_properties.get('request_spec')
        if spec:
            volid = spec.get('volume_id')

        if filter_properties.get('new_size'):
            # If new_size is passed, we are allocating space to extend a volume
            requested_size = (int(filter_properties.get('new_size')) -
                              int(filter_properties.get('size')))
            LOG.debug('Checking if host %(host)s can extend the volume '
                      '%(id)s in %(size)s GB',
                      {'host': host_state.host, 'id': volid,
                       'size': requested_size})
        else:
            requested_size = filter_properties.get('size')
            LOG.debug('Checking if host %(host)s can create a %(size)s GB '
                      'volume (%(id)s)',
                      {'host': host_state.host, 'id': volid,
                       'size': requested_size})

        if host_state.free_capacity_gb is None:
            # Fail Safe
@ -78,7 +94,7 @@ class CapacityFilter(filters.BaseHostFilter):
        free = free_space - math.floor(total * reserved)

        msg_args = {"host": host_state.host,
-                   "requested": volume_size,
+                   "requested": requested_size,
                    "available": free}

        # NOTE(xyang): If 'provisioning:type' is 'thick' in extra_specs,
@ -99,7 +115,7 @@ class CapacityFilter(filters.BaseHostFilter):
        if (thin and host_state.thin_provisioning_support and
                host_state.max_over_subscription_ratio >= 1):
            provisioned_ratio = ((host_state.provisioned_capacity_gb +
-                                 volume_size) / total)
+                                 requested_size) / total)
            if provisioned_ratio > host_state.max_over_subscription_ratio:
                LOG.warning(_LW(
                    "Insufficient free space for thin provisioning. "
@ -120,7 +136,7 @@ class CapacityFilter(filters.BaseHostFilter):
            # of reserved space) which we can over-subscribe.
            adjusted_free_virtual = (
                free * host_state.max_over_subscription_ratio)
-            return adjusted_free_virtual >= volume_size
+            return adjusted_free_virtual >= requested_size
        elif thin and host_state.thin_provisioning_support:
            LOG.warning(_LW("Filtering out host %(host)s with an invalid "
                            "maximum over subscription ratio of "
@ -131,7 +147,7 @@ class CapacityFilter(filters.BaseHostFilter):
"host": host_state.host})
return False
if free < volume_size:
if free < requested_size:
LOG.warning(_LW("Insufficient free space for volume creation "
"on host %(host)s (requested / avail): "
"%(requested)s/%(available)s"), msg_args)

View File

@ -293,6 +293,28 @@ class SchedulerManager(manager.Manager):
"""
return self.driver.get_pools(context, filters)
def extend_volume(self, context, volume, new_size, reservations,
request_spec=None, filter_properties=None):
def _extend_volume_set_error(self, context, ex, request_spec):
volume_state = {'volume_state': {'status': 'available'}}
self._set_volume_state_and_notify('extend_volume', volume_state,
context, ex, request_spec)
if not filter_properties:
filter_properties = {}
filter_properties['new_size'] = new_size
try:
self.driver.host_passes_filters(context, volume.host,
request_spec, filter_properties)
volume_rpcapi.VolumeAPI().extend_volume(context, volume, new_size,
reservations)
except exception.NoValidHost as ex:
QUOTAS.rollback(context, reservations,
project_id=volume.project_id)
_extend_volume_set_error(self, context, ex, request_spec)
def _set_volume_state_and_notify(self, method, updates, context, ex,
request_spec, msg=None):
# TODO(harlowja): move into a task that just does this later.

View File

@ -61,9 +61,10 @@ class SchedulerAPI(rpc.RPCAPI):
        3.0 - Remove 2.x compatibility
        3.1 - Adds notify_service_capabilities()
        3.2 - Adds extend_volume()
    """

-    RPC_API_VERSION = '3.1'
+    RPC_API_VERSION = '3.2'
    RPC_DEFAULT_VERSION = '3.0'
    TOPIC = constants.SCHEDULER_TOPIC
    BINARY = 'cinder-scheduler'
@ -132,6 +133,25 @@ class SchedulerAPI(rpc.RPCAPI):
        }
        return cctxt.cast(ctxt, 'manage_existing', **msg_args)

    def extend_volume(self, ctxt, volume, new_size, reservations,
                      request_spec, filter_properties=None):
        cctxt = self._get_cctxt()
        if not cctxt.can_send_version('3.2'):
            msg = _('extend_volume requires cinder-scheduler '
                    'RPC API version >= 3.2.')
            raise exception.ServiceTooOld(msg)

        request_spec_p = jsonutils.to_primitive(request_spec)
        msg_args = {
            'volume': volume,
            'new_size': new_size,
            'reservations': reservations,
            'request_spec': request_spec_p,
            'filter_properties': filter_properties,
        }
        return cctxt.cast(ctxt, 'extend_volume', **msg_args)

    def get_pools(self, ctxt, filters=None):
        cctxt = self._get_cctxt()
        return cctxt.call(ctxt, 'get_pools', filters=filters)

View File

@ -61,7 +61,8 @@ class CapacityFilterTestCase(HostFiltersTestCase):
def test_filter_passes(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
-        filter_properties = {'size': 100}
+        filter_properties = {'size': 100,
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 500,
@ -73,7 +74,8 @@ class CapacityFilterTestCase(HostFiltersTestCase):
def test_filter_current_host_passes(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
-        filter_properties = {'size': 100, 'vol_exists_on': 'host1'}
+        filter_properties = {'size': 100, 'vol_exists_on': 'host1',
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 100,
@ -85,7 +87,8 @@ class CapacityFilterTestCase(HostFiltersTestCase):
def test_filter_fails(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
-        filter_properties = {'size': 100}
+        filter_properties = {'size': 100,
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 200,
@ -98,7 +101,8 @@ class CapacityFilterTestCase(HostFiltersTestCase):
def test_filter_fails_free_capacity_None(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
-        filter_properties = {'size': 100}
+        filter_properties = {'size': 100,
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'free_capacity_gb': None,
@ -109,7 +113,8 @@ class CapacityFilterTestCase(HostFiltersTestCase):
def test_filter_passes_infinite(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
-        filter_properties = {'size': 100}
+        filter_properties = {'size': 100,
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'free_capacity_gb': 'infinite',
@ -117,10 +122,37 @@ class CapacityFilterTestCase(HostFiltersTestCase):
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))

    @mock.patch('cinder.utils.service_is_up')
    def test_filter_extend_request(self, _mock_serv_is_up):
        _mock_serv_is_up.return_value = True
        filt_cls = self.class_map['CapacityFilter']()
        filter_properties = {'new_size': 100, 'size': 50,
                             'request_spec': {'volume_id': fake.VOLUME_ID}}
        service = {'disabled': False}
        host = fakes.FakeHostState('host1',
                                   {'free_capacity_gb': 200,
                                    'updated_at': None,
                                    'total_capacity_gb': 500,
                                    'service': service})
        self.assertTrue(filt_cls.host_passes(host, filter_properties))

    @mock.patch('cinder.utils.service_is_up')
    def test_filter_extend_request_negative(self, _mock_serv_is_up):
        _mock_serv_is_up.return_value = True
        filt_cls = self.class_map['CapacityFilter']()
        filter_properties = {'size': 50,
                             'request_spec': {'volume_id': fake.VOLUME_ID}}
        service = {'disabled': False}
        host = fakes.FakeHostState('host1',
                                   {'free_capacity_gb': 49,
                                    'updated_at': None,
                                    'total_capacity_gb': 500,
                                    'service': service})
        self.assertFalse(filt_cls.host_passes(host, filter_properties))

def test_filter_passes_unknown(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
-        filter_properties = {'size': 100}
+        filter_properties = {'size': 100,
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'free_capacity_gb': 'unknown',
@ -131,7 +163,8 @@ class CapacityFilterTestCase(HostFiltersTestCase):
def test_filter_passes_total_infinite(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
-        filter_properties = {'size': 100}
+        filter_properties = {'size': 100,
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'free_capacity_gb': 'infinite',
@ -144,7 +177,8 @@ class CapacityFilterTestCase(HostFiltersTestCase):
def test_filter_passes_total_unknown(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
-        filter_properties = {'size': 100}
+        filter_properties = {'size': 100,
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'free_capacity_gb': 'unknown',
@ -157,7 +191,8 @@ class CapacityFilterTestCase(HostFiltersTestCase):
def test_filter_fails_total_infinite(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
-        filter_properties = {'size': 100}
+        filter_properties = {'size': 100,
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 'infinite',
@ -169,7 +204,8 @@ class CapacityFilterTestCase(HostFiltersTestCase):
def test_filter_fails_total_unknown(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
-        filter_properties = {'size': 100}
+        filter_properties = {'size': 100,
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 'unknown',
@ -181,7 +217,8 @@ class CapacityFilterTestCase(HostFiltersTestCase):
def test_filter_fails_total_zero(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
-        filter_properties = {'size': 100}
+        filter_properties = {'size': 100,
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 0,
@ -197,7 +234,8 @@ class CapacityFilterTestCase(HostFiltersTestCase):
'capabilities:thin_provisioning_support':
'<is> True',
'capabilities:thick_provisioning_support':
-                                 '<is> False'}
+                                 '<is> False',
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 500,
@ -218,7 +256,8 @@ class CapacityFilterTestCase(HostFiltersTestCase):
'capabilities:thin_provisioning_support':
'<is> True',
'capabilities:thick_provisioning_support':
-                                 '<is> False'}
+                                 '<is> False',
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 500,
@ -239,7 +278,8 @@ class CapacityFilterTestCase(HostFiltersTestCase):
'capabilities:thin_provisioning_support':
'<is> False',
'capabilities:thick_provisioning_support':
-                                 '<is> True'}
+                                 '<is> True',
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
service = {'disabled': False}
# If "thin_provisioning_support" is False,
# "max_over_subscription_ratio" will be ignored.
@ -262,7 +302,8 @@ class CapacityFilterTestCase(HostFiltersTestCase):
'capabilities:thin_provisioning_support':
'<is> True',
'capabilities:thick_provisioning_support':
-                                 '<is> False'}
+                                 '<is> False',
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 500,
@ -283,7 +324,8 @@ class CapacityFilterTestCase(HostFiltersTestCase):
'capabilities:thin_provisioning_support':
'<is> True',
'capabilities:thick_provisioning_support':
-                                 '<is> False'}
+                                 '<is> False',
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 500,
@ -304,7 +346,8 @@ class CapacityFilterTestCase(HostFiltersTestCase):
'capabilities:thin_provisioning_support':
'<is> True',
'capabilities:thick_provisioning_support':
-                                 '<is> False'}
+                                 '<is> False',
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 500,
@ -325,7 +368,8 @@ class CapacityFilterTestCase(HostFiltersTestCase):
'capabilities:thin_provisioning_support':
'<is> True',
'capabilities:thick_provisioning_support':
-                                 '<is> False'}
+                                 '<is> False',
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 500,
@ -346,7 +390,8 @@ class CapacityFilterTestCase(HostFiltersTestCase):
'capabilities:thin_provisioning_support':
'<is> True',
'capabilities:thick_provisioning_support':
-                                 '<is> False'}
+                                 '<is> False',
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 500,
@ -367,7 +412,8 @@ class CapacityFilterTestCase(HostFiltersTestCase):
'capabilities:thin_provisioning_support':
'<is> False',
'capabilities:thick_provisioning_support':
-                                 '<is> True'}
+                                 '<is> True',
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
service = {'disabled': False}
# If "thin_provisioning_support" is False,
# "max_over_subscription_ratio" will be ignored.
@ -390,7 +436,8 @@ class CapacityFilterTestCase(HostFiltersTestCase):
'capabilities:thin_provisioning_support':
'<is> True',
'capabilities:thick_provisioning_support':
-                                 '<is> True'}
+                                 '<is> True',
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 500,
@ -411,7 +458,8 @@ class CapacityFilterTestCase(HostFiltersTestCase):
'capabilities:thin_provisioning_support':
'<is> True',
'capabilities:thick_provisioning_support':
-                                 '<is> True'}
+                                 '<is> True',
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 500,
@ -432,7 +480,8 @@ class CapacityFilterTestCase(HostFiltersTestCase):
'capabilities:thin_provisioning_support':
'<is> True',
'capabilities:thick_provisioning_support':
-                                 '<is> False'}
+                                 '<is> False',
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 500,
@ -453,7 +502,8 @@ class CapacityFilterTestCase(HostFiltersTestCase):
'capabilities:thin_provisioning_support':
'<is> True',
'capabilities:thick_provisioning_support':
-                                 '<is> True'}
+                                 '<is> True',
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 500,
@ -474,7 +524,8 @@ class CapacityFilterTestCase(HostFiltersTestCase):
'capabilities:thin_provisioning_support':
'<is> True',
'capabilities:thick_provisioning_support':
-                                 '<is> True'}
+                                 '<is> True',
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 500,
@ -500,7 +551,8 @@ class CapacityFilterTestCase(HostFiltersTestCase):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100,
-                             'volume_type': volume_type}
+                             'volume_type': volume_type,
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'total_capacity_gb': 500,
@ -530,8 +582,8 @@ class AffinityFilterTestCase(HostFiltersTestCase):
vol_id = volume.id
filter_properties = {'context': self.context.elevated(),
-                             'scheduler_hints': {
-                                 'different_host': [vol_id], }}
+                             'scheduler_hints': {'different_host': [vol_id], },
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@ -550,8 +602,8 @@ class AffinityFilterTestCase(HostFiltersTestCase):
vol_id = volume.id
filter_properties = {'context': self.context.elevated(),
-                             'scheduler_hints': {
-                                 'different_host': [vol_id], }}
+                             'scheduler_hints': {'different_host': [vol_id], },
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@ -574,8 +626,8 @@ class AffinityFilterTestCase(HostFiltersTestCase):
vol_id = volume.id
filter_properties = {'context': self.context.elevated(),
-                             'scheduler_hints': {
-                                 'different_host': [vol_id], }}
+                             'scheduler_hints': {'different_host': [vol_id], },
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@ -584,7 +636,8 @@ class AffinityFilterTestCase(HostFiltersTestCase):
host = fakes.FakeHostState('host1', {})
filter_properties = {'context': self.context.elevated(),
-                             'scheduler_hints': None}
+                             'scheduler_hints': None,
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@ -944,7 +997,8 @@ class InstanceLocalityFilterTestCase(HostFiltersTestCase):
uuid = nova.novaclient().servers.create('host1')
filter_properties = {'context': self.context,
-                             'scheduler_hints': {'local_to_instance': uuid}}
+                             'scheduler_hints': {'local_to_instance': uuid},
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@mock.patch('novaclient.client.discover_extensions')
@ -958,7 +1012,8 @@ class InstanceLocalityFilterTestCase(HostFiltersTestCase):
uuid = nova.novaclient().servers.create('host2')
filter_properties = {'context': self.context,
-                             'scheduler_hints': {'local_to_instance': uuid}}
+                             'scheduler_hints': {'local_to_instance': uuid},
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_handles_none(self):
@ -966,7 +1021,8 @@ class InstanceLocalityFilterTestCase(HostFiltersTestCase):
host = fakes.FakeHostState('host1', {})
filter_properties = {'context': self.context,
-                             'scheduler_hints': None}
+                             'scheduler_hints': None,
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_invalid_uuid(self):
@ -975,7 +1031,8 @@ class InstanceLocalityFilterTestCase(HostFiltersTestCase):
filter_properties = {'context': self.context,
'scheduler_hints':
-                             {'local_to_instance': 'e29b11d4-not-valid-a716'}}
+                             {'local_to_instance': 'e29b11d4-not-valid-a716'},
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
self.assertRaises(exception.InvalidUUID,
filt_cls.host_passes, host, filter_properties)
@ -988,7 +1045,8 @@ class InstanceLocalityFilterTestCase(HostFiltersTestCase):
uuid = nova.novaclient().servers.create('host1')
filter_properties = {'context': self.context,
-                             'scheduler_hints': {'local_to_instance': uuid}}
+                             'scheduler_hints': {'local_to_instance': uuid},
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
self.assertRaises(exception.CinderException,
filt_cls.host_passes, host, filter_properties)
@ -1000,7 +1058,8 @@ class InstanceLocalityFilterTestCase(HostFiltersTestCase):
filt_cls = self.class_map['InstanceLocalityFilter']()
host = fakes.FakeHostState('host1', {})
-        filter_properties = {'context': self.context, 'size': 100}
+        filter_properties = {'context': self.context, 'size': 100,
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.compute.nova.novaclient')
@ -1014,7 +1073,8 @@ class InstanceLocalityFilterTestCase(HostFiltersTestCase):
filter_properties = \
{'context': self.context, 'scheduler_hints':
-             {'local_to_instance': 'e29b11d4-15ef-34a9-a716-598a6f0b5467'}}
+             {'local_to_instance': 'e29b11d4-15ef-34a9-a716-598a6f0b5467'},
+             'request_spec': {'volume_id': fake.VOLUME_ID}}
self.assertRaises(exception.APITimeout,
filt_cls.host_passes, host, filter_properties)
@ -1279,7 +1339,8 @@ class BasicFiltersTestCase(HostFiltersTestCase):
capabilities.update(ecaps)
service = {'disabled': False}
filter_properties = {'resource_type': {'name': 'fake_type',
-                                               'extra_specs': especs}}
+                                               'extra_specs': especs},
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
host = fakes.FakeHostState('host1',
{'free_capacity_gb': 1024,
'capabilities': capabilities,
@ -1436,7 +1497,8 @@ class BasicFiltersTestCase(HostFiltersTestCase):
filter_properties = {'resource_type': {'memory_mb': 1024,
'root_gb': 200,
'ephemeral_gb': 0},
-                             'scheduler_hints': {'query': self.json_query}}
+                             'scheduler_hints': {'query': self.json_query},
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
capabilities = {'enabled': True}
host = fakes.FakeHostState('host1',
{'free_ram_mb': 1024,
@ -1448,7 +1510,8 @@ class BasicFiltersTestCase(HostFiltersTestCase):
filt_cls = self.class_map['JsonFilter']()
filter_properties = {'resource_type': {'memory_mb': 1024,
'root_gb': 200,
-                                               'ephemeral_gb': 0}}
+                                               'ephemeral_gb': 0},
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
capabilities = {'enabled': True}
host = fakes.FakeHostState('host1',
{'free_ram_mb': 0,
@ -1461,7 +1524,8 @@ class BasicFiltersTestCase(HostFiltersTestCase):
filter_properties = {'resource_type': {'memory_mb': 1024,
'root_gb': 200,
'ephemeral_gb': 0},
-                             'scheduler_hints': {'query': self.json_query}}
+                             'scheduler_hints': {'query': self.json_query},
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
capabilities = {'enabled': True}
host = fakes.FakeHostState('host1',
{'free_ram_mb': 1023,
@ -1474,7 +1538,8 @@ class BasicFiltersTestCase(HostFiltersTestCase):
filter_properties = {'resource_type': {'memory_mb': 1024,
'root_gb': 200,
'ephemeral_gb': 0},
-                             'scheduler_hints': {'query': self.json_query}}
+                             'scheduler_hints': {'query': self.json_query},
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
capabilities = {'enabled': True}
host = fakes.FakeHostState('host1',
{'free_ram_mb': 1024,
@ -1491,7 +1556,8 @@ class BasicFiltersTestCase(HostFiltersTestCase):
filter_properties = {'resource_type': {'memory_mb': 1024,
'root_gb': 200,
'ephemeral_gb': 0},
-                             'scheduler_hints': {'query': json_query}}
+                             'scheduler_hints': {'query': json_query},
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
capabilities = {'enabled': False}
host = fakes.FakeHostState('host1',
{'free_ram_mb': 1024,
@ -1507,7 +1573,8 @@ class BasicFiltersTestCase(HostFiltersTestCase):
['not', '$service.disabled']])
filter_properties = {'resource_type': {'memory_mb': 1024,
'local_gb': 200},
-                             'scheduler_hints': {'query': json_query}}
+                             'scheduler_hints': {'query': json_query},
+                             'request_spec': {'volume_id': fake.VOLUME_ID}}
capabilities = {'enabled': True}
host = fakes.FakeHostState('host1',
{'free_ram_mb': 1024,
@ -1532,6 +1599,7 @@ class BasicFiltersTestCase(HostFiltersTestCase):
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
'request_spec': {'volume_id': fake.VOLUME_ID}
}
# Passes
@ -1633,6 +1701,7 @@ class BasicFiltersTestCase(HostFiltersTestCase):
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
'request_spec': {'volume_id': fake.VOLUME_ID}
}
self.assertEqual(expected,
filt_cls.host_passes(host, filter_properties))

View File

@ -170,6 +170,37 @@ class SchedulerRpcAPITestCase(test.TestCase):
version='3.0')
create_worker_mock.assert_not_called()

    @mock.patch('oslo_messaging.RPCClient.can_send_version',
                return_value=False)
    def test_extend_volume_capped(self, can_send_version_mock):
        new_size = 4
        volume = fake_volume.fake_volume_obj(self.context)
        self.assertRaises(exception.ServiceTooOld,
                          self._test_scheduler_api,
                          'extend_volume',
                          rpc_method='cast',
                          request_spec='fake_request_spec',
                          filter_properties='filter_properties',
                          volume=volume,
                          new_size=new_size,
                          reservations=['RESERVATIONS'],
                          version='3.0')

    @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True)
    def test_extend_volume(self, can_send_version_mock):
        new_size = 4
        volume = fake_volume.fake_volume_obj(self.context)
        create_worker_mock = self.mock_object(volume, 'create_worker')
        self._test_scheduler_api('extend_volume',
                                 rpc_method='cast',
                                 request_spec='fake_request_spec',
                                 filter_properties='filter_properties',
                                 volume=volume,
                                 new_size=new_size,
                                 reservations=['RESERVATIONS'],
                                 version='3.0')
        create_worker_mock.assert_not_called()

def test_get_pools(self):
self._test_scheduler_api('get_pools',
rpc_method='call',

View File

@ -1071,7 +1071,6 @@ class VolumeTestCase(base.BaseVolumeTestCase):
def test_get_all_limit_bad_value(self):
"""Test value of 'limit' is numeric and >= 0"""
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidInput,
volume_api.get_all,
self.context,
@ -3956,6 +3955,36 @@ class VolumeTestCase(base.BaseVolumeTestCase):
                          volume_api.extend, self.context,
                          volume, 3)

        # Test scheduler path
        limit_check.side_effect = None
        reserve.side_effect = None
        db.volume_update(self.context, volume.id, {'status': 'available'})
        volume_api.scheduler_rpcapi = mock.MagicMock()
        volume_api.scheduler_rpcapi.extend_volume = mock.MagicMock()
        volume_api.extend(self.context, volume, 3)
        request_spec = {
            'volume_properties': volume,
            'volume_type': {},
            'volume_id': volume.id
        }
        volume_api.scheduler_rpcapi.extend_volume.assert_called_once_with(
            self.context, volume, 3, ["RESERVATION"], request_spec)

        # Test direct volume path
        limit_check.side_effect = None
        reserve.side_effect = None
        db.volume_update(self.context, volume.id, {'status': 'available'})
        ext_mock = mock.MagicMock(side_effect=exception.ServiceTooOld)
        volume_api.volume_rpcapi.extend_volume = mock.MagicMock()
        volume_api.scheduler_rpcapi.extend_volume = ext_mock
        volume_api.extend(self.context, volume, 3)
        volume_api.volume_rpcapi.extend_volume.assert_called_once_with(
            self.context, volume, 3, ["RESERVATION"])

        # clean up
        self.volume.delete_volume(self.context, volume)

View File

@ -1314,8 +1314,32 @@ class API(base.Base):
        if reservations is None:
            _roll_back_status()

-        self.volume_rpcapi.extend_volume(context, volume, new_size,
-                                         reservations)
        volume_type = {}
        if volume.volume_type_id:
            volume_type = volume_types.get_volume_type(context.elevated(),
                                                       volume.volume_type_id)

        request_spec = {
            'volume_properties': volume,
            'volume_type': volume_type,
            'volume_id': volume.id
        }

        try:
            self.scheduler_rpcapi.extend_volume(context, volume, new_size,
                                                reservations, request_spec)
        except exception.ServiceTooOld as e:
            # NOTE(erlon): During rolling upgrades scheduler and volume can
            # have different versions. This check makes sure that a new
            # version of the volume service won't break.
            msg = _LW("Failed to send extend volume request to scheduler. "
                      "Falling back to old behaviour. This is normal during a "
                      "live-upgrade. Error: %(e)s")
            LOG.warning(msg, {'e': e})
            # TODO(erlon): Remove in Pike
            self.volume_rpcapi.extend_volume(context, volume, new_size,
                                             reservations)

        LOG.info(_LI("Extend volume request issued successfully."),
                 resource=volume)
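
The try/except above is the usual rolling-upgrade pattern: try the new
scheduler RPC first, and fall back to the direct volume-service call
while the deployment still runs a pre-3.2 scheduler. The pattern in
isolation (hypothetical names, not Cinder's actual RPC layer):

    class ServiceTooOld(Exception):
        """Remote service does not support the requested RPC version."""

    def extend_with_fallback(scheduler_call, volume_call):
        """Prefer the scheduler path; fall back during live upgrades.

        Hypothetical helper; Cinder inlines this logic in the volume
        API code above.
        """
        try:
            return scheduler_call()
        except ServiceTooOld:
            # Scheduler not upgraded yet; use the old direct path until
            # the rolling upgrade completes (fallback slated for removal
            # in Pike per the TODO above).
            return volume_call()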