Support filtering backends based on operation type

During the Rocky PTG we discussed the concept of
'sold out' pools. In order to reuse the existing
code as much as possible, we decided to achieve
this via scheduler filters: cloud vendors can
write their own scheduler filter plugin to
disable new resource creation on sold-out pools.
The only change needed on the Cinder framework
side is therefore to deliver the 'operation' type
when asking the scheduler to filter hosts.

For this first stage, the initial operations are:
1. create_group
2. manage_existing
3. extend_volume
4. create_volume
5. create_snapshot
6. migrate_volume
7. retype_volume
8. manage_existing_snapshot
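
As a rough illustration of how a vendor plugin might consume this
field (the sketch below is not part of this change; the SoldOutFilter
name and the 'sold_out' capability are made up, while the
BaseBackendFilter interface and request_spec['operation'] are what
Cinder itself provides):

# Hypothetical vendor filter, assuming the driver reports a
# 'sold_out' capability for each pool/backend.
from cinder.scheduler import filters

# Example choice of operations that allocate new resources and
# should be refused on sold-out pools.
NEW_RESOURCE_OPERATIONS = ('create_volume', 'create_group',
                           'create_snapshot', 'manage_existing',
                           'manage_existing_snapshot')


class SoldOutFilter(filters.BaseBackendFilter):
    """Reject new-resource operations on pools marked as sold out."""

    def backend_passes(self, backend_state, filter_properties):
        request_spec = filter_properties.get('request_spec') or {}
        operation = request_spec.get('operation')
        capabilities = backend_state.capabilities or {}
        sold_out = capabilities.get('sold_out', False)
        if sold_out and operation in NEW_RESOURCE_OPERATIONS:
            # Reject this pool as a placement candidate for new
            # resources; operations not listed above still pass.
            return False
        return True

Because the framework change only tags the request spec, existing
filters that ignore 'operation' keep working unchanged.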

Partial-Implements: bp support-mark-pool-sold-out
Change-Id: I4f0a14444675ebd0fe6397a5ff2ef9dca62b4453
TommyLike 2018-03-21 17:19:02 +08:00
parent ccbd84439b
commit e1ec4b4c2e
6 changed files with 86 additions and 3 deletions


@@ -143,6 +143,7 @@ OBJ_VERSIONS.add('1.32', {'RequestSpec': '1.3'})
 OBJ_VERSIONS.add('1.33', {'Volume': '1.8'})
 OBJ_VERSIONS.add('1.34', {'VolumeAttachment': '1.3'})
 OBJ_VERSIONS.add('1.35', {'Backup': '1.6', 'BackupImport': '1.6'})
+OBJ_VERSIONS.add('1.36', {'RequestSpec': '1.4'})


 class CinderObjectRegistry(base.VersionedObjectRegistry):


@@ -26,7 +26,8 @@ class RequestSpec(base.CinderObject, base.CinderObjectDictCompat,
     # Version 1.1: Added group_id and group_backend
     # Version 1.2 Added ``resource_backend``
     # Version 1.3: Added backup_id
-    VERSION = '1.3'
+    # Version 1.4: Add 'operation'
+    VERSION = '1.4'

     fields = {
         'consistencygroup_id': fields.UUIDField(nullable=True),
@@ -45,6 +46,7 @@ class RequestSpec(base.CinderObject, base.CinderObjectDictCompat,
         'group_backend': fields.StringField(nullable=True),
         'resource_backend': fields.StringField(nullable=True),
         'backup_id': fields.UUIDField(nullable=True),
+        'operation': fields.StringField(nullable=True),
     }

     obj_extra_fields = ['resource_properties']
@@ -96,7 +98,9 @@ class RequestSpec(base.CinderObject, base.CinderObjectDictCompat,
         super(RequestSpec, self).obj_make_compatible(primitive, target_version)
         target_version = versionutils.convert_version_to_tuple(target_version)
         added_fields = (((1, 1), ('group_id', 'group_backend')),
-                        ((1, 2), ('resource_backend')))
+                        ((1, 2), ('resource_backend')),
+                        ((1, 3), ('backup_id')),
+                        ((1, 4), ('operation')))
         for version, remove_fields in added_fields:
             if target_version < version:
                 for obj_field in remove_fields:


@@ -62,6 +62,23 @@ QUOTAS = quota.QUOTAS

 LOG = logging.getLogger(__name__)


+def append_operation_type(name=None):
+    def _decorator(schedule_function):
+        @six.wraps(schedule_function)
+        def inject_operation_decorator(*args, **kwargs):
+
+            request_spec = kwargs.get('request_spec', None)
+            request_spec_list = kwargs.get('request_spec_list', None)
+            if request_spec:
+                request_spec['operation'] = name or schedule_function.__name__
+            if request_spec_list:
+                for rs in request_spec_list:
+                    rs['operation'] = name or schedule_function.__name__
+            return schedule_function(*args, **kwargs)
+        return inject_operation_decorator
+    return _decorator
+
+
 class SchedulerManager(manager.CleanableManager, manager.Manager):
     """Chooses a host to create volumes."""
@@ -146,6 +163,7 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
         while self._startup_delay and not self.driver.is_ready():
             eventlet.sleep(1)

+    @append_operation_type()
     def create_group(self, context, group, group_spec=None,
                      group_filter_properties=None, request_spec_list=None,
                      filter_properties_list=None):
@@ -172,6 +190,7 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
             group.save()

     @objects.Volume.set_workers
+    @append_operation_type()
     def create_volume(self, context, volume, snapshot_id=None, image_id=None,
                       request_spec=None, filter_properties=None,
                       backup_id=None):
@@ -194,6 +213,7 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
         with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
             flow_engine.run()

+    @append_operation_type()
     def create_snapshot(self, ctxt, volume, snapshot, backend,
                         request_spec=None, filter_properties=None):
         """Create snapshot for a volume.
@@ -230,6 +250,7 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
     def request_service_capabilities(self, context):
         volume_rpcapi.VolumeAPI().publish_service_capabilities(context)

+    @append_operation_type()
     def migrate_volume(self, context, volume, backend, force_copy,
                        request_spec, filter_properties):
         """Ensure that the backend exists and can accept the volume."""
@@ -267,6 +288,7 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
         return self.migrate_volume(context, volume, host, force_host_copy,
                                    request_spec, filter_properties)

+    @append_operation_type(name='retype_volume')
     def retype(self, context, volume, request_spec, filter_properties=None):
         """Schedule the modification of a volume's type.
@@ -321,6 +343,7 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
                                          reservations,
                                          old_reservations)

+    @append_operation_type()
     def manage_existing(self, context, volume, request_spec,
                         filter_properties=None):
         """Ensure that the host exists and can accept the volume."""
@@ -352,6 +375,7 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
         volume_rpcapi.VolumeAPI().manage_existing(context, volume,
                                                   request_spec.get('ref'))

+    @append_operation_type()
     def manage_existing_snapshot(self, context, volume, snapshot, ref,
                                  request_spec, filter_properties=None):
         """Ensure that the host exists and can accept the snapshot."""
@@ -383,6 +407,7 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
         """
         return self.driver.get_pools(context, filters)

+    @append_operation_type(name='create_group')
     def validate_host_capacity(self, context, backend, request_spec,
                                filter_properties):
         try:
@@ -398,6 +423,7 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
             return False
         return True

+    @append_operation_type()
     def extend_volume(self, context, volume, new_size, reservations,
                       request_spec=None, filter_properties=None):


@@ -42,7 +42,7 @@ object_data = {
     'ManageableVolumeList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e',
     'QualityOfServiceSpecs': '1.0-0b212e0a86ee99092229874e03207fe8',
     'QualityOfServiceSpecsList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e',
-    'RequestSpec': '1.3-9510bf37e30fd4c282599a4b2a26675e',
+    'RequestSpec': '1.4-2f858ebf18fa1dfe00fba7c3ec5cf303',
     'Service': '1.6-e881b6b324151dd861e09cdfffcdaccd',
     'ServiceList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
     'Snapshot': '1.5-ac1cdbd5b89588f6a8f44afdf6b8b201',


@@ -18,6 +18,7 @@ Tests For Scheduler
 """

 import collections
+import copy
 from datetime import datetime

 import ddt
 import mock
@@ -138,6 +139,42 @@ class SchedulerManagerTestCase(test.TestCase):
         mock_extend.assert_called_once_with(
             self.context, volume, 2, 'fake_reservation')

+    @ddt.data({'key': 'value'},
+              objects.RequestSpec(volume_id=fake.VOLUME2_ID))
+    def test_append_operation_decorator(self, rs):
+
+        @manager.append_operation_type()
+        def _fake_schedule_method1(request_spec=None):
+            return request_spec
+
+        @manager.append_operation_type(name='_fake_schedule_method22')
+        def _fake_schedule_method2(request_spec=None):
+            return request_spec
+
+        @manager.append_operation_type()
+        def _fake_schedule_method3(request_spec2=None):
+            return request_spec2
+
+        result1 = _fake_schedule_method1(request_spec=copy.deepcopy(rs))
+        result2 = _fake_schedule_method2(request_spec=copy.deepcopy(rs))
+        result3 = _fake_schedule_method3(request_spec2=copy.deepcopy(rs))
+        self.assertEqual('_fake_schedule_method1', result1['operation'])
+        self.assertEqual('_fake_schedule_method22', result2['operation'])
+        self.assertEqual(rs, result3)
+
+    @ddt.data([{'key1': 'value1'}, {'key1': 'value2'}],
+              [objects.RequestSpec(volume_id='fake_volume1'),
+               objects.RequestSpec(volume_id='fake_volume2')])
+    def test_append_operation_decorator_with_list(self, rs_list):
+
+        @manager.append_operation_type()
+        def _fake_schedule_method(request_spec_list=None):
+            return request_spec_list
+
+        result1 = _fake_schedule_method(request_spec_list=rs_list)
+        for rs in result1:
+            self.assertEqual('_fake_schedule_method', rs['operation'])
+
     @ddt.data('available', 'in-use')
     @mock.patch('cinder.scheduler.driver.Scheduler.backend_passes_filters')
     @mock.patch(


@@ -0,0 +1,15 @@
+---
+features:
+  - |
+    Scheduler plugins are now aware of the operation type via the ``operation``
+    attribute in the RequestSpec dictionary, so plugins can filter backends
+    based on backend status as well as operation type. Current possible values
+    for ``operation`` are:
+
+    - create_volume
+    - extend_volume
+    - create_snapshot
+    - retype_volume
+    - migrate_volume
+    - manage_existing
+    - manage_existing_snapshot
+    - create_group
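
An out-of-tree filter such as the hypothetical SoldOutFilter sketched
in the commit message would typically be registered under the
``cinder.scheduler.filters`` entry-point namespace and then enabled
through the existing ``scheduler_default_filters`` option in
cinder.conf, for example:

scheduler_default_filters = AvailabilityZoneFilter,CapacityFilter,CapabilitiesFilter,SoldOutFilter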