Browse Source

Merge "Filter storage protocol in the scheduler" into stable/train

stable/train
Zuul 2 weeks ago
committed by Gerrit Code Review
parent
commit
5cbc47b0c5
5 changed files with 111 additions and 17 deletions
  1. +15
    -9
      manila/scheduler/drivers/filter.py
  2. +0
    -3
      manila/scheduler/filters/capabilities.py
  3. +85
    -5
      manila/tests/scheduler/drivers/test_filter.py
  4. +6
    -0
      manila/tests/scheduler/fakes.py
  5. +5
    -0
      releasenotes/notes/bug-1783736-add-share-proto-filtering-to-the-capabilities-scheduler-d8391183335def9f.yaml

+ 15
- 9
manila/scheduler/drivers/filter.py View File

@ -130,10 +130,9 @@ class FilterScheduler(base.Scheduler):
share_properties = request_spec['share_properties']
share_instance_properties = (request_spec.get(
'share_instance_properties', {}))
share_proto = request_spec.get('share_proto',
share_properties.get('share_proto'))
# Since Manila is using mixed filters from Oslo and its own, which
# take 'resource_XX' and 'volume_XX' as input respectively, copying
# 'volume_XX' to 'resource_XX' will make both filters happy.
resource_properties = share_properties.copy()
resource_properties.update(share_instance_properties.copy())
share_type = request_spec.get("share_type", {})
@ -144,18 +143,25 @@ class FilterScheduler(base.Scheduler):
LOG.error(msg)
raise exception.InvalidParameterValue(err=msg)
extra_specs = share_type.get('extra_specs', {})
share_type['extra_specs'] = share_type.get('extra_specs') or {}
if extra_specs:
if share_type['extra_specs']:
for extra_spec_name in share_types.get_boolean_extra_specs():
extra_spec = extra_specs.get(extra_spec_name)
extra_spec = share_type['extra_specs'].get(extra_spec_name)
if extra_spec is not None:
if not extra_spec.startswith("<is>"):
extra_spec = "<is> %s" % extra_spec
share_type['extra_specs'][extra_spec_name] = extra_spec
share_type['extra_specs'][extra_spec_name] = extra_spec
storage_protocol_spec = (
share_type['extra_specs'].get('storage_protocol')
)
if storage_protocol_spec is None and share_proto is not None:
# a host can report multiple protocols as "storage_protocol"
spec_value = "<in> %s" % share_proto
share_type['extra_specs']['storage_protocol'] = spec_value
resource_type = request_spec.get("share_type") or {}
resource_type = share_type
request_spec.update({'resource_properties': resource_properties})
config_options = self._get_configuration_options()


+ 0
- 3
manila/scheduler/filters/capabilities.py View File

@ -38,9 +38,6 @@ class CapabilitiesFilter(base_host.BaseHostFilter):
def host_passes(self, host_state, filter_properties):
"""Return a list of hosts that can create resource_type."""
# Note(zhiteng) Currently only Cinder and Nova are using
# this filter, so the resource type is either instance or
# volume.
resource_type = filter_properties.get('resource_type')
if not self._satisfies_extra_specs(host_state.capabilities,
resource_type):


+ 85
- 5
manila/tests/scheduler/drivers/test_filter.py View File

@ -42,10 +42,11 @@ class FilterSchedulerTestCase(test_base.SchedulerTestCase):
def test___format_filter_properties_active_replica_host_is_provided(self):
sched = fakes.FakeFilterScheduler()
fake_context = context.RequestContext('user', 'project')
fake_type = {'name': 'NFS'}
request_spec = {
'share_properties': {'project_id': 1, 'size': 1},
'share_instance_properties': {},
'share_type': {'name': 'NFS'},
'share_type': fake_type,
'share_id': ['fake-id1'],
'active_replica_host': 'fake_ar_host',
}
@ -58,20 +59,21 @@ class FilterSchedulerTestCase(test_base.SchedulerTestCase):
retval = sched._format_filter_properties(
fake_context, {}, request_spec)
self.assertDictMatch(fake_type, retval[0]['resource_type'])
self.assertIn('replication_domain', retval[0])
# no "share_proto" was specified in the request_spec
self.assertNotIn('storage_protocol', retval[0])
@ddt.data(True, False)
def test__format_filter_properties_backend_specified_for_replica(
self, has_share_backend_name):
sched = fakes.FakeFilterScheduler()
fake_context = context.RequestContext('user', 'project')
fake_type = {'name': 'NFS', 'extra_specs': {}}
request_spec = {
'share_properties': {'project_id': 1, 'size': 1},
'share_instance_properties': {},
'share_type': {
'name': 'NFS',
'extra_specs': {},
},
'share_type': fake_type,
'share_id': 'fake-id1',
'active_replica_host': 'fake_ar_host',
}
@ -86,9 +88,37 @@ class FilterSchedulerTestCase(test_base.SchedulerTestCase):
retval = sched._format_filter_properties(
fake_context, {}, request_spec)
self.assertDictMatch(fake_type, retval[0]['resource_type'])
self.assertNotIn('share_backend_name',
retval[0]['share_type']['extra_specs'])
@ddt.data(True, False)
def test__format_filter_properties_storage_protocol_extra_spec_present(
self, spec_present):
sched = fakes.FakeFilterScheduler()
fake_context = context.RequestContext('user', 'project')
extra_specs_requested = (
{'storage_protocol': 'NFS_CIFS'} if spec_present else {}
)
fake_type = {
'name': 'regalia',
'extra_specs': extra_specs_requested,
}
request_spec = {
'share_properties': {'project_id': 1, 'size': 1},
'share_instance_properties': {},
'share_proto': 'CEPHFS',
'share_type': fake_type,
'share_id': 'fake-id1',
}
retval = sched._format_filter_properties(
fake_context, {}, request_spec)[0]
filter_spec = retval['share_type']['extra_specs']['storage_protocol']
expected_spec = 'NFS_CIFS' if spec_present else '<in> CEPHFS'
self.assertEqual(expected_spec, filter_spec)
self.assertDictMatch(fake_type, retval['resource_type'])
def test_create_share_no_hosts(self):
# Ensure empty hosts/child_zones result in NoValidHosts exception.
sched = fakes.FakeFilterScheduler()
@ -235,6 +265,56 @@ class FilterSchedulerTestCase(test_base.SchedulerTestCase):
fake_context, request_spec, {})
self.assertTrue(_mock_service_get_all_by_topic.called)
@ddt.data({'storage_protocol': 'CEPHFS'},
{'storage_protocol': '<in> CEPHFS'},
{'name': 'foo'})
@mock.patch('manila.db.service_get_all_by_topic')
def test__schedule_share_storage_protocol_not_supported(
self, share_type, _mock_service_get_all_by_topic):
sched = fakes.FakeFilterScheduler()
sched.host_manager = fakes.FakeHostManager()
requested_share_proto = (
share_type.get('storage_protocol', '').strip('<in> ')
or 'MAPRFS'
)
fake_context = context.RequestContext('user', 'project', is_admin=True)
fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic)
request_spec = {
'share_type': share_type,
'share_properties': {'project_id': 1, 'size': 1},
'share_instance_properties': {'project_id': 1, 'size': 1},
'share_proto': requested_share_proto,
}
self.assertRaises(exception.NoValidHost, sched._schedule_share,
fake_context, request_spec, {})
self.assertTrue(_mock_service_get_all_by_topic.called)
@ddt.data({'storage_protocol': 'GLUSTERFS'},
{'storage_protocol': '<in> GLUSTERFS'},
{'name': 'foo'})
@mock.patch('manila.db.service_get_all_by_topic')
def test__schedule_share_valid_storage_protocol(
self, share_type, _mock_service_get_all_by_topic):
sched = fakes.FakeFilterScheduler()
sched.host_manager = fakes.FakeHostManager()
fake_context = context.RequestContext('user', 'project', is_admin=True)
fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic)
request_spec = {
'share_type': share_type,
'share_properties': {'project_id': 1, 'size': 1},
'share_instance_properties': {'project_id': 1, 'size': 1},
'share_proto': 'GLUSTERFS',
}
weighed_host = sched._schedule_share(fake_context, request_spec, {})
self.assertIsNotNone(weighed_host)
self.assertIsNotNone(weighed_host.obj)
self.assertEqual('GLUSTERFS',
getattr(weighed_host.obj, 'storage_protocol'))
self.assertEqual('host6', weighed_host.obj.host.split('#')[0])
self.assertTrue(_mock_service_get_all_by_topic.called)
def _setup_dedupe_fakes(self, extra_specs):
sched = fakes.FakeFilterScheduler()
sched.host_manager = fakes.FakeHostManager()


+ 6
- 0
manila/tests/scheduler/fakes.py View File

@ -230,6 +230,7 @@ class FakeHostManager(host_manager.HostManager):
'create_share_from_snapshot_support': True,
'replication_type': 'writable',
'replication_domain': 'endor',
'storage_protocol': 'NFS_CIFS',
},
'host2': {'total_capacity_gb': 2048,
'free_capacity_gb': 300,
@ -243,6 +244,7 @@ class FakeHostManager(host_manager.HostManager):
'create_share_from_snapshot_support': True,
'replication_type': 'readable',
'replication_domain': 'kashyyyk',
'storage_protocol': 'NFS_CIFS',
},
'host3': {'total_capacity_gb': 512,
'free_capacity_gb': 256,
@ -254,6 +256,7 @@ class FakeHostManager(host_manager.HostManager):
'snapshot_support': True,
'create_share_from_snapshot_support': True,
'timestamp': None,
'storage_protocol': 'NFS_CIFS',
},
'host4': {'total_capacity_gb': 2048,
'free_capacity_gb': 200,
@ -267,6 +270,7 @@ class FakeHostManager(host_manager.HostManager):
'create_share_from_snapshot_support': True,
'replication_type': 'dr',
'replication_domain': 'naboo',
'storage_protocol': 'NFS_CIFS',
},
'host5': {'total_capacity_gb': 2048,
'free_capacity_gb': 500,
@ -279,6 +283,7 @@ class FakeHostManager(host_manager.HostManager):
'snapshot_support': True,
'create_share_from_snapshot_support': True,
'replication_type': None,
'storage_protocol': 'NFS_CIFS',
},
'host6': {'total_capacity_gb': 'unknown',
'free_capacity_gb': 'unknown',
@ -288,6 +293,7 @@ class FakeHostManager(host_manager.HostManager):
'snapshot_support': True,
'create_share_from_snapshot_support': True,
'timestamp': None,
'storage_protocol': 'GLUSTERFS',
},
}


+ 5
- 0
releasenotes/notes/bug-1783736-add-share-proto-filtering-to-the-capabilities-scheduler-d8391183335def9f.yaml View File

@ -0,0 +1,5 @@
---
fixes:
- |
A bug with storage protocol filtering in the scheduler has been fixed.
See `bug <https://launchpad.net/bugs/1783736>`_ for more details.

Loading…
Cancel
Save