Filter storage protocol in the scheduler

The requested share protocol was being
ignored by the scheduler, which could
cause shares to be scheduled to hosts
that do not support that protocol.

Change-Id: I2e87264865b645781c481383c039fecbfd7c6eb1
Closes-Bug: #1783736
Goutham Pacha Ravi 2021-03-16 23:48:42 -07:00
parent 9b835f03d5
commit f24fff9522
5 changed files with 111 additions and 17 deletions
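
For context, the gist of the fix: when the share type does not already pin a storage_protocol extra spec, the scheduler now derives one from the requested share protocol, using the "<in>" matcher so that back ends reporting several protocols still match. A minimal standalone sketch of that logic (illustrative only, not part of the diff; the real code lives in FilterScheduler._format_filter_properties below, and the helper name here is made up):

def apply_share_proto_to_extra_specs(share_type, share_proto):
    # Illustrative helper; mirrors the behaviour this commit adds to
    # FilterScheduler._format_filter_properties.
    extra_specs = share_type.setdefault('extra_specs', {})
    # An operator-defined storage_protocol extra spec always wins.
    if extra_specs.get('storage_protocol') is None and share_proto:
        # Back ends may report several protocols (e.g. "NFS_CIFS"),
        # so use the "<in>" substring matcher instead of equality.
        extra_specs['storage_protocol'] = "<in> %s" % share_proto
    return share_type

# Example: a CEPHFS request against a type with no storage_protocol spec.
share_type = apply_share_proto_to_extra_specs({'name': 'regalia'}, 'CEPHFS')
assert share_type['extra_specs']['storage_protocol'] == '<in> CEPHFS'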


@@ -130,10 +130,9 @@ class FilterScheduler(base.Scheduler):
share_properties = request_spec['share_properties']
share_instance_properties = (request_spec.get(
'share_instance_properties', {}))
share_proto = request_spec.get('share_proto',
share_properties.get('share_proto'))
# Since Manila is using mixed filters from Oslo and it's own, which
# takes 'resource_XX' and 'volume_XX' as input respectively, copying
# 'volume_XX' to 'resource_XX' will make both filters happy.
resource_properties = share_properties.copy()
resource_properties.update(share_instance_properties.copy())
share_type = request_spec.get("share_type", {})
@@ -144,18 +143,25 @@ class FilterScheduler(base.Scheduler):
LOG.error(msg)
raise exception.InvalidParameterValue(err=msg)
extra_specs = share_type.get('extra_specs', {})
share_type['extra_specs'] = share_type.get('extra_specs') or {}
if extra_specs:
if share_type['extra_specs']:
for extra_spec_name in share_types.get_boolean_extra_specs():
extra_spec = extra_specs.get(extra_spec_name)
extra_spec = share_type['extra_specs'].get(extra_spec_name)
if extra_spec is not None:
if not extra_spec.startswith("<is>"):
extra_spec = "<is> %s" % extra_spec
share_type['extra_specs'][extra_spec_name] = extra_spec
resource_type = request_spec.get("share_type") or {}
storage_protocol_spec = (
share_type['extra_specs'].get('storage_protocol')
)
if storage_protocol_spec is None and share_proto is not None:
# a host can report multiple protocols as "storage_protocol"
spec_value = "<in> %s" % share_proto
share_type['extra_specs']['storage_protocol'] = spec_value
resource_type = share_type
request_spec.update({'resource_properties': resource_properties})
config_options = self._get_configuration_options()


@@ -38,9 +38,6 @@ class CapabilitiesFilter(base_host.BaseHostFilter):
def host_passes(self, host_state, filter_properties):
"""Return a list of hosts that can create resource_type."""
# Note(zhiteng) Currently only Cinder and Nova are using
# this filter, so the resource type is either instance or
# volume.
resource_type = filter_properties.get('resource_type')
if not self._satisfies_extra_specs(host_state.capabilities,
resource_type):
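
The CapabilitiesFilter itself only loses a stale comment here; it keeps matching the share type's extra specs against each back end's reported capabilities. A rough sketch of the "<in>" matching behaviour this fix relies on (illustrative only; the real comparison is done by the scheduler's extra-spec matching helpers, and matches_storage_protocol is a made-up name):

def matches_storage_protocol(host_capability, extra_spec):
    # Simplified view of how a "<in>" extra spec is evaluated; only the
    # substring behaviour relevant to storage_protocol is shown here.
    if extra_spec.startswith('<in> '):
        requested = extra_spec[len('<in> '):]
        # "<in>" passes when the requested protocol appears anywhere in
        # the capability string reported by the back end.
        return requested in host_capability
    # Without an operator the comparison is a plain equality check.
    return host_capability == extra_spec

# A back end advertising "NFS_CIFS" satisfies either NFS or CIFS requests,
# but a CEPHFS request filters it out.
assert matches_storage_protocol('NFS_CIFS', '<in> NFS')
assert matches_storage_protocol('NFS_CIFS', '<in> CIFS')
assert not matches_storage_protocol('NFS_CIFS', '<in> CEPHFS')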


@@ -43,10 +43,11 @@ class FilterSchedulerTestCase(test_base.SchedulerTestCase):
def test___format_filter_properties_active_replica_host_is_provided(self):
sched = fakes.FakeFilterScheduler()
fake_context = context.RequestContext('user', 'project')
fake_type = {'name': 'NFS'}
request_spec = {
'share_properties': {'project_id': 1, 'size': 1},
'share_instance_properties': {},
'share_type': {'name': 'NFS'},
'share_type': fake_type,
'share_id': ['fake-id1'],
'active_replica_host': 'fake_ar_host',
}
@@ -59,20 +60,21 @@ class FilterSchedulerTestCase(test_base.SchedulerTestCase):
retval = sched._format_filter_properties(
fake_context, {}, request_spec)
self.assertDictMatch(fake_type, retval[0]['resource_type'])
self.assertIn('replication_domain', retval[0])
# no "share_proto" was specified in the request_spec
self.assertNotIn('storage_protocol', retval[0])
@ddt.data(True, False)
def test__format_filter_properties_backend_specified_for_replica(
self, has_share_backend_name):
sched = fakes.FakeFilterScheduler()
fake_context = context.RequestContext('user', 'project')
fake_type = {'name': 'NFS', 'extra_specs': {}}
request_spec = {
'share_properties': {'project_id': 1, 'size': 1},
'share_instance_properties': {},
'share_type': {
'name': 'NFS',
'extra_specs': {},
},
'share_type': fake_type,
'share_id': 'fake-id1',
'active_replica_host': 'fake_ar_host',
}
@@ -87,9 +89,37 @@ class FilterSchedulerTestCase(test_base.SchedulerTestCase):
retval = sched._format_filter_properties(
fake_context, {}, request_spec)
self.assertDictMatch(fake_type, retval[0]['resource_type'])
self.assertNotIn('share_backend_name',
retval[0]['share_type']['extra_specs'])
@ddt.data(True, False)
def test__format_filter_properties_storage_protocol_extra_spec_present(
self, spec_present):
sched = fakes.FakeFilterScheduler()
fake_context = context.RequestContext('user', 'project')
extra_specs_requested = (
{'storage_protocol': 'NFS_CIFS'} if spec_present else {}
)
fake_type = {
'name': 'regalia',
'extra_specs': extra_specs_requested,
}
request_spec = {
'share_properties': {'project_id': 1, 'size': 1},
'share_instance_properties': {},
'share_proto': 'CEPHFS',
'share_type': fake_type,
'share_id': 'fake-id1',
}
retval = sched._format_filter_properties(
fake_context, {}, request_spec)[0]
filter_spec = retval['share_type']['extra_specs']['storage_protocol']
expected_spec = 'NFS_CIFS' if spec_present else '<in> CEPHFS'
self.assertEqual(expected_spec, filter_spec)
self.assertDictMatch(fake_type, retval['resource_type'])
def test_create_share_no_hosts(self):
# Ensure empty hosts/child_zones result in NoValidHosts exception.
sched = fakes.FakeFilterScheduler()
@@ -236,6 +266,56 @@ class FilterSchedulerTestCase(test_base.SchedulerTestCase):
fake_context, request_spec, {})
self.assertTrue(_mock_service_get_all_by_topic.called)
@ddt.data({'storage_protocol': 'CEPHFS'},
{'storage_protocol': '<in> CEPHFS'},
{'name': 'foo'})
@mock.patch('manila.db.service_get_all_by_topic')
def test__schedule_share_storage_protocol_not_supported(
self, share_type, _mock_service_get_all_by_topic):
sched = fakes.FakeFilterScheduler()
sched.host_manager = fakes.FakeHostManager()
requested_share_proto = (
share_type.get('storage_protocol', '').strip('<in> ')
or 'MAPRFS'
)
fake_context = context.RequestContext('user', 'project', is_admin=True)
fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic)
request_spec = {
'share_type': share_type,
'share_properties': {'project_id': 1, 'size': 1},
'share_instance_properties': {'project_id': 1, 'size': 1},
'share_proto': requested_share_proto,
}
self.assertRaises(exception.NoValidHost, sched._schedule_share,
fake_context, request_spec, {})
self.assertTrue(_mock_service_get_all_by_topic.called)
@ddt.data({'storage_protocol': 'GLUSTERFS'},
{'storage_protocol': '<in> GLUSTERFS'},
{'name': 'foo'})
@mock.patch('manila.db.service_get_all_by_topic')
def test__schedule_share_valid_storage_protocol(
self, share_type, _mock_service_get_all_by_topic):
sched = fakes.FakeFilterScheduler()
sched.host_manager = fakes.FakeHostManager()
fake_context = context.RequestContext('user', 'project', is_admin=True)
fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic)
request_spec = {
'share_type': share_type,
'share_properties': {'project_id': 1, 'size': 1},
'share_instance_properties': {'project_id': 1, 'size': 1},
'share_proto': 'GLUSTERFS',
}
weighed_host = sched._schedule_share(fake_context, request_spec, {})
self.assertIsNotNone(weighed_host)
self.assertIsNotNone(weighed_host.obj)
self.assertEqual('GLUSTERFS',
getattr(weighed_host.obj, 'storage_protocol'))
self.assertEqual('host6', weighed_host.obj.host.split('#')[0])
self.assertTrue(_mock_service_get_all_by_topic.called)
def _setup_dedupe_fakes(self, extra_specs):
sched = fakes.FakeFilterScheduler()
sched.host_manager = fakes.FakeHostManager()


@@ -230,6 +230,7 @@ class FakeHostManager(host_manager.HostManager):
'create_share_from_snapshot_support': True,
'replication_type': 'writable',
'replication_domain': 'endor',
'storage_protocol': 'NFS_CIFS',
},
'host2': {'total_capacity_gb': 2048,
'free_capacity_gb': 300,
@@ -243,6 +244,7 @@ class FakeHostManager(host_manager.HostManager):
'create_share_from_snapshot_support': True,
'replication_type': 'readable',
'replication_domain': 'kashyyyk',
'storage_protocol': 'NFS_CIFS',
},
'host3': {'total_capacity_gb': 512,
'free_capacity_gb': 256,
@@ -254,6 +256,7 @@ class FakeHostManager(host_manager.HostManager):
'snapshot_support': True,
'create_share_from_snapshot_support': True,
'timestamp': None,
'storage_protocol': 'NFS_CIFS',
},
'host4': {'total_capacity_gb': 2048,
'free_capacity_gb': 200,
@@ -267,6 +270,7 @@ class FakeHostManager(host_manager.HostManager):
'create_share_from_snapshot_support': True,
'replication_type': 'dr',
'replication_domain': 'naboo',
'storage_protocol': 'NFS_CIFS',
},
'host5': {'total_capacity_gb': 2048,
'free_capacity_gb': 500,
@@ -279,6 +283,7 @@ class FakeHostManager(host_manager.HostManager):
'snapshot_support': True,
'create_share_from_snapshot_support': True,
'replication_type': None,
'storage_protocol': 'NFS_CIFS',
},
'host6': {'total_capacity_gb': 'unknown',
'free_capacity_gb': 'unknown',
@@ -288,6 +293,7 @@ class FakeHostManager(host_manager.HostManager):
'snapshot_support': True,
'create_share_from_snapshot_support': True,
'timestamp': None,
'storage_protocol': 'GLUSTERFS',
},
}


@@ -0,0 +1,5 @@
---
fixes:
  - |
    A bug with storage protocol filtering in the scheduler has been fixed.
    See `bug <https://launchpad.net/bugs/1783736>`_ for more details.