Merge "Scheduler enhancements for Share Replication"

This commit is contained in:
Jenkins 2016-02-26 16:20:46 +00:00 committed by Gerrit Code Review
commit ba22e27042
17 changed files with 302 additions and 6 deletions

View File

@ -181,6 +181,18 @@ class FilterScheduler(base.Scheduler):
        if cg_host:
            cg_support = cg_host.consistency_group_support

        # NOTE(gouthamr): If 'active_replica_host' is present in the request
        # spec, pass that host's 'replication_domain' to the
        # ShareReplication filter.
        active_replica_host = request_spec.get('active_replica_host')
        replication_domain = None
        if active_replica_host:
            temp_hosts = self.host_manager.get_all_host_states_share(elevated)
            ar_host = next((host for host in temp_hosts
                            if host.host == active_replica_host), None)
            if ar_host:
                replication_domain = ar_host.replication_domain

        if filter_properties is None:
            filter_properties = {}
        self._populate_retry_share(filter_properties, resource_properties)
@ -192,6 +204,7 @@ class FilterScheduler(base.Scheduler):
            'resource_type': resource_type,
            'cg_support': cg_support,
            'consistency_group': cg,
            'replication_domain': replication_domain,
        })

        self.populate_filter_properties_share(request_spec, filter_properties)

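For context, here is a minimal sketch of the filter properties that the hunk above assembles for a create-replica request, limited to the keys the new ShareReplicationFilter reads; the concrete values (host string, domain name, share id) are made up for illustration and are not part of this change.

# Illustrative only -- hypothetical values, showing the keys populated above.
filter_properties = {
    'request_spec': {
        'share_id': 'fake-share-id',
        'active_replica_host': 'hostA@backend1#pool1',
    },
    'resource_type': {'extra_specs': {'replication_type': 'readable'}},
    # Looked up from the active replica's host state; stays None when the
    # request spec carries no 'active_replica_host'.
    'replication_domain': 'replication-domain-1',
}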
View File

@ -0,0 +1,75 @@
# Copyright (c) 2016 Goutham Pacha Ravi
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log

from manila.scheduler.filters import base_host

LOG = log.getLogger(__name__)


class ShareReplicationFilter(base_host.BaseHostFilter):
    """ShareReplicationFilter filters hosts based on replication support."""

    def host_passes(self, host_state, filter_properties):
        """Return True if the 'active' replica's host can replicate with host.

        Design of this filter:
        - Share replication is symmetric. All backends that can
          replicate between each other must share the same
          'replication_domain'.
        - For scheduling a share that can be replicated in the future,
          this filter checks for the 'replication_domain' capability.
        - For scheduling a replica, it checks for 'replication_domain'
          compatibility with the 'active' replica's host.
        """
        active_replica_host = filter_properties.get('request_spec', {}).get(
            'active_replica_host')
        replication_type = filter_properties.get('resource_type', {}).get(
            'extra_specs', {}).get('replication_type')
        active_replica_replication_domain = filter_properties.get(
            'replication_domain')
        host_replication_domain = host_state.replication_domain

        if replication_type is None:
            # NOTE(gouthamr): The request is probably not for a replicated
            # share or a replica, so this host passes. Also, avoid creating
            # a replica on the same host.
            return True
        elif host_replication_domain is None:
            msg = "Replication is not enabled on host %s."
            LOG.debug(msg, host_state.host)
            return False
        elif active_replica_host is None:
            # 'replication_type' filtering is handled by the capabilities
            # filter, since it is a share-type extra-spec.
            return True

        # Filter by replication_domain when scheduling a replica.
        if active_replica_replication_domain != host_replication_domain:
            msg = ("The replication domain of host %(host)s is "
                   "'%(host_domain)s' and it does not match the replication "
                   "domain of the 'active' replica's host "
                   "%(active_replica_host)s, which is '%(arh_domain)s'.")
            kwargs = {
                "host": host_state.host,
                "host_domain": host_replication_domain,
                "active_replica_host": active_replica_host,
                "arh_domain": active_replica_replication_domain,
            }
            LOG.debug(msg, kwargs)
            return False

        return True

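A short usage sketch of the filter above, exercising the replication-domain comparison with the FakeHostState helper from manila's scheduler test fakes; the host strings and domain names here are hypothetical.

from manila.scheduler.filters import share_replication
from manila.tests.scheduler import fakes

replication_filter = share_replication.ShareReplicationFilter()

# Properties the scheduler would pass for a create-replica request
# (hypothetical values).
filter_properties = {
    'request_spec': {'active_replica_host': 'hostA@backend1#pool1'},
    'resource_type': {'extra_specs': {'replication_type': 'readable'}},
    'replication_domain': 'london',  # domain of the active replica's backend
}

same_domain = fakes.FakeHostState('hostB@backend2#pool1',
                                  {'replication_domain': 'london'})
other_domain = fakes.FakeHostState('hostC@backend3#pool1',
                                   {'replication_domain': 'paris'})

print(replication_filter.host_passes(same_domain, filter_properties))   # True
print(replication_filter.host_passes(other_domain, filter_properties))  # False

The two cases mirror the 'happy day' and 'incompatible domain' unit tests added later in this change.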
View File

@ -46,6 +46,7 @@ host_manager_opts = [
                    'CapacityFilter',
                    'CapabilitiesFilter',
                    'ConsistencyGroupFilter',
                    'ShareReplicationFilter',
                ],
                help='Which filter class names to use for filtering hosts '
                     'when not specified in the request.'),
@ -128,6 +129,7 @@ class HostState(object):
        self.dedupe = False
        self.compression = False
        self.replication_type = None
        self.replication_domain = None

        # PoolState for all pools
        self.pools = {}
@ -296,6 +298,9 @@ class HostState(object):
        if not pool_cap.get('replication_type'):
            pool_cap['replication_type'] = self.replication_type

        if not pool_cap.get('replication_domain'):
            pool_cap['replication_domain'] = self.replication_domain

    def update_backend(self, capability):
        self.share_backend_name = capability.get('share_backend_name')
        self.vendor_name = capability.get('vendor_name')
@ -308,6 +313,7 @@ class HostState(object):
            'consistency_group_support', False)
        self.updated = capability['timestamp']
        self.replication_type = capability.get('replication_type')
        self.replication_domain = capability.get('replication_domain')
    def consume_from_share(self, share):
        """Incrementally update host state from a share."""
@ -372,6 +378,8 @@ class PoolState(HostState):
                'compression', False)
            self.replication_type = capability.get(
                'replication_type', self.replication_type)
            self.replication_domain = capability.get(
                'replication_domain')

    def update_pools(self, capability):
        # Do nothing, since we don't have pools within pool, yet

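The two-line fallback added to HostState above means a pool only needs its own 'replication_domain' capability when it differs from the backend-level value. A standalone sketch of that behavior (the function name and values are hypothetical, not manila code):

# Standalone sketch of the pool-capability fallback; not manila code.
BACKEND_REPLICATION_DOMAIN = 'dc1-pair'  # hypothetical backend-level value


def fill_pool_defaults(pool_cap):
    """Mirror the fallback: pools inherit the backend's replication_domain."""
    if not pool_cap.get('replication_domain'):
        pool_cap['replication_domain'] = BACKEND_REPLICATION_DOMAIN
    return pool_cap


print(fill_pool_defaults({'pool_name': 'pool1'}))
# {'pool_name': 'pool1', 'replication_domain': 'dc1-pair'}
print(fill_pool_defaults({'pool_name': 'pool2', 'replication_domain': 'dc2'}))
# {'pool_name': 'pool2', 'replication_domain': 'dc2'}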
View File

@ -375,8 +375,10 @@ class API(base.Base):
        self._check_is_share_busy(share)

-        if not self.db.share_replicas_get_available_active_replica(
-                context, share['id']):
        active_replica = self.db.share_replicas_get_available_active_replica(
            context, share['id'])
        if not active_replica:
            msg = _("Share %s does not have any active replica in available "
                    "state.")
            raise exception.ReplicationException(reason=msg % share['id'])
@ -386,6 +388,8 @@ class API(base.Base):
            context, share, availability_zone=availability_zone,
            share_network_id=share_network_id))

        request_spec['active_replica_host'] = active_replica['host']

        self.db.share_replica_update(
            context, share_replica['id'],
            {'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC})

View File

@ -117,6 +117,16 @@ share_opts = [
             "define network plugin config options in some separate config "
             "group and set its name here. Used only with another "
             "option 'driver_handles_share_servers' set to 'True'."),
    # Replication option/s
    cfg.StrOpt(
        "replication_domain",
        default=None,
        help="A string specifying the replication domain that the backend "
             "belongs to. This option must be specified identically in the "
             "configuration sections of all backends that support "
             "replication between each other. If this option is not "
             "specified in the group, replication is not enabled on the "
             "backend."),
]

ssh_opts = [
@ -275,6 +285,12 @@ class ShareDriver(object):
            return self.configuration.safe_get('driver_handles_share_servers')
        return CONF.driver_handles_share_servers

    @property
    def replication_domain(self):
        if self.configuration:
            return self.configuration.safe_get('replication_domain')
        return CONF.replication_domain

    def _verify_share_server_handling(self, driver_handles_share_servers):
        """Verifies driver_handles_share_servers and given configuration."""
        if not isinstance(self.driver_handles_share_servers, bool):
@ -899,6 +915,7 @@ class ShareDriver(object):
            qos=False,
            pools=self.pools or None,
            snapshot_support=self.snapshots_are_supported,
            replication_domain=self.replication_domain,
        )

        if isinstance(data, dict):
            common.update(data)

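The new option and property wire the configured domain into the common stats dict shown above. A sketch of the intended deployment pattern, using hypothetical backend section names and a hypothetical domain value (the manila.conf fragment is written as comments since it is illustrative only):

# Hypothetical manila.conf arrangement -- both backends opt into the same
# replication domain, so the ShareReplicationFilter can pair them up:
#
#   [alpha]
#   share_backend_name = alpha
#   replication_domain = dc1-pair
#
#   [beta]
#   share_backend_name = beta
#   replication_domain = dc1-pair
#
# Each backend then reports the value through its stats, roughly like the
# 'common' dict assembled above (values hypothetical):
common = dict(
    share_backend_name='alpha',
    snapshot_support=True,
    replication_domain='dc1-pair',  # from the [alpha] config section
)
print(common['replication_domain'])  # dc1-pair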
View File

@ -32,8 +32,8 @@ def extract_host(host, level='backend', use_default_pool_name=False):
    :param host: String for host, which could include host@backend#pool info
    :param level: Indicate which level of information should be extracted
-                 from host string. Level can be 'host', 'backend' or 'pool',
-                 default value is 'backend'
                  from host string. Level can be 'host', 'backend', 'pool',
                  or 'backend_name', default value is 'backend'
    :param use_default_pool_name: This flag specifies what to do
                  if level == 'pool' and there is no 'pool' info
                  encoded in host string. default_pool_name=True
@ -50,7 +50,8 @@ def extract_host(host, level='backend', use_default_pool_name=False):
        # ret is 'HostA@BackendB'
        ret = extract_host(host, 'pool')
        # ret is 'PoolC'
        ret = extract_host(host, 'backend_name')
        # ret is 'BackendB'

        host = 'HostX@BackendY'
        ret = extract_host(host, 'pool')
        # ret is None
@ -61,6 +62,9 @@ def extract_host(host, level='backend', use_default_pool_name=False):
        # Make sure pool is not included
        hst = host.split('#')[0]
        return hst.split('@')[0]
    if level == 'backend_name':
        hst = host.split('#')[0]
        return hst.split('@')[1]
    elif level == 'backend':
        return host.split('#')[0]
    elif level == 'pool':

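A quick, runnable illustration of the new 'backend_name' level; it assumes the manila tree is importable, and the host strings match the docstring examples above.

from manila.share import utils as share_utils

print(share_utils.extract_host('HostA@BackendB#PoolC', 'backend_name'))
# BackendB
print(share_utils.extract_host('HostX@BackendY', 'backend_name'))
# BackendY

# A host string without a '@backend' part raises IndexError, which the new
# unit test further down in this change exercises:
# share_utils.extract_host('HostX#PoolC', 'backend_name')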
View File

@ -39,6 +39,27 @@ class FilterSchedulerTestCase(test_base.SchedulerTestCase):
    driver_cls = filter.FilterScheduler

    def test___format_filter_properties_active_replica_host_is_provided(self):
        sched = fakes.FakeFilterScheduler()
        fake_context = context.RequestContext('user', 'project')
        request_spec = {
            'share_properties': {'project_id': 1, 'size': 1},
            'share_instance_properties': {},
            'share_type': {'name': 'NFS'},
            'share_id': ['fake-id1'],
            'active_replica_host': 'fake_ar_host',
        }
        hosts = [fakes.FakeHostState(host, {'replication_domain': 'xyzzy'})
                 for host in ('fake_ar_host', 'fake_host_2')]
        self.mock_object(sched.host_manager, 'get_all_host_states_share',
                         mock.Mock(return_value=hosts))
        self.mock_object(sched, 'populate_filter_properties_share')

        retval = sched._format_filter_properties(
            fake_context, {}, request_spec)

        self.assertTrue('replication_domain' in retval[0])

    def test_create_share_no_hosts(self):
        # Ensure empty hosts/child_zones result in NoValidHosts exception.
        sched = fakes.FakeFilterScheduler()

View File

@ -193,6 +193,7 @@ class FakeHostManager(host_manager.HostManager):
'timestamp': None,
'snapshot_support': True,
'replication_type': 'writable',
'replication_domain': 'endor',
},
'host2': {'total_capacity_gb': 2048,
'free_capacity_gb': 300,
@ -204,6 +205,7 @@ class FakeHostManager(host_manager.HostManager):
'timestamp': None,
'snapshot_support': True,
'replication_type': 'readable',
'replication_domain': 'kashyyyk',
},
'host3': {'total_capacity_gb': 512,
'free_capacity_gb': 256,
@ -226,6 +228,7 @@ class FakeHostManager(host_manager.HostManager):
'timestamp': None,
'snapshot_support': True,
'replication_type': 'dr',
'replication_domain': 'naboo',
},
'host5': {'total_capacity_gb': 2048,
'free_capacity_gb': 500,
@ -256,6 +259,10 @@ class FakeHostState(host_manager.HostState):
        for (key, val) in attribute_dict.items():
            setattr(self, key, val)


FAKE_HOST_STRING_1 = 'openstack@BackendA#PoolX'
FAKE_HOST_STRING_2 = 'openstack@BackendB#PoolY'
FAKE_HOST_STRING_3 = 'openstack@BackendC#PoolZ'


def mock_host_manager_db_calls(mock_obj, disabled=None):
    services = [

View File

@ -0,0 +1,116 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for the ShareReplicationFilter.
"""

import ddt
from oslo_context import context

from manila.scheduler.filters import share_replication
from manila import test
from manila.tests.scheduler import fakes


@ddt.ddt
class ShareReplicationFilterTestCase(test.TestCase):
    """Test case for ShareReplicationFilter."""

    def setUp(self):
        super(ShareReplicationFilterTestCase, self).setUp()
        self.filter = share_replication.ShareReplicationFilter()
        self.debug_log = self.mock_object(share_replication.LOG, 'debug')

    @staticmethod
    def _create_replica_request(replication_domain='kashyyyk',
                                replication_type='dr',
                                active_replica_host=fakes.FAKE_HOST_STRING_1,
                                is_admin=False):
        ctxt = context.RequestContext('fake', 'fake', is_admin=is_admin)
        return {
            'context': ctxt,
            'request_spec': {
                'active_replica_host': active_replica_host,
            },
            'resource_type': {
                'extra_specs': {
                    'replication_type': replication_type,
                },
            },
            'replication_domain': replication_domain,
        }

    @ddt.data('tatooine', '')
    def test_share_replication_filter_fails_incompatible_domain(self, domain):
        request = self._create_replica_request()
        host = fakes.FakeHostState('host1',
                                   {'replication_domain': domain})

        self.assertFalse(self.filter.host_passes(host, request))
        self.assertTrue(self.debug_log.called)

    def test_share_replication_filter_fails_no_replication_domain(self):
        request = self._create_replica_request()
        host = fakes.FakeHostState('host1',
                                   {'replication_domain': None})

        self.assertFalse(self.filter.host_passes(host, request))
        self.assertTrue(self.debug_log.called)

    def test_share_replication_filter_passes_no_replication_type(self):
        request = self._create_replica_request(replication_type=None)
        host = fakes.FakeHostState('host1',
                                   {'replication_domain': 'tatooine'})

        self.assertTrue(self.filter.host_passes(host, request))

    def test_share_replication_filter_passes_no_active_replica_host(self):
        request = self._create_replica_request(active_replica_host=None)
        host = fakes.FakeHostState('host1',
                                   {'replication_domain': 'tatooine'})

        self.assertTrue(self.filter.host_passes(host, request))

    def test_share_replication_filter_passes_happy_day(self):
        request = self._create_replica_request()
        host = fakes.FakeHostState('host1',
                                   {'replication_domain': 'kashyyyk'})

        self.assertTrue(self.filter.host_passes(host, request))

    def test_share_replication_filter_empty(self):
        request = {}
        host = fakes.FakeHostState('host1',
                                   {'replication_domain': 'naboo'})

        self.assertTrue(self.filter.host_passes(host, request))

View File

@ -200,6 +200,7 @@ class HostManagerTestCase(test.TestCase):
'dedupe': False,
'compression': False,
'replication_type': None,
'replication_domain': None,
},
}, {
'name': 'host2@back1#BBB',
@ -224,6 +225,7 @@ class HostManagerTestCase(test.TestCase):
'dedupe': False,
'compression': False,
'replication_type': None,
'replication_domain': None,
},
}, {
'name': 'host2@back2#CCC',
@ -248,6 +250,7 @@ class HostManagerTestCase(test.TestCase):
'dedupe': False,
'compression': False,
'replication_type': None,
'replication_domain': None,
},
},
]
@ -294,6 +297,7 @@ class HostManagerTestCase(test.TestCase):
'dedupe': False,
'compression': False,
'replication_type': None,
'replication_domain': None,
},
}, {
'name': 'host2@BBB#pool2',
@ -319,6 +323,7 @@ class HostManagerTestCase(test.TestCase):
'dedupe': False,
'compression': False,
'replication_type': None,
'replication_domain': None,
},
}, {
'name': 'host3@CCC#pool3',
@ -344,6 +349,7 @@ class HostManagerTestCase(test.TestCase):
'dedupe': False,
'compression': False,
'replication_type': None,
'replication_domain': None,
},
}, {
'name': 'host4@DDD#pool4a',
@ -369,6 +375,7 @@ class HostManagerTestCase(test.TestCase):
'dedupe': False,
'compression': False,
'replication_type': None,
'replication_domain': None,
},
}, {
'name': 'host4@DDD#pool4b',
@ -394,6 +401,7 @@ class HostManagerTestCase(test.TestCase):
'dedupe': False,
'compression': False,
'replication_type': None,
'replication_domain': None,
},
},
]
@ -452,6 +460,7 @@ class HostManagerTestCase(test.TestCase):
'dedupe': False,
'compression': False,
'replication_type': None,
'replication_domain': None,
},
}, {
'name': 'host2@back1#BBB',
@ -476,6 +485,7 @@ class HostManagerTestCase(test.TestCase):
'dedupe': False,
'compression': False,
'replication_type': None,
'replication_domain': None,
},
},
]
@ -526,6 +536,7 @@ class HostManagerTestCase(test.TestCase):
'dedupe': False,
'compression': False,
'replication_type': None,
'replication_domain': None,
},
},
]

View File

@ -124,6 +124,7 @@ class EMCShareFrameworkTestCase(test.TestCase):
data['qos'] = False
data['pools'] = None
data['snapshot_support'] = True
data['replication_domain'] = None
self.assertEqual(data, self.driver._stats)
def _fake_safe_get(self, value):

View File

@ -520,6 +520,7 @@ class HPE3ParDriverTestCase(test.TestCase):
'total_capacity_gb': 0,
'vendor_name': 'HPE',
'pools': None,
'replication_domain': None,
}
result = self.driver.get_share_stats(refresh=True)
@ -577,6 +578,7 @@ class HPE3ParDriverTestCase(test.TestCase):
'hpe3par_flash_cache': False,
'hp3par_flash_cache': False,
'snapshot_support': True,
'replication_domain': None,
}
result = self.driver.get_share_stats(refresh=True)
@ -610,6 +612,7 @@ class HPE3ParDriverTestCase(test.TestCase):
'total_capacity_gb': 0,
'vendor_name': 'HPE',
'snapshot_support': True,
'replication_domain': None,
}
result = self.driver.get_share_stats(refresh=True)

View File

@ -722,6 +722,7 @@ class HuaweiShareDriverTestCase(test.TestCase):
self.configuration.huawei_share_backend = 'V3'
self.configuration.max_over_subscription_ratio = 1
self.configuration.driver_handles_share_servers = False
self.configuration.replication_domain = None
self.tmp_dir = tempfile.mkdtemp()
self.fake_conf_file = self.tmp_dir + '/manila_huawei_conf.xml'
@ -2115,6 +2116,7 @@ class HuaweiShareDriverTestCase(test.TestCase):
expected['free_capacity_gb'] = 0.0
expected['qos'] = True
expected["snapshot_support"] = True
expected['replication_domain'] = None
expected["pools"] = []
pool = dict(
pool_name='OpenStack_Pool',

View File

@ -325,6 +325,7 @@ class GlusterfsNativeShareDriverTestCase(test.TestCase):
'free_capacity_gb': 'unknown',
'pools': None,
'snapshot_support': True,
'replication_domain': None,
}
self.assertEqual(test_data, self._driver._stats)

View File

@ -1835,7 +1835,7 @@ class ShareAPITestCase(test.TestCase):
        fake_replica = fakes.fake_replica(replica['id'])
        fake_request_spec = fakes.fake_replica_request_spec()
        self.mock_object(db_api, 'share_replicas_get_available_active_replica',
-                        mock.Mock(return_value='FAKE_ACTIVE_REPLICA'))
                         mock.Mock(return_value={'host': 'fake_ar_host'}))
        self.mock_object(
            share_api.API, '_create_share_instance_and_get_request_spec',
            mock.Mock(return_value=(fake_request_spec, fake_replica)))

View File

@ -52,6 +52,18 @@ class ShareUtilsTestCase(test.TestCase):
        self.assertEqual(
            'Host', share_utils.extract_host(host))

    def test_extract_host_only_return_backend_name(self):
        host = 'Host@Backend#Pool'
        self.assertEqual(
            'Backend', share_utils.extract_host(host, 'backend_name'))

    def test_extract_host_only_return_backend_name_index_error(self):
        host = 'Host#Pool'
        self.assertRaises(IndexError,
                          share_utils.extract_host,
                          host, 'backend_name')

    def test_extract_host_missing_backend(self):
        host = 'Host#Pool'
        self.assertEqual(

View File

@ -40,6 +40,7 @@ manila.scheduler.filters =
    IgnoreAttemptedHostsFilter = manila.scheduler.filters.ignore_attempted_hosts:IgnoreAttemptedHostsFilter
    JsonFilter = manila.scheduler.filters.json:JsonFilter
    RetryFilter = manila.scheduler.filters.retry:RetryFilter
    ShareReplicationFilter = manila.scheduler.filters.share_replication:ShareReplicationFilter
manila.scheduler.weighers =
    CapacityWeigher = manila.scheduler.weighers.capacity:CapacityWeigher
    PoolWeigher = manila.scheduler.weighers.pool:PoolWeigher