Add an instance-locality filter

Having an instance and an attached volume on the same physical host
(i.e. data locality) can be desirable in some configurations, in order
to achieve high-performance disk I/O.

This patch adds an InstanceLocalityFilter filter that allows users to
request creation of volumes 'local' to an existing instance, without
specifying the hypervisor's hostname, and without any knowledge of the
underlying back-ends.

In order to work:
- At least one physical host should run both nova-compute and
  cinder-volume services.
- The Extended Server Attributes extension needs to be active in Nova
  (this is by default), so that the 'OS-EXT-SRV-ATTR:host' property is
  returned when requesting instance info.
- The user making the call needs to have sufficient rights for the
  property to be returned by Nova. This can be achieved either by
  changing Nova's policy.json (the 'extended_server_attributes' option),
  or by setting an account with privileged rights in Cinder conf.

For example:
  Instance 01234567-89ab-cdef is running in a hypervisor on the physical
  host 'my-host'.

  To create a 42 GB volume in a back-end hosted by 'my-host':
    cinder create --hint local_to_instance=01234567-89ab-cdef 42

Note:
  Currently it is not recommended to allow instance migrations for
  hypervisors where this hint will be used. In case of instance
  migration, a previously locally-created volume will not be
  automatically migrated. Also in case of instance migration during the
  volume's scheduling, the result is unpredictable.

DocImpact: New Cinder scheduler filter
Change-Id: Id428fa2132c1afed424443083645787ee3cb0399
This commit is contained in:
Adrien Vergé 2014-12-05 16:09:10 +01:00
parent ebc819cc52
commit 0269a26f13
7 changed files with 370 additions and 51 deletions

View File

@ -17,13 +17,18 @@ Handles all requests to Nova.
"""
from novaclient import exceptions as nova_exceptions
from novaclient import extension
from novaclient import service_catalog
from novaclient.v1_1 import client as nova_client
from novaclient.v1_1.contrib import assisted_volume_snapshots
from novaclient.v1_1.contrib import list_extensions
from oslo.config import cfg
from requests import exceptions as request_exceptions
from cinder import context as ctx
from cinder.db import base
from cinder import exception
from cinder.openstack.common import log as logging
nova_opts = [
@ -60,8 +65,12 @@ CONF.register_opts(nova_opts)
LOG = logging.getLogger(__name__)
nova_extensions = (assisted_volume_snapshots,
extension.Extension('list_extensions', list_extensions))
def novaclient(context, admin_endpoint=False, privileged_user=False):
def novaclient(context, admin_endpoint=False, privileged_user=False,
timeout=None):
"""Returns a Nova client
@param admin_endpoint: If True, use the admin endpoint template from
@ -69,6 +78,8 @@ def novaclient(context, admin_endpoint=False, privileged_user=False):
@param privileged_user: If True, use the account from configuration
(requires 'os_privileged_user_name', 'os_privileged_user_password' and
'os_privileged_user_tenant' to be set)
@param timeout: Number of seconds to wait for an answer before raising a
Timeout exception (None to disable)
"""
# FIXME: the novaclient ServiceCatalog object is mis-named.
# It actually contains the entire access blob.
@ -119,15 +130,14 @@ def novaclient(context, admin_endpoint=False, privileged_user=False):
LOG.debug('Nova client connection created using URL: %s' % url)
extensions = [assisted_volume_snapshots]
c = nova_client.Client(context.user_id,
context.auth_token,
context.project_name,
auth_url=url,
insecure=CONF.nova_api_insecure,
timeout=timeout,
cacert=CONF.nova_ca_certificates_file,
extensions=extensions)
extensions=nova_extensions)
if not privileged_user:
# noauth extracts user_id:project_id from auth_token
@ -140,6 +150,18 @@ def novaclient(context, admin_endpoint=False, privileged_user=False):
class API(base.Base):
"""API for interacting with novaclient."""
def has_extension(self, context, extension, timeout=None):
    """Check whether a given extension is enabled in Nova.

    :param context: request context
    :param extension: name of the Nova extension to look for
    :param timeout: seconds to wait for Nova before raising APITimeout
        (None disables the timeout)
    :returns: True if Nova lists the extension, False otherwise
    :raises APITimeout: if Nova does not answer in time
    """
    try:
        client = novaclient(context, timeout=timeout)
        # Pylint gives a false positive here because the 'list_extensions'
        # method is not explicitly declared. Overriding the error.
        # pylint: disable-msg=E1101
        available = client.list_extensions.show_all()
    except request_exceptions.Timeout:
        raise exception.APITimeout(service='Nova')
    return any(ext.name == extension for ext in available)
def update_server_volume(self, context, server_id, attachment_id,
new_volume_id):
novaclient(context).volumes.update_server_volume(server_id,
@ -159,3 +181,13 @@ class API(base.Base):
nova.assisted_volume_snapshots.delete(
snapshot_id,
delete_info=delete_info)
def get_server(self, context, server_id, privileged_user=False,
               timeout=None):
    """Fetch a server (instance) from Nova by its UUID.

    :param context: request context
    :param privileged_user: if True, authenticate with the privileged
        account configured in Cinder rather than the request's user
    :param timeout: seconds to wait for Nova before raising APITimeout
        (None disables the timeout)
    :returns: the novaclient server object
    :raises ServerNotFound: if Nova knows no instance with this UUID
    :raises APITimeout: if Nova does not answer in time
    """
    try:
        client = novaclient(context, privileged_user=privileged_user,
                            timeout=timeout)
        return client.servers.get(server_id)
    except nova_exceptions.NotFound:
        raise exception.ServerNotFound(uuid=server_id)
    except request_exceptions.Timeout:
        raise exception.APITimeout(service='Nova')

View File

@ -229,6 +229,19 @@ class InvalidUUID(Invalid):
message = _("Expected a uuid but received %(uuid)s.")
class APIException(CinderException):
    """Raised when a request to an external service API fails."""
    message = _("Error while requesting %(service)s API.")

    def __init__(self, message=None, **kwargs):
        # Guarantee the message template can always be formatted, even
        # when the caller did not say which service failed.
        kwargs.setdefault('service', 'unknown')
        super(APIException, self).__init__(message, **kwargs)
class APITimeout(APIException):
    """An external service API did not answer before the timeout expired."""
    message = _("Timeout while requesting %(service)s API.")
class NotFound(CinderException):
    """Base class for 'resource does not exist' errors."""
    message = _("Resource could not be found.")
    code = 404  # presumably surfaced as the HTTP status; verify in API layer
@ -290,6 +303,10 @@ class SnapshotNotFound(NotFound):
message = _("Snapshot %(snapshot_id)s could not be found.")
class ServerNotFound(NotFound):
    """No Nova instance exists with the given UUID."""
    message = _("Instance %(uuid)s could not be found.")
class VolumeIsBusy(CinderException):
    """The volume cannot be deleted while snapshots of it still exist."""
    message = _("deleting volume %(volume_name)s that has snapshot")

View File

@ -0,0 +1,118 @@
# -*- coding: utf-8 -*-
# Copyright 2014, Adrien Vergé <adrien.verge@numergy.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cinder.compute import nova
from cinder import exception
from cinder.i18n import _, _LW
from cinder.openstack.common import log as logging
from cinder.openstack.common.scheduler import filters
from cinder.openstack.common import uuidutils
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)

# Scheduler-hint key a user passes to request locality with an instance,
# e.g.: cinder create --hint local_to_instance=<instance uuid> <size>
HINT_KEYWORD = 'local_to_instance'
# Extended Server Attributes property through which Nova reports the name
# of the host running the instance's hypervisor.
INSTANCE_HOST_PROP = 'OS-EXT-SRV-ATTR:host'
# Seconds to wait for Nova API answers before raising APITimeout.
REQUESTS_TIMEOUT = 5
class InstanceLocalityFilter(filters.BaseHostFilter):
    """Schedule volume on the same host as a given instance.

    This filter enables selection of a storage back-end located on the host
    where the instance's hypervisor is running. This provides data locality:
    the instance and the volume are located on the same physical machine.

    In order to work:
    - The Extended Server Attributes extension needs to be active in Nova (this
      is by default), so that the 'OS-EXT-SRV-ATTR:host' property is returned
      when requesting instance info.
    - Either an account with privileged rights for Nova must be configured in
      Cinder configuration (see 'os_privileged_user_name'), or the user making
      the call needs to have sufficient rights (see
      'extended_server_attributes' in Nova policy).
    """

    def __init__(self):
        # Per-filter cache of instance uuid -> hypervisor hostname. A
        # BaseHostFilter instance only lives for the duration of a single
        # volume scheduling, so the cache cannot go stale across requests.
        self._cache = {}
        super(InstanceLocalityFilter, self).__init__()

    def _nova_has_extended_server_attributes(self, context):
        """Return whether Nova exposes the Extended Server Attributes.

        The answer is memoized on the filter object so that Nova is queried
        at most once per scheduling run.
        """
        if not hasattr(self, '_nova_ext_srv_attr'):
            self._nova_ext_srv_attr = nova.API().has_extension(
                context, 'ExtendedServerAttributes', timeout=REQUESTS_TIMEOUT)
        return self._nova_ext_srv_attr

    def host_passes(self, host_state, filter_properties):
        """Return True when this back-end runs on the instance's host."""
        context = filter_properties['context']
        backend_host = volume_utils.extract_host(host_state.host, 'host')

        hints = filter_properties.get('scheduler_hints') or {}
        instance_uuid = hints.get(HINT_KEYWORD, None)

        # No 'local_to_instance' hint given: this filter does not apply.
        if not instance_uuid:
            return True

        if not uuidutils.is_uuid_like(instance_uuid):
            raise exception.InvalidUUID(uuid=instance_uuid)

        # TODO(adrienverge): Currently it is not recommended to allow instance
        # migrations for hypervisors where this hint will be used. In case of
        # instance migration, a previously locally-created volume will not be
        # automatically migrated. Also in case of instance migration during
        # the volume's scheduling, the result is unpredictable. A future
        # enhancement would be to subscribe to Nova migration events (e.g. via
        # Ceilometer).

        # Serve from the local cache when this instance has already been
        # looked up during the current scheduling run.
        if instance_uuid in self._cache:
            return self._cache[instance_uuid] == backend_host

        if not self._nova_has_extended_server_attributes(context):
            LOG.warning(_LW('Hint "%s" dropped because '
                            'ExtendedServerAttributes not active in Nova.'),
                        HINT_KEYWORD)
            raise exception.CinderException(_('Hint "%s" not supported.') %
                                            HINT_KEYWORD)

        server = nova.API().get_server(context, instance_uuid,
                                       privileged_user=True,
                                       timeout=REQUESTS_TIMEOUT)

        if not hasattr(server, INSTANCE_HOST_PROP):
            LOG.warning(_LW('Hint "%s" dropped because Nova did not return '
                            'enough information. Either Nova policy needs to '
                            'be changed or a privileged account for Nova '
                            'should be specified in conf.'), HINT_KEYWORD)
            raise exception.CinderException(_('Hint "%s" not supported.') %
                                            HINT_KEYWORD)

        self._cache[instance_uuid] = getattr(server, INSTANCE_HOST_PROP)

        # Pass only if the instance's hypervisor host matches this back-end.
        return self._cache[instance_uuid] == backend_host

View File

@ -15,7 +15,6 @@
import contextlib
import mock
from novaclient.v1_1.contrib import assisted_volume_snapshots
from cinder.compute import nova
from cinder import context
@ -47,8 +46,8 @@ class NovaClientTestCase(test.TestCase):
p_client.assert_called_once_with(
'regularuser', 'token', None,
auth_url='http://novahost:8774/v2/e3f0833dc08b4cea',
insecure=False, cacert=None,
extensions=[assisted_volume_snapshots])
insecure=False, cacert=None, timeout=None,
extensions=nova.nova_extensions)
@mock.patch('novaclient.v1_1.client.Client')
def test_nova_client_admin_endpoint(self, p_client):
@ -56,8 +55,8 @@ class NovaClientTestCase(test.TestCase):
p_client.assert_called_once_with(
'regularuser', 'token', None,
auth_url='http://novaadmhost:4778/v2/e3f0833dc08b4cea',
insecure=False, cacert=None,
extensions=[assisted_volume_snapshots])
insecure=False, cacert=None, timeout=None,
extensions=nova.nova_extensions)
@mock.patch('novaclient.v1_1.client.Client')
def test_nova_client_privileged_user(self, p_client):
@ -65,8 +64,8 @@ class NovaClientTestCase(test.TestCase):
p_client.assert_called_once_with(
'adminuser', 'strongpassword', None,
auth_url='http://keystonehost:5000/v2.0',
insecure=False, cacert=None,
extensions=[assisted_volume_snapshots])
insecure=False, cacert=None, timeout=None,
extensions=nova.nova_extensions)
class FakeNovaClient(object):

View File

@ -18,6 +18,7 @@ Fakes For Scheduler tests.
from oslo.utils import timeutils
from cinder.openstack.common import uuidutils
from cinder.scheduler import filter_scheduler
from cinder.scheduler import host_manager
@ -73,6 +74,55 @@ class FakeHostState(host_manager.HostState):
setattr(self, key, val)
class FakeNovaClient(object):
    """Minimal in-memory stand-in for a novaclient connection.

    Provides just enough of the 'servers' and 'list_extensions' managers
    for the scheduler filter tests.
    """

    class Server(object):
        """A fake Nova server pinned to a given hypervisor host."""

        def __init__(self, host):
            self.uuid = uuidutils.generate_uuid()
            self.host = host
            # Mirror the Extended Server Attributes property that Nova
            # returns when the extension is active.
            setattr(self, 'OS-EXT-SRV-ATTR:host', host)

    class ServerManager(object):
        """Fake of novaclient's ServerManager (create/get/list)."""

        def __init__(self):
            self._servers = []

        def create(self, host):
            """Create a fake server on `host` and return its uuid."""
            self._servers.append(FakeNovaClient.Server(host))
            return self._servers[-1].uuid

        def get(self, server_uuid):
            """Return the server with the given uuid, or None."""
            for s in self._servers:
                if s.uuid == server_uuid:
                    return s
            return None

        def list(self, detailed=True, search_opts=None):
            """Return servers matching every (attribute, value) filter."""
            matching = list(self._servers)
            if search_opts:
                # NOTE: use items() instead of the Python-2-only
                # iteritems() so this fake also runs under Python 3;
                # behavior is identical for this read-only iteration.
                for opt, val in search_opts.items():
                    matching = [m for m in matching
                                if getattr(m, opt, None) == val]
            return matching

    class ListExtResource(object):
        """A fake extension entry, exposing only its name."""

        def __init__(self, ext_name):
            self.name = ext_name

    class ListExtManager(object):
        """Fake of novaclient's list_extensions manager."""

        def __init__(self, ext_srv_attr=True):
            # Whether the ExtendedServerAttributes extension is "active".
            self.ext_srv_attr = ext_srv_attr

        def show_all(self):
            """List active extensions, as novaclient would."""
            if self.ext_srv_attr:
                return [
                    FakeNovaClient.ListExtResource('ExtendedServerAttributes')]
            return []

    def __init__(self, ext_srv_attr=True):
        self.servers = FakeNovaClient.ServerManager()
        self.list_extensions = FakeNovaClient.ListExtManager(
            ext_srv_attr=ext_srv_attr)
def mock_host_manager_db_calls(mock_obj, disabled=None):
services = [
dict(id=1, host='host1', topic='volume', disabled=False,

View File

@ -17,9 +17,12 @@ Tests For Scheduler Host Filters.
import mock
from oslo.serialization import jsonutils
from requests import exceptions as request_exceptions
from cinder.compute import nova
from cinder import context
from cinder import db
from cinder import exception
from cinder.openstack.common.scheduler import filters
from cinder import test
from cinder.tests.scheduler import fakes
@ -32,10 +35,6 @@ class HostFiltersTestCase(test.TestCase):
def setUp(self):
super(HostFiltersTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake')
self.json_query = jsonutils.dumps(
['and',
['>=', '$free_capacity_gb', 1024],
['>=', '$total_capacity_gb', 10 * 1024]])
# This has a side effect of testing 'get_filter_classes'
# when specifying a method (in this case, our standard filters)
filter_handler = filters.HostFilterHandler('cinder.scheduler.filters')
@ -44,8 +43,17 @@ class HostFiltersTestCase(test.TestCase):
for cls in classes:
self.class_map[cls.__name__] = cls
class CapacityFilterTestCase(HostFiltersTestCase):
def setUp(self):
super(CapacityFilterTestCase, self).setUp()
self.json_query = jsonutils.dumps(
['and',
['>=', '$free_capacity_gb', 1024],
['>=', '$total_capacity_gb', 10 * 1024]])
@mock.patch('cinder.utils.service_is_up')
def test_capacity_filter_passes(self, _mock_serv_is_up):
def test_filter_passes(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100}
@ -57,7 +65,7 @@ class HostFiltersTestCase(test.TestCase):
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
def test_capacity_filter_current_host_passes(self, _mock_serv_is_up):
def test_filter_current_host_passes(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100, 'vol_exists_on': 'host1'}
@ -69,7 +77,7 @@ class HostFiltersTestCase(test.TestCase):
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
def test_capacity_filter_fails(self, _mock_serv_is_up):
def test_filter_fails(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100}
@ -82,7 +90,7 @@ class HostFiltersTestCase(test.TestCase):
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
def test_capacity_filter_passes_infinite(self, _mock_serv_is_up):
def test_filter_passes_infinite(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100}
@ -94,7 +102,7 @@ class HostFiltersTestCase(test.TestCase):
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
def test_capacity_filter_passes_unknown(self, _mock_serv_is_up):
def test_filter_passes_unknown(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100}
@ -105,8 +113,10 @@ class HostFiltersTestCase(test.TestCase):
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
class AffinityFilterTestCase(HostFiltersTestCase):
@mock.patch('cinder.utils.service_is_up')
def test_affinity_different_filter_passes(self, _mock_serv_is_up):
def test_different_filter_passes(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['DifferentBackendFilter']()
service = {'disabled': False}
@ -124,7 +134,7 @@ class HostFiltersTestCase(test.TestCase):
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@mock.patch('cinder.utils.service_is_up')
def test_affinity_different_filter_legacy_volume_hint_passes(
def test_different_filter_legacy_volume_hint_passes(
self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['DifferentBackendFilter']()
@ -142,7 +152,7 @@ class HostFiltersTestCase(test.TestCase):
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_affinity_different_filter_non_list_fails(self):
def test_different_filter_non_list_fails(self):
filt_cls = self.class_map['DifferentBackendFilter']()
host = fakes.FakeHostState('host2', {})
volume = utils.create_volume(self.context, host='host2')
@ -154,7 +164,7 @@ class HostFiltersTestCase(test.TestCase):
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_affinity_different_filter_fails(self):
def test_different_filter_fails(self):
filt_cls = self.class_map['DifferentBackendFilter']()
host = fakes.FakeHostState('host1', {})
volume = utils.create_volume(self.context, host='host1')
@ -166,7 +176,7 @@ class HostFiltersTestCase(test.TestCase):
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_affinity_different_filter_handles_none(self):
def test_different_filter_handles_none(self):
filt_cls = self.class_map['DifferentBackendFilter']()
host = fakes.FakeHostState('host1', {})
@ -175,7 +185,7 @@ class HostFiltersTestCase(test.TestCase):
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_affinity_different_filter_handles_deleted_instance(self):
def test_different_filter_handles_deleted_instance(self):
filt_cls = self.class_map['DifferentBackendFilter']()
host = fakes.FakeHostState('host1', {})
volume = utils.create_volume(self.context, host='host1')
@ -188,7 +198,7 @@ class HostFiltersTestCase(test.TestCase):
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_affinity_different_filter_fail_nonuuid_hint(self):
def test_different_filter_fail_nonuuid_hint(self):
filt_cls = self.class_map['DifferentBackendFilter']()
host = fakes.FakeHostState('host1', {})
@ -198,7 +208,7 @@ class HostFiltersTestCase(test.TestCase):
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_affinity_different_filter_handles_multiple_uuids(self):
def test_different_filter_handles_multiple_uuids(self):
filt_cls = self.class_map['DifferentBackendFilter']()
host = fakes.FakeHostState('host1#pool0', {})
volume1 = utils.create_volume(self.context, host='host1:pool1')
@ -212,7 +222,7 @@ class HostFiltersTestCase(test.TestCase):
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_affinity_different_filter_handles_invalid_uuids(self):
def test_different_filter_handles_invalid_uuids(self):
filt_cls = self.class_map['DifferentBackendFilter']()
host = fakes.FakeHostState('host1', {})
volume = utils.create_volume(self.context, host='host2')
@ -224,7 +234,7 @@ class HostFiltersTestCase(test.TestCase):
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_affinity_same_filter_no_list_passes(self):
def test_same_filter_no_list_passes(self):
filt_cls = self.class_map['SameBackendFilter']()
host = fakes.FakeHostState('host1', {})
volume = utils.create_volume(self.context, host='host1')
@ -236,7 +246,7 @@ class HostFiltersTestCase(test.TestCase):
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_affinity_same_filter_passes(self):
def test_same_filter_passes(self):
filt_cls = self.class_map['SameBackendFilter']()
host = fakes.FakeHostState('host1#pool0', {})
volume = utils.create_volume(self.context, host='host1#pool0')
@ -248,7 +258,7 @@ class HostFiltersTestCase(test.TestCase):
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_affinity_same_filter_legacy_vol_fails(self):
def test_same_filter_legacy_vol_fails(self):
filt_cls = self.class_map['SameBackendFilter']()
host = fakes.FakeHostState('host1#pool0', {})
volume = utils.create_volume(self.context, host='host1')
@ -260,7 +270,7 @@ class HostFiltersTestCase(test.TestCase):
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_affinity_same_filter_fails(self):
def test_same_filter_fails(self):
filt_cls = self.class_map['SameBackendFilter']()
host = fakes.FakeHostState('host1#pool0', {})
volume = utils.create_volume(self.context, host='host1#pool1')
@ -272,7 +282,7 @@ class HostFiltersTestCase(test.TestCase):
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_affinity_same_filter_vol_list_pass(self):
def test_same_filter_vol_list_pass(self):
filt_cls = self.class_map['SameBackendFilter']()
host = fakes.FakeHostState('host1', {})
volume1 = utils.create_volume(self.context, host='host1')
@ -286,7 +296,7 @@ class HostFiltersTestCase(test.TestCase):
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_affinity_same_filter_handles_none(self):
def test_same_filter_handles_none(self):
filt_cls = self.class_map['SameBackendFilter']()
host = fakes.FakeHostState('host1', {})
@ -295,7 +305,7 @@ class HostFiltersTestCase(test.TestCase):
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_affinity_same_filter_handles_deleted_instance(self):
def test_same_filter_handles_deleted_instance(self):
filt_cls = self.class_map['SameBackendFilter']()
host = fakes.FakeHostState('host1', {})
volume = utils.create_volume(self.context, host='host2')
@ -308,7 +318,7 @@ class HostFiltersTestCase(test.TestCase):
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_affinity_same_filter_fail_nonuuid_hint(self):
def test_same_filter_fail_nonuuid_hint(self):
filt_cls = self.class_map['SameBackendFilter']()
host = fakes.FakeHostState('host1', {})
@ -318,7 +328,9 @@ class HostFiltersTestCase(test.TestCase):
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_driver_filter_passing_function(self):
class DriverFilterTestCase(HostFiltersTestCase):
def test_passing_function(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
@ -338,7 +350,7 @@ class HostFiltersTestCase(test.TestCase):
self.assertTrue(filt_cls.host_passes(host1, filter_properties))
def test_driver_filter_failing_function(self):
def test_failing_function(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
@ -358,7 +370,7 @@ class HostFiltersTestCase(test.TestCase):
self.assertFalse(filt_cls.host_passes(host1, filter_properties))
def test_driver_filter_no_filter_function(self):
def test_no_filter_function(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
@ -378,7 +390,7 @@ class HostFiltersTestCase(test.TestCase):
self.assertTrue(filt_cls.host_passes(host1, filter_properties))
def test_driver_filter_not_implemented(self):
def test_not_implemented(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
@ -396,7 +408,7 @@ class HostFiltersTestCase(test.TestCase):
self.assertTrue(filt_cls.host_passes(host1, filter_properties))
def test_driver_filter_no_volume_extra_specs(self):
def test_no_volume_extra_specs(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
@ -410,7 +422,7 @@ class HostFiltersTestCase(test.TestCase):
self.assertTrue(filt_cls.host_passes(host1, filter_properties))
def test_driver_filter_volume_backend_name_different(self):
def test_volume_backend_name_different(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
@ -430,7 +442,7 @@ class HostFiltersTestCase(test.TestCase):
self.assertFalse(filt_cls.host_passes(host1, filter_properties))
def test_driver_filter_function_extra_spec_replacement(self):
def test_function_extra_spec_replacement(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
@ -451,7 +463,7 @@ class HostFiltersTestCase(test.TestCase):
self.assertTrue(filt_cls.host_passes(host1, filter_properties))
def test_driver_filter_function_stats_replacement(self):
def test_function_stats_replacement(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
@ -472,7 +484,7 @@ class HostFiltersTestCase(test.TestCase):
self.assertTrue(filt_cls.host_passes(host1, filter_properties))
def test_driver_filter_function_volume_replacement(self):
def test_function_volume_replacement(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
@ -497,7 +509,7 @@ class HostFiltersTestCase(test.TestCase):
self.assertTrue(filt_cls.host_passes(host1, filter_properties))
def test_driver_filter_function_qos_spec_replacement(self):
def test_function_qos_spec_replacement(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
@ -520,7 +532,7 @@ class HostFiltersTestCase(test.TestCase):
self.assertTrue(filt_cls.host_passes(host1, filter_properties))
def test_driver_filter_function_exception_caught(self):
def test_function_exception_caught(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
@ -540,7 +552,7 @@ class HostFiltersTestCase(test.TestCase):
self.assertFalse(filt_cls.host_passes(host1, filter_properties))
def test_driver_filter_function_empty_qos(self):
def test_function_empty_qos(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
@ -561,7 +573,7 @@ class HostFiltersTestCase(test.TestCase):
self.assertFalse(filt_cls.host_passes(host1, filter_properties))
def test_driver_filter_capabilities(self):
def test_capabilities(self):
filt_cls = self.class_map['DriverFilter']()
host1 = fakes.FakeHostState(
'host1', {
@ -581,3 +593,93 @@ class HostFiltersTestCase(test.TestCase):
}
self.assertTrue(filt_cls.host_passes(host1, filter_properties))
class InstanceLocalityFilterTestCase(HostFiltersTestCase):
    """Tests for the InstanceLocalityFilter scheduler filter."""

    def setUp(self):
        super(InstanceLocalityFilterTestCase, self).setUp()
        # Point the Nova endpoint template and the request's service catalog
        # at fake hosts so no real API call can ever be attempted.
        self.override_config('nova_endpoint_template',
                             'http://novahost:8774/v2/%(project_id)s')
        self.context.service_catalog = [
            {'type': 'compute', 'name': 'nova', 'endpoints':
                [{'publicURL': 'http://novahost:8774/v2/e3f0833dc08b4cea'}]},
            {'type': 'identity', 'name': 'keystone', 'endpoints':
                [{'publicURL': 'http://keystonehost:5000/v2.0'}]}]

    @mock.patch('cinder.compute.nova.novaclient')
    def test_same_host(self, _mock_novaclient):
        # Back-end and instance share 'host1': the filter must pass.
        _mock_novaclient.return_value = fakes.FakeNovaClient()
        filter_cls = self.class_map['InstanceLocalityFilter']()
        backend = fakes.FakeHostState('host1', {})
        instance_uuid = nova.novaclient().servers.create('host1')
        props = {'context': self.context,
                 'scheduler_hints': {'local_to_instance': instance_uuid}}
        self.assertTrue(filter_cls.host_passes(backend, props))

    @mock.patch('cinder.compute.nova.novaclient')
    def test_different_host(self, _mock_novaclient):
        # Instance runs on 'host2' while the back-end is 'host1': reject.
        _mock_novaclient.return_value = fakes.FakeNovaClient()
        filter_cls = self.class_map['InstanceLocalityFilter']()
        backend = fakes.FakeHostState('host1', {})
        instance_uuid = nova.novaclient().servers.create('host2')
        props = {'context': self.context,
                 'scheduler_hints': {'local_to_instance': instance_uuid}}
        self.assertFalse(filter_cls.host_passes(backend, props))

    def test_handles_none(self):
        # Without scheduler hints the filter must let every host through.
        filter_cls = self.class_map['InstanceLocalityFilter']()
        backend = fakes.FakeHostState('host1', {})
        props = {'context': self.context, 'scheduler_hints': None}
        self.assertTrue(filter_cls.host_passes(backend, props))

    def test_invalid_uuid(self):
        # A malformed instance uuid in the hint must raise InvalidUUID.
        filter_cls = self.class_map['InstanceLocalityFilter']()
        backend = fakes.FakeHostState('host1', {})
        props = {'context': self.context,
                 'scheduler_hints':
                     {'local_to_instance': 'e29b11d4-not-valid-a716'}}
        self.assertRaises(exception.InvalidUUID,
                          filter_cls.host_passes, backend, props)

    @mock.patch('cinder.compute.nova.novaclient')
    def test_nova_no_extended_server_attributes(self, _mock_novaclient):
        # When Nova lacks ExtendedServerAttributes, the hint is unusable
        # and the filter must raise.
        _mock_novaclient.return_value = fakes.FakeNovaClient(
            ext_srv_attr=False)
        filter_cls = self.class_map['InstanceLocalityFilter']()
        backend = fakes.FakeHostState('host1', {})
        instance_uuid = nova.novaclient().servers.create('host1')
        props = {'context': self.context,
                 'scheduler_hints': {'local_to_instance': instance_uuid}}
        self.assertRaises(exception.CinderException,
                          filter_cls.host_passes, backend, props)

    @mock.patch('cinder.compute.nova.novaclient')
    def test_nova_down_does_not_alter_other_filters(self, _mock_novaclient):
        # Simulate Nova API is not available
        _mock_novaclient.side_effect = Exception
        filter_cls = self.class_map['InstanceLocalityFilter']()
        backend = fakes.FakeHostState('host1', {})
        # No hint is given, so Nova must never be contacted at all.
        props = {'context': self.context, 'size': 100}
        self.assertTrue(filter_cls.host_passes(backend, props))

    @mock.patch('requests.request')
    def test_nova_timeout(self, _mock_request):
        # Simulate a HTTP timeout
        _mock_request.side_effect = request_exceptions.Timeout
        filter_cls = self.class_map['InstanceLocalityFilter']()
        backend = fakes.FakeHostState('host1', {})
        props = {'context': self.context,
                 'scheduler_hints':
                     {'local_to_instance':
                      'e29b11d4-15ef-34a9-a716-598a6f0b5467'}}
        self.assertRaises(exception.APITimeout,
                          filter_cls.host_passes, backend, props)

View File

@ -35,6 +35,7 @@ cinder.scheduler.filters =
JsonFilter = cinder.openstack.common.scheduler.filters.json_filter:JsonFilter
RetryFilter = cinder.openstack.common.scheduler.filters.ignore_attempted_hosts_filter:IgnoreAttemptedHostsFilter
SameBackendFilter = cinder.scheduler.filters.affinity_filter:SameBackendFilter
InstanceLocalityFilter = cinder.scheduler.filters.instance_locality_filter:InstanceLocalityFilter
cinder.scheduler.weights =
AllocatedCapacityWeigher = cinder.scheduler.weights.capacity:AllocatedCapacityWeigher
CapacityWeigher = cinder.scheduler.weights.capacity:CapacityWeigher