Fix create from snapshot and source volume

A regression was introduced in
I970c10f9b50092b659fa2d88bd6a02f6c69899f2
for backends that support storage pools. Because only the
'host@backend' portion is extracted from the host attribute
of the parent volume, the scheduler may place new volumes
into other pools belonging to the same backend.

Many backends cannot clone volumes across storage pools,
or create volumes from snapshots that reside in other
storage pools.

Change-Id: Ic4c8f29bef2c82550d6d6f03f8fa1dc80696f56e
Closes-Bug: #1732557
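For reference, a minimal sketch (not the Cinder implementation) of the
'host@backend#pool' naming convention that extract_host operates on; the
host values below are hypothetical:

    # Minimal sketch of cinder.volume.utils.extract_host semantics; not the
    # real implementation, and the host strings are made up.
    def extract_host(host, level='backend'):
        host_and_backend = host.split('#')[0]        # strip the '#pool' suffix
        if level == 'host':
            return host_and_backend.split('@')[0]    # bare host name
        if level == 'backend':
            return host_and_backend                  # 'host@backend'
        if level == 'pool':
            return host.split('#')[1] if '#' in host else None


    parent_volume_host = 'host1@BackendB#openstack_nfs_1'
    print(extract_host(parent_volume_host))          # host1@BackendB
    print(extract_host(parent_volume_host, 'pool'))  # openstack_nfs_1
    # Forwarding only 'host1@BackendB' lets the scheduler pick any pool on
    # BackendB, e.g. openstack_nfs_2 -- the regression this change fixes.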
Goutham Pacha Ravi 2017-11-29 22:13:51 -08:00
parent d414feae98
commit 39816f9ff3
5 changed files with 250 additions and 95 deletions

View File

@@ -517,10 +517,15 @@ class FilterScheduler(driver.Scheduler):
# snapshot or volume).
resource_backend = request_spec.get('resource_backend')
if weighed_backends and resource_backend:
resource_backend_has_pool = bool(utils.extract_host(
resource_backend, 'pool'))
# Get host name including host@backend#pool info from
# weighed_backends.
for backend in weighed_backends[::-1]:
backend_id = utils.extract_host(backend.obj.backend_id)
backend_id = (
backend.obj.backend_id if resource_backend_has_pool
else utils.extract_host(backend.obj.backend_id)
)
if backend_id != resource_backend:
weighed_backends.remove(backend)
if not weighed_backends:
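In isolation, the matching rule above amounts to the following sketch
(simplified; the real code uses cinder.volume.utils.extract_host, and the
backend ids shown are hypothetical):

    # Simplified sketch of the pool-aware match performed above.
    def backend_matches(candidate_backend_id, resource_backend):
        # If the requested backend names a pool, compare at pool granularity;
        # otherwise compare only the 'host@backend' portion.
        resource_backend_has_pool = '#' in resource_backend
        candidate = (candidate_backend_id if resource_backend_has_pool
                     else candidate_backend_id.split('#')[0])
        return candidate == resource_backend


    assert backend_matches('host1@BackendB#openstack_nfs_1',
                           'host1@BackendB#openstack_nfs_1')
    assert not backend_matches('host1@BackendB#openstack_nfs_2',
                               'host1@BackendB#openstack_nfs_1')
    assert backend_matches('host1@BackendB#openstack_nfs_2', 'host1@BackendB')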

View File

@@ -15,16 +15,197 @@
"""
Fakes For Scheduler tests.
"""
import copy
from oslo_utils import timeutils
from oslo_utils import uuidutils
from cinder.scheduler import filter_scheduler
from cinder.scheduler import host_manager
from cinder.volume import utils
UTC_NOW = timeutils.utcnow()
SERVICE_STATES = {
'host1': {'total_capacity_gb': 1024,
'free_capacity_gb': 1024,
'allocated_capacity_gb': 0,
'provisioned_capacity_gb': 0,
'max_over_subscription_ratio': 1.0,
'thin_provisioning_support': False,
'thick_provisioning_support': True,
'reserved_percentage': 10,
'volume_backend_name': 'lvm1',
'timestamp': UTC_NOW,
'multiattach': True,
'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'},
'host2': {'total_capacity_gb': 2048,
'free_capacity_gb': 300,
'allocated_capacity_gb': 1748,
'provisioned_capacity_gb': 1748,
'max_over_subscription_ratio': 1.5,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'reserved_percentage': 10,
'volume_backend_name': 'lvm2',
'timestamp': UTC_NOW,
'uuid': '4200b32b-0bf9-436c-86b2-0675f6ac218e'},
'host3': {'total_capacity_gb': 512,
'free_capacity_gb': 256,
'allocated_capacity_gb': 256,
'provisioned_capacity_gb': 256,
'max_over_subscription_ratio': 2.0,
'thin_provisioning_support': False,
'thick_provisioning_support': True,
'reserved_percentage': 0,
'volume_backend_name': 'lvm3',
'timestamp': UTC_NOW,
'uuid': '6d91e7f5-ca17-4e3b-bf4f-19ca77166dd7'},
'host4': {'total_capacity_gb': 2048,
'free_capacity_gb': 200,
'allocated_capacity_gb': 1848,
'provisioned_capacity_gb': 2047,
'max_over_subscription_ratio': 1.0,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'reserved_percentage': 5,
'volume_backend_name': 'lvm4',
'timestamp': UTC_NOW,
'consistent_group_snapshot_enabled': True,
'uuid': '18417850-2ca9-43d1-9619-ae16bfb0f655'},
'host5': {'total_capacity_gb': 'infinite',
'free_capacity_gb': 'unknown',
'allocated_capacity_gb': 1548,
'provisioned_capacity_gb': 1548,
'max_over_subscription_ratio': 1.0,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'reserved_percentage': 5,
'timestamp': UTC_NOW,
'uuid': 'f838f35c-4035-464f-9792-ce60e390c13d'},
}
SERVICE_STATES_WITH_POOLS = {
'host1@BackendA': {
'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824',
'replication_enabled': False,
'driver_version': '1.0.0',
'volume_backend_name': 'BackendA',
'pools': [
{
'total_capacity_gb': 1024,
'free_capacity_gb': 1024,
'allocated_capacity_gb': 0,
'provisioned_capacity_gb': 0,
'max_over_subscription_ratio': 1.0,
'thin_provisioning_support': False,
'thick_provisioning_support': True,
'reserved_percentage': 15,
'pool_name': 'openstack_iscsi_1',
},
{
'total_capacity_gb': 2048,
'free_capacity_gb': 1008,
'allocated_capacity_gb': 0,
'provisioned_capacity_gb': 0,
'max_over_subscription_ratio': 1.0,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'reserved_percentage': 15,
'pool_name': 'openstack_iscsi_2',
},
],
'storage_protocol': 'iSCSI',
'timestamp': UTC_NOW,
},
'host1@BackendB': {
'replication_enabled': True,
'driver_version': '1.5.0',
'volume_backend_name': 'BackendB',
'uuid': '4200b32b-0bf9-436c-86b2-0675f6ac218e',
'pools': [
{
'total_capacity_gb': 2048,
'free_capacity_gb': 300,
'allocated_capacity_gb': 1748,
'provisioned_capacity_gb': 1748,
'max_over_subscription_ratio': 1.5,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'reserved_percentage': 10,
'pool_name': 'openstack_nfs_1',
},
{
'total_capacity_gb': 512,
'free_capacity_gb': 256,
'allocated_capacity_gb': 256,
'provisioned_capacity_gb': 256,
'max_over_subscription_ratio': 2.0,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'reserved_percentage': 10,
'pool_name': 'openstack_nfs_2',
},
],
'storage_protocol': 'nfs',
'timestamp': UTC_NOW,
},
'host2@BackendX': {
'replication_enabled': False,
'driver_version': '3.5.1',
'total_capacity_gb': 512,
'free_capacity_gb': 256,
'allocated_capacity_gb': 256,
'provisioned_capacity_gb': 256,
'max_over_subscription_ratio': 2.0,
'thin_provisioning_support': False,
'thick_provisioning_support': True,
'reserved_percentage': 0,
'volume_backend_name': 'BackendX',
'storage_protocol': 'iSCSI',
'timestamp': UTC_NOW,
'uuid': '6d91e7f5-ca17-4e3b-bf4f-19ca77166dd7'
},
'host3@BackendY': {
'replication_enabled': True,
'driver_version': '1.5.0',
'volume_backend_name': 'BackendY',
'uuid': '18417850-2ca9-43d1-9619-ae16bfb0f655',
'pools': [
{
'total_capacity_gb': 'infinite',
'free_capacity_gb': 'unknown',
'allocated_capacity_gb': 170,
'provisioned_capacity_gb': 170,
'max_over_subscription_ratio': 1.0,
'thin_provisioning_support': False,
'thick_provisioning_support': True,
'QoS_support': True,
'reserved_percentage': 0,
'pool_name': 'openstack_fcp_1',
},
{
'total_capacity_gb': 'infinite',
'free_capacity_gb': 'unknown',
'allocated_capacity_gb': 1548,
'provisioned_capacity_gb': 1548,
'max_over_subscription_ratio': 1.0,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'QoS_support': True,
'reserved_percentage': 0,
'pool_name': 'openstack_fcp_2',
},
],
'storage_protocol': 'fc',
'timestamp': UTC_NOW,
}
}
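The pool-style fixture above matters because the host manager exposes one
scheduling candidate per pool, with ids of the form 'host@backend#pool'. A
rough sketch of that expansion (the helper below is illustrative, not
Cinder's code):

    # Illustrative only: derive the pool-qualified backend ids the scheduler
    # would see for a service that reports per-pool capabilities.
    def pool_backend_ids(service_name, capabilities):
        for pool in capabilities.get('pools', []):
            yield '%s#%s' % (service_name, pool['pool_name'])


    print(list(pool_backend_ids('host1@BackendA',
                                SERVICE_STATES_WITH_POOLS['host1@BackendA'])))
    # ['host1@BackendA#openstack_iscsi_1', 'host1@BackendA#openstack_iscsi_2']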
class FakeFilterScheduler(filter_scheduler.FilterScheduler):
def __init__(self, *args, **kwargs):
@@ -33,67 +214,13 @@ class FakeFilterScheduler(filter_scheduler.FilterScheduler):
class FakeHostManager(host_manager.HostManager):
def __init__(self):
def __init__(self, multibackend_with_pools=False):
super(FakeHostManager, self).__init__()
self.service_states = {
'host1': {'total_capacity_gb': 1024,
'free_capacity_gb': 1024,
'allocated_capacity_gb': 0,
'provisioned_capacity_gb': 0,
'max_over_subscription_ratio': 1.0,
'thin_provisioning_support': False,
'thick_provisioning_support': True,
'reserved_percentage': 10,
'volume_backend_name': 'lvm1',
'timestamp': UTC_NOW,
'multiattach': True,
'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'},
'host2': {'total_capacity_gb': 2048,
'free_capacity_gb': 300,
'allocated_capacity_gb': 1748,
'provisioned_capacity_gb': 1748,
'max_over_subscription_ratio': 1.5,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'reserved_percentage': 10,
'volume_backend_name': 'lvm2',
'timestamp': UTC_NOW,
'uuid': '4200b32b-0bf9-436c-86b2-0675f6ac218e'},
'host3': {'total_capacity_gb': 512,
'free_capacity_gb': 256,
'allocated_capacity_gb': 256,
'provisioned_capacity_gb': 256,
'max_over_subscription_ratio': 2.0,
'thin_provisioning_support': False,
'thick_provisioning_support': True,
'reserved_percentage': 0,
'volume_backend_name': 'lvm3',
'timestamp': UTC_NOW,
'uuid': '6d91e7f5-ca17-4e3b-bf4f-19ca77166dd7'},
'host4': {'total_capacity_gb': 2048,
'free_capacity_gb': 200,
'allocated_capacity_gb': 1848,
'provisioned_capacity_gb': 2047,
'max_over_subscription_ratio': 1.0,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'reserved_percentage': 5,
'volume_backend_name': 'lvm4',
'timestamp': UTC_NOW,
'consistent_group_snapshot_enabled': True,
'uuid': '18417850-2ca9-43d1-9619-ae16bfb0f655'},
'host5': {'total_capacity_gb': 'infinite',
'free_capacity_gb': 'unknown',
'allocated_capacity_gb': 1548,
'provisioned_capacity_gb': 1548,
'max_over_subscription_ratio': 1.0,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'reserved_percentage': 5,
'timestamp': UTC_NOW,
'uuid': 'f838f35c-4035-464f-9792-ce60e390c13d'},
}
self.service_states = copy.deepcopy(
SERVICE_STATES_WITH_POOLS if multibackend_with_pools
else SERVICE_STATES
)
class FakeBackendState(host_manager.BackendState):
@@ -152,24 +279,34 @@ class FakeNovaClient(object):
ext_srv_attr=ext_srv_attr)
def mock_host_manager_db_calls(mock_obj, disabled=None):
services = [
dict(id=1, host='host1', topic='volume', disabled=False,
availability_zone='zone1', updated_at=timeutils.utcnow(),
uuid='a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'),
dict(id=2, host='host2', topic='volume', disabled=False,
availability_zone='zone1', updated_at=timeutils.utcnow(),
uuid='4200b32b-0bf9-436c-86b2-0675f6ac218e'),
dict(id=3, host='host3', topic='volume', disabled=False,
availability_zone='zone2', updated_at=timeutils.utcnow(),
uuid='4200b32b-0bf9-436c-86b2-0675f6ac218e'),
dict(id=4, host='host4', topic='volume', disabled=False,
availability_zone='zone3', updated_at=timeutils.utcnow(),
uuid='18417850-2ca9-43d1-9619-ae16bfb0f655'),
dict(id=5, host='host5', topic='volume', disabled=False,
availability_zone='zone3', updated_at=timeutils.utcnow(),
uuid='f838f35c-4035-464f-9792-ce60e390c13d'),
]
def mock_host_manager_db_calls(mock_obj, backends_with_pools=False,
disabled=None):
service_states = (
SERVICE_STATES_WITH_POOLS if backends_with_pools else SERVICE_STATES
)
services = []
az_map = {
'host1': 'zone1',
'host2': 'zone1',
'host3': 'zone2',
'host4': 'zone3',
'host5': 'zone3',
}
sid = 0
for svc, state in service_states.items():
sid += 1
services.append(
{
'id': sid,
'host': svc,
'availability_zone': az_map[utils.extract_host(svc, 'host')],
'topic': 'volume',
'disabled': False,
'updated_at': timeutils.utcnow(),
'uuid': state.get('uuid', uuidutils.generate_uuid()),
}
)
if disabled is None:
mock_obj.return_value = services
else:

View File

@@ -184,34 +184,50 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertIsNotNone(weighed_host.obj)
self.assertTrue(_mock_service_get_all.called)
@ddt.data(('host10@BackendA', True),
('host10@BackendB#openstack_nfs_1', True),
('host10', False))
@ddt.unpack
@mock.patch('cinder.db.service_get_all')
def test_create_volume_host_different_with_resource_backend(
self, _mock_service_get_all):
self, resource_backend, multibackend_with_pools,
_mock_service_get_all):
sched = fakes.FakeFilterScheduler()
sched.host_manager = fakes.FakeHostManager()
fakes.mock_host_manager_db_calls(_mock_service_get_all)
sched.host_manager = fakes.FakeHostManager(
multibackend_with_pools=multibackend_with_pools)
fakes.mock_host_manager_db_calls(
_mock_service_get_all, backends_with_pools=multibackend_with_pools)
fake_context = context.RequestContext('user', 'project')
request_spec = {'volume_properties': {'project_id': 1,
'size': 1},
'volume_type': {'name': 'LVM_iSCSI'},
'resource_backend': 'host_none'}
'resource_backend': resource_backend}
weighed_host = sched._schedule(fake_context, request_spec, {})
self.assertIsNone(weighed_host)
@ddt.data(('host1@BackendA', True),
('host1@BackendB#openstack_nfs_1', True),
('host1', False))
@ddt.unpack
@mock.patch('cinder.db.service_get_all')
def test_create_volume_host_same_as_resource(self, _mock_service_get_all):
def test_create_volume_host_same_as_resource(self, resource_backend,
multibackend_with_pools,
_mock_service_get_all):
# Ensure we don't clear the host whose backend is same as
# group's backend.
# requested backend (ex: create from source-volume/snapshot,
# or create within a group)
sched = fakes.FakeFilterScheduler()
sched.host_manager = fakes.FakeHostManager()
fakes.mock_host_manager_db_calls(_mock_service_get_all)
sched.host_manager = fakes.FakeHostManager(
multibackend_with_pools=multibackend_with_pools)
fakes.mock_host_manager_db_calls(
_mock_service_get_all, backends_with_pools=multibackend_with_pools)
fake_context = context.RequestContext('user', 'project')
request_spec = {'volume_properties': {'project_id': 1,
'size': 1},
'volume_type': {'name': 'LVM_iSCSI'},
'resource_backend': 'host1'}
'resource_backend': resource_backend}
weighed_host = sched._schedule(fake_context, request_spec, {})
self.assertEqual('host1#lvm1', weighed_host.obj.host)
self.assertIn(resource_backend, weighed_host.obj.host)
def test_max_attempts(self):
self.flags(scheduler_max_attempts=4)

View File

@@ -86,7 +86,7 @@ class CreateVolumeFlowTestCase(test.TestCase):
fake_volume_api.FakeDb())
task._cast_create_volume(self.ctxt, spec, {})
mock_extract_host.assert_called_once_with('host@backend#pool')
mock_extract_host.assert_not_called()
snapshot = fake_snapshot.fake_snapshot_obj(self.ctxt,
volume=volume)
@@ -110,8 +110,7 @@
task._cast_create_volume(self.ctxt, spec, {})
mock_snapshot_get.assert_called_once_with(self.ctxt, snapshot.id)
mock_extract_host.assert_has_calls([mock.call('host@backend#pool'),
mock.call('host@backend#pool')])
mock_extract_host.assert_not_called()
@mock.patch('cinder.objects.Volume.get_by_id')
@mock.patch('cinder.volume.utils.extract_host')

View File

@@ -770,12 +770,10 @@ class VolumeCastTask(flow_utils.CinderTask):
# before creating volume, we schedule this request to scheduler
# service with the desired backend information.
snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
request_spec['resource_backend'] = vol_utils.extract_host(
snapshot.volume.host)
request_spec['resource_backend'] = snapshot.volume.host
elif source_volid:
source_volume_ref = objects.Volume.get_by_id(context, source_volid)
request_spec['resource_backend'] = vol_utils.extract_host(
source_volume_ref.host)
request_spec['resource_backend'] = source_volume_ref.host
self.scheduler_rpcapi.create_volume(
context,
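Net effect of the change above, with a hypothetical parent volume host:

    # Hedged illustration of the request_spec change; the host value is made up.
    from cinder.volume import utils as vol_utils

    parent_host = 'host1@BackendB#openstack_nfs_1'

    # Before: only the backend portion was forwarded, so the scheduler could
    # place the new volume in any pool on BackendB.
    old_resource_backend = vol_utils.extract_host(parent_host)  # 'host1@BackendB'

    # After: the full pool-qualified host is forwarded, so create-from-snapshot
    # and create-from-source-volume stay in the parent's pool.
    new_resource_backend = parent_host  # 'host1@BackendB#openstack_nfs_1'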