dcdce87822
When FilterScheduler was first introduced into Cinder, drivers were required for the first time to report capacity. Some drivers preferred to report 'infinite' or 'unknown' capacity because they were thin-provisioning or because their total capacity kept increasing. Now that we have better support for thin provisioning, such unrealistic capacity values no longer help in making optimal scheduling decisions, because 'infinite' and 'unknown' always receive the highest weight when the weight multiplier is positive, which in most cases it is. Drivers are now expected to stop reporting 'infinite' or 'unknown' capacity and should instead report an actual number for total/free capacity. This fix does not change the drivers; instead, a small tweak is added to CapacityWeigher to downgrade hosts that report 'infinite' or 'unknown' free capacity. In particular, such hosts are adjusted to have the lowest weight, whether in 'spreading' (weight multiplier > 0) or 'stacking' (weight multiplier < 0) mode. DocImpact Change-Id: Ied087386a1a2f43e6a77499a817d5c637ef448f6 Partial-bug: #1350638
93 lines
3.8 KiB
Python
93 lines
3.8 KiB
Python
# Copyright 2011 OpenStack Foundation
|
|
# All Rights Reserved.
|
|
#
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
# not use this file except in compliance with the License. You may obtain
|
|
# a copy of the License at
|
|
#
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
#
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
# License for the specific language governing permissions and limitations
|
|
# under the License.
|
|
"""
|
|
Fakes For Scheduler tests.
|
|
"""
|
|
|
|
from cinder.openstack.common import timeutils
|
|
from cinder.scheduler import filter_scheduler
|
|
from cinder.scheduler import host_manager
|
|
|
|
|
|
class FakeFilterScheduler(filter_scheduler.FilterScheduler):
    """FilterScheduler variant used by the scheduler unit tests.

    Identical to the production scheduler except that its host manager is
    replaced with a plain ``HostManager`` instance, so tests can inject
    canned host state without the normal configuration-driven wiring.
    """

    def __init__(self, *args, **kwargs):
        super(FakeFilterScheduler, self).__init__(*args, **kwargs)
        # Override whatever host manager the parent constructor set up.
        self.host_manager = host_manager.HostManager()
|
|
|
|
|
|
class FakeHostManager(host_manager.HostManager):
    """HostManager pre-populated with five canned backend capability reports.

    host1-host4 report realistic capacity numbers (host4 additionally
    advertises consistency-group support); host5 reports the legacy
    'infinite'/'unknown' placeholder values and omits a backend name,
    which lets tests exercise the CapacityWeigher downgrade path.
    """

    def __init__(self):
        super(FakeHostManager, self).__init__()

        states = {}
        states['host1'] = {'total_capacity_gb': 1024,
                           'free_capacity_gb': 1024,
                           'allocated_capacity_gb': 0,
                           'reserved_percentage': 10,
                           'volume_backend_name': 'lvm1',
                           'timestamp': None}
        states['host2'] = {'total_capacity_gb': 2048,
                           'free_capacity_gb': 300,
                           'allocated_capacity_gb': 1748,
                           'reserved_percentage': 10,
                           'volume_backend_name': 'lvm2',
                           'timestamp': None}
        states['host3'] = {'total_capacity_gb': 512,
                           'free_capacity_gb': 256,
                           'allocated_capacity_gb': 256,
                           'reserved_percentage': 0,
                           'volume_backend_name': 'lvm3',
                           'timestamp': None}
        states['host4'] = {'total_capacity_gb': 2048,
                           'free_capacity_gb': 200,
                           'allocated_capacity_gb': 1848,
                           'reserved_percentage': 5,
                           'volume_backend_name': 'lvm4',
                           'timestamp': None,
                           'consistencygroup_support': True}
        # Deliberately unrealistic report: no backend name, and
        # placeholder capacity strings instead of numbers.
        states['host5'] = {'total_capacity_gb': 'infinite',
                           'free_capacity_gb': 'unknown',
                           'allocated_capacity_gb': 1548,
                           'reserved_percentage': 5,
                           'timestamp': None}
        self.service_states = states
|
|
|
|
|
|
class FakeHostState(host_manager.HostState):
    """HostState whose attributes are seeded directly from a dict."""

    def __init__(self, host, attribute_dict):
        """Create state for ``host`` and set each key/value as an attribute.

        :param host: host name, passed through to ``HostState.__init__``.
        :param attribute_dict: mapping of attribute name to value; every
            pair is set on the instance with ``setattr``.
        """
        super(FakeHostState, self).__init__(host)
        # Use .items() rather than the Python 2-only .iteritems() so the
        # fake also runs under Python 3; iteration order is irrelevant here.
        for (key, val) in attribute_dict.items():
            setattr(self, key, val)
|
|
|
|
|
|
def mock_host_manager_db_calls(mock_obj, disabled=None):
    """Point ``mock_obj`` at the canned volume-service DB rows.

    Builds five enabled 'volume' services (host1..host5, ids 1..5) spread
    across zone1/zone2/zone3 and assigns them to ``mock_obj.return_value``.

    :param mock_obj: mock standing in for the service-get DB call.
    :param disabled: when None, return all services; otherwise only those
        whose 'disabled' flag equals this value.
    """
    host_zones = [('host1', 'zone1'),
                  ('host2', 'zone1'),
                  ('host3', 'zone2'),
                  ('host4', 'zone3'),
                  ('host5', 'zone3')]
    # utcnow() is called per row, matching one timestamp per service.
    services = [dict(id=idx, host=name, topic='volume', disabled=False,
                     availability_zone=zone,
                     updated_at=timeutils.utcnow())
                for idx, (name, zone) in enumerate(host_zones, 1)]
    if disabled is None:
        mock_obj.return_value = services
    else:
        mock_obj.return_value = [svc for svc in services
                                 if svc['disabled'] == disabled]
|