Add AllocatedCapacityWeigher

AllocatedCapacityWeigher is a weigher that weighs hosts by their allocated
capacity. Its main purpose is to simulate the behavior of SimpleScheduler,
which sorts hosts by the total size of all volumes on them; allocated
capacity is therefore the sum of the sizes of all volumes on the target
host. To keep track of allocated capacity, host state is extended with an
'allocated_capacity_gb' attribute, which means each back-end must report
one extra stat to the scheduler. Fortunately, the 'allocated' capacity of
interest here is purely Cinder-level capacity, so the volume manager can
carry the whole burden of calculating it without having to query back-ends.
The volume manager does the initial calculation in init_host(), at the
point where it already has to query all existing volumes from the DB for
ensure_export(). After the initial calculation, the volume manager and
scheduler track every request that changes allocated capacity and keep the
value up to date.

DriverImpact

Cinder driver developers, please read on: this patch contains a change that
might IMPACT volume drivers. The volume manager now uses a 'stats'
attribute to save 'allocated_capacity_gb', and this information is merged
with the stats the driver reports before the whole is sent to the
scheduler. If you plan to report any form of allocated space other than the
apparent Cinder-level value (e.g. the capacity actually allocated on the
back-end), please choose a key name other than 'allocated_capacity_gb';
otherwise it will *OVERWRITE* the value the volume manager has calculated
and confuse the scheduler.

Partially implements bp: deprecate-chance-and-simple-schedulers

Change-Id: I306230b8973c2d1ad77bcab14ccde68e997ea816
parent 2d6a903823
commit 254e37ab3c
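
The core idea, as a minimal standalone Python sketch (illustrative only, not
part of the patch; host names and sizes are made up):

# Allocated capacity is the pure Cinder-level sum of volume sizes per
# host; with the default multiplier of -1.0 the least-allocated host
# gets the highest weight, so new volumes spread out.
vols_on_hosts = {
    'host1': [],            # 0 GB allocated
    'host2': [1024, 724],   # 1748 GB allocated
    'host3': [256],         # 256 GB allocated
}

multiplier = -1.0  # mirrors allocated_capacity_weight_multiplier's default

def weight(host):
    allocated_capacity_gb = sum(vols_on_hosts[host])
    return multiplier * allocated_capacity_gb

print(max(vols_on_hosts, key=weight))  # host1: least allocated wins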
@@ -99,6 +99,9 @@ class HostState(object):
         # Mutable available resources.
         # These will change as resources are virtually "consumed".
         self.total_capacity_gb = 0
+        # capacity has been allocated in cinder POV, which should be
+        # sum(vol['size'] for vol in vols_on_hosts)
+        self.allocated_capacity_gb = 0
         self.free_capacity_gb = None
         self.reserved_percentage = 0
 
@@ -128,6 +131,8 @@ class HostState(object):
 
         self.total_capacity_gb = capability['total_capacity_gb']
         self.free_capacity_gb = capability['free_capacity_gb']
+        self.allocated_capacity_gb = capability.get(
+            'allocated_capacity_gb', 0)
         self.reserved_percentage = capability['reserved_percentage']
 
         self.updated = capability['timestamp']
@@ -135,6 +140,7 @@ class HostState(object):
     def consume_from_volume(self, volume):
         """Incrementally update host state from an volume."""
         volume_gb = volume['size']
+        self.allocated_capacity_gb += volume_gb
         if self.free_capacity_gb == 'infinite':
             # There's virtually infinite space on back-end
             pass
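
Between periodic capability reports, the scheduler must keep its in-memory
view current, which is what the incremental update above provides. A hedged
sketch of the effect (a simplified stand-in, not the real HostState class):

# Every volume scheduled to a host is immediately "consumed" from the
# scheduler's view of it, so back-to-back requests see an up-to-date
# allocated_capacity_gb without waiting for the next stats report.
class FakeHostState(object):
    def __init__(self):
        self.allocated_capacity_gb = 0

    def consume_from_volume(self, volume):
        self.allocated_capacity_gb += volume['size']

state = FakeHostState()
for size in (10, 20):
    state.consume_from_volume({'size': size})
assert state.allocated_capacity_gb == 30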
@@ -1,3 +1,4 @@
+# Copyright (c) 2013 eBay Inc.
 # Copyright (c) 2012 OpenStack Foundation
 # All Rights Reserved.
 #
@@ -13,11 +14,22 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 """
-Capacity Weigher. Weigh hosts by their available capacity.
+Weighers that weigh hosts by their capacity, including the following two
+weighers:
+
+1. Capacity Weigher. Weigh hosts by their available capacity.
 
 The default is to spread volumes across all hosts evenly. If you prefer
 stacking, you can set the 'capacity_weight_multiplier' option to a negative
 number and the weighing has the opposite effect of the default.
+
+2. Allocated Capacity Weigher. Weigh hosts by their allocated capacity.
+
+The default behavior is to place a new volume on the host with the least
+allocated space. This weigher is intended to simulate the behavior of
+SimpleScheduler. If you prefer to place volumes on the host with the most
+allocated space, you can set the 'allocated_capacity_weight_multiplier'
+option to a positive number and the weighing has the opposite effect of
+the default.
 """
 
 
@@ -33,6 +45,10 @@ capacity_weight_opts = [
                 default=1.0,
                 help='Multiplier used for weighing volume capacity. '
                      'Negative numbers mean to stack vs spread.'),
+    cfg.FloatOpt('allocated_capacity_weight_multiplier',
+                 default=-1.0,
+                 help='Multiplier used for weighing volume capacity. '
+                      'Negative numbers mean to stack vs spread.'),
 ]
 
 CONF = cfg.CONF
@@ -55,3 +71,15 @@ class CapacityWeigher(weights.BaseHostWeigher):
         else:
             free = math.floor(host_state.free_capacity_gb * (1 - reserved))
         return free
+
+
+class AllocatedCapacityWeigher(weights.BaseHostWeigher):
+    def _weight_multiplier(self):
+        """Override the weight multiplier."""
+        return CONF.allocated_capacity_weight_multiplier
+
+    def _weigh_object(self, host_state, weight_properties):
+        # Higher weights win. We want spreading (choose host with lowest
+        # allocated_capacity first) to be the default.
+        allocated_space = host_state.allocated_capacity_gb
+        return allocated_space
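
How the multiplier turns spreading into stacking, as a small worked example
(the allocation numbers mirror the fake hosts used in the tests below):

# Higher weight wins. With the default -1.0 multiplier host1 (least
# allocated) is chosen; a positive multiplier picks host4 (most
# allocated) instead.
allocated = {'host1': 0, 'host2': 1748, 'host3': 256, 'host4': 1848}

def best_host(multiplier):
    return max(allocated, key=lambda h: multiplier * allocated[h])

print(best_host(-1.0))  # host1 -> spread (default)
print(best_host(1.0))   # host4 -> stack
print(best_host(-2.0))  # host1 -> still spreads; weights merely scale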
@@ -76,7 +76,7 @@ class AdminActionsTest(test.TestCase):
     def test_reset_status_as_non_admin(self):
         # current status is 'error'
         volume = db.volume_create(context.get_admin_context(),
-                                  {'status': 'error'})
+                                  {'status': 'error', 'size': 1})
         req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
         req.method = 'POST'
         req.headers['content-type'] = 'application/json'
@@ -96,7 +96,7 @@ class AdminActionsTest(test.TestCase):
         # admin context
         ctx = context.RequestContext('admin', 'fake', True)
         # current status is available
-        volume = db.volume_create(ctx, {'status': 'available'})
+        volume = db.volume_create(ctx, {'status': 'available', 'size': 1})
         req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
         req.method = 'POST'
         req.headers['content-type'] = 'application/json'
@@ -115,7 +115,7 @@ class AdminActionsTest(test.TestCase):
         # admin context
         ctx = context.RequestContext('admin', 'fake', True)
         # current status is available
-        volume = db.volume_create(ctx, {'status': 'available'})
+        volume = db.volume_create(ctx, {'status': 'available', 'size': 1})
         req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
         req.method = 'POST'
         req.headers['content-type'] = 'application/json'
@@ -153,7 +153,8 @@ class AdminActionsTest(test.TestCase):
         # admin context
         ctx = context.RequestContext('admin', 'fake', True)
         # current status is available
-        volume = db.volume_create(ctx, {'status': 'available',
+        volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
+                                        'provider_location': '', 'size': 1,
                                         'attach_status': 'attached'})
         req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
         req.method = 'POST'
@@ -177,7 +178,8 @@ class AdminActionsTest(test.TestCase):
         # admin context
         ctx = context.RequestContext('admin', 'fake', True)
         # current status is available
-        volume = db.volume_create(ctx, {'status': 'available',
+        volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
+                                        'provider_location': '', 'size': 1,
                                         'attach_status': 'detached'})
         req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
         req.method = 'POST'
@@ -200,7 +202,8 @@ class AdminActionsTest(test.TestCase):
         # admin context
         ctx = context.RequestContext('admin', 'fake', True)
         # snapshot in 'error_deleting'
-        volume = db.volume_create(ctx, {})
+        volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
+                                        'provider_location': '', 'size': 1})
         snapshot = db.snapshot_create(ctx, {'status': 'error_deleting',
                                             'volume_id': volume['id']})
         req = webob.Request.blank('/v2/fake/snapshots/%s/action' %
@@ -222,7 +225,8 @@ class AdminActionsTest(test.TestCase):
         # admin context
         ctx = context.RequestContext('admin', 'fake', True)
         # snapshot in 'available'
-        volume = db.volume_create(ctx, {})
+        volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
+                                        'provider_location': '', 'size': 1})
         snapshot = db.snapshot_create(ctx, {'status': 'available',
                                             'volume_id': volume['id']})
         req = webob.Request.blank('/v2/fake/snapshots/%s/action' %
@@ -245,7 +249,7 @@ class AdminActionsTest(test.TestCase):
         # admin context
         ctx = context.RequestContext('admin', 'fake', True)
         # current status is creating
-        volume = db.volume_create(ctx, {'status': 'creating'})
+        volume = db.volume_create(ctx, {'size': 1})
         req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
         req.method = 'POST'
         req.headers['content-type'] = 'application/json'
@@ -263,7 +267,7 @@ class AdminActionsTest(test.TestCase):
         # admin context
         ctx = context.RequestContext('admin', 'fake', True)
         # current status is creating
-        volume = db.volume_create(ctx, {'host': 'test'})
+        volume = db.volume_create(ctx, {'host': 'test', 'size': 1})
         snapshot = db.snapshot_create(ctx, {'status': 'creating',
                                             'volume_size': 1,
                                             'volume_id': volume['id']})
@@ -291,7 +295,7 @@ class AdminActionsTest(test.TestCase):
         ctx = context.RequestContext('admin', 'fake', True)
         # current status is available
         volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
-                                        'provider_location': ''})
+                                        'provider_location': '', 'size': 1})
         connector = {'initiator': 'iqn.2012-07.org.fake:01'}
         # start service to handle rpc messages for attach requests
         svc = self.start_service('volume', host='test')
@@ -346,7 +350,7 @@ class AdminActionsTest(test.TestCase):
         ctx = context.RequestContext('admin', 'fake', True)
         # current status is available
         volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
-                                        'provider_location': ''})
+                                        'provider_location': '', 'size': 1})
         connector = {'initiator': 'iqn.2012-07.org.fake:01'}
         # start service to handle rpc messages for attach requests
         svc = self.start_service('volume', host='test')
@@ -402,7 +406,7 @@ class AdminActionsTest(test.TestCase):
         ctx = context.RequestContext('admin', 'fake', True)
         # current status is available
         volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
-                                        'provider_location': ''})
+                                        'provider_location': '', 'size': 1})
         connector = {'initiator': 'iqn.2012-07.org.fake:01'}
         # start service to handle rpc messages for attach requests
         svc = self.start_service('volume', host='test')
@@ -438,7 +442,7 @@ class AdminActionsTest(test.TestCase):
         ctx = context.RequestContext('admin', 'fake', True)
         # current status is available
         volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
-                                        'provider_location': ''})
+                                        'provider_location': '', 'size': 1})
         connector = {'initiator': 'iqn.2012-07.org.fake:01'}
         # start service to handle rpc messages for attach requests
         svc = self.start_service('volume', host='test')
@@ -474,7 +478,7 @@ class AdminActionsTest(test.TestCase):
         ctx = context.RequestContext('admin', 'fake', True)
         # current status is available
         volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
-                                        'provider_location': ''})
+                                        'provider_location': '', 'size': 1})
         connector = {}
         # start service to handle rpc messages for attach requests
         svc = self.start_service('volume', host='test')
@@ -489,7 +493,7 @@ class AdminActionsTest(test.TestCase):
         ctx = context.RequestContext('admin', 'fake', True)
         # current status is available
         volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
-                                        'provider_location': ''})
+                                        'provider_location': '', 'size': 1})
         # start service to handle rpc messages for attach requests
         svc = self.start_service('volume', host='test')
         values = {'status': 'attaching',
@@ -513,7 +517,7 @@ class AdminActionsTest(test.TestCase):
         ctx = context.RequestContext('admin', 'fake', True)
         # current status is available
         volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
-                                        'provider_location': ''})
+                                        'provider_location': '', 'size': 1})
         # start service to handle rpc messages for attach requests
         svc = self.start_service('volume', host='test')
         values = {'status': 'attaching',
@@ -34,18 +34,22 @@ class FakeHostManager(host_manager.HostManager):
         self.service_states = {
             'host1': {'total_capacity_gb': 1024,
                       'free_capacity_gb': 1024,
+                      'allocated_capacity_gb': 0,
                       'reserved_percentage': 10,
                       'timestamp': None},
             'host2': {'total_capacity_gb': 2048,
                       'free_capacity_gb': 300,
+                      'allocated_capacity_gb': 1748,
                       'reserved_percentage': 10,
                       'timestamp': None},
             'host3': {'total_capacity_gb': 512,
-                      'free_capacity_gb': 512,
+                      'free_capacity_gb': 256,
+                      'allocated_capacity_gb': 256,
                       'reserved_percentage': 0,
                       'timestamp': None},
             'host4': {'total_capacity_gb': 2048,
                       'free_capacity_gb': 200,
+                      'allocated_capacity_gb': 1848,
                       'reserved_percentage': 5,
                       'timestamp': None},
         }
cinder/tests/scheduler/test_allocated_capacity_weigher.py (new file, 92 lines)
@@ -0,0 +1,92 @@
+# Copyright 2013 eBay Inc.
+#
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Allocated Capacity Weigher.
+"""
+
+import mock
+from oslo.config import cfg
+
+from cinder import context
+from cinder.openstack.common.scheduler.weights import HostWeightHandler
+from cinder.scheduler.weights.capacity import AllocatedCapacityWeigher as ACW
+from cinder import test
+from cinder.tests.scheduler import fakes
+
+CONF = cfg.CONF
+
+
+class AllocatedCapacityWeigherTestCase(test.TestCase):
+    def setUp(self):
+        super(AllocatedCapacityWeigherTestCase, self).setUp()
+        self.host_manager = fakes.FakeHostManager()
+        self.weight_handler = HostWeightHandler('cinder.scheduler.weights')
+
+    def _get_weighed_host(self, hosts, weight_properties=None):
+        if weight_properties is None:
+            weight_properties = {}
+        return self.weight_handler.get_weighed_objects([ACW], hosts,
+                                                       weight_properties)[0]
+
+    @mock.patch('cinder.db.sqlalchemy.api.service_get_all_by_topic')
+    def _get_all_hosts(self, _mock_service_get_all_by_topic):
+        ctxt = context.get_admin_context()
+        fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic)
+        host_states = self.host_manager.get_all_host_states(ctxt)
+        _mock_service_get_all_by_topic.assert_called_once_with(
+            ctxt, CONF.volume_topic)
+        return host_states
+
+    def test_default_of_spreading_first(self):
+        hostinfo_list = self._get_all_hosts()
+
+        # host1: allocated_capacity_gb=0, weight=0
+        # host2: allocated_capacity_gb=1748, weight=-1748
+        # host3: allocated_capacity_gb=256, weight=-256
+        # host4: allocated_capacity_gb=1848, weight=-1848
+
+        # so, host1 should win:
+        weighed_host = self._get_weighed_host(hostinfo_list)
+        self.assertEqual(weighed_host.weight, 0)
+        self.assertEqual(weighed_host.obj.host, 'host1')
+
+    def test_capacity_weight_multiplier1(self):
+        self.flags(allocated_capacity_weight_multiplier=1.0)
+        hostinfo_list = self._get_all_hosts()
+
+        # host1: allocated_capacity_gb=0, weight=0
+        # host2: allocated_capacity_gb=1748, weight=1748
+        # host3: allocated_capacity_gb=256, weight=256
+        # host4: allocated_capacity_gb=1848, weight=1848
+
+        # so, host4 should win:
+        weighed_host = self._get_weighed_host(hostinfo_list)
+        self.assertEqual(weighed_host.weight, 1848.0)
+        self.assertEqual(weighed_host.obj.host, 'host4')
+
+    def test_capacity_weight_multiplier2(self):
+        self.flags(allocated_capacity_weight_multiplier=-2.0)
+        hostinfo_list = self._get_all_hosts()
+
+        # host1: allocated_capacity_gb=0, weight=0
+        # host2: allocated_capacity_gb=1748, weight=-3496
+        # host3: allocated_capacity_gb=256, weight=-512
+        # host4: allocated_capacity_gb=1848, weight=-3696
+
+        # so, host1 should win:
+        weighed_host = self._get_weighed_host(hostinfo_list)
+        self.assertEqual(weighed_host.weight, 0)
+        self.assertEqual(weighed_host.obj.host, 'host1')
@@ -56,7 +56,7 @@ class CapacityWeigherTestCase(test.TestCase):
 
         # host1: free_capacity_gb=1024, free=1024*(1-0.1)
         # host2: free_capacity_gb=300, free=300*(1-0.1)
-        # host3: free_capacity_gb=512, free=512
+        # host3: free_capacity_gb=512, free=256
         # host4: free_capacity_gb=200, free=200*(1-0.05)
 
         # so, host1 should win:
@@ -70,7 +70,7 @@ class CapacityWeigherTestCase(test.TestCase):
 
         # host1: free_capacity_gb=1024, free=-1024*(1-0.1)
         # host2: free_capacity_gb=300, free=-300*(1-0.1)
-        # host3: free_capacity_gb=512, free=-512
+        # host3: free_capacity_gb=512, free=-256
         # host4: free_capacity_gb=200, free=-200*(1-0.05)
 
         # so, host4 should win:
@@ -84,7 +84,7 @@ class CapacityWeigherTestCase(test.TestCase):
 
         # host1: free_capacity_gb=1024, free=1024*(1-0.1)*2
         # host2: free_capacity_gb=300, free=300*(1-0.1)*2
-        # host3: free_capacity_gb=512, free=512*2
+        # host3: free_capacity_gb=512, free=256*2
         # host4: free_capacity_gb=200, free=200*(1-0.05)*2
 
         # so, host1 should win:
@@ -90,6 +90,7 @@ class GPFSDriverTestCase(test.TestCase):
         self.volume = importutils.import_object(CONF.volume_manager)
         self.volume.driver.set_execute(self._execute_wrapper)
         self.volume.driver.set_initialized()
+        self.volume.stats = dict(allocated_capacity_gb=0)
 
         self.stubs.Set(GPFSDriver, '_create_gpfs_snap',
                        self._fake_gpfs_snap)
@@ -702,6 +702,7 @@ class ManagedRBDTestCase(DriverTestCase):
         super(ManagedRBDTestCase, self).setUp()
         fake_image.stub_out_image_service(self.stubs)
         self.volume.driver.set_initialized()
+        self.volume.stats = {'allocated_capacity_gb': 0}
         self.called = []
 
     def _create_volume_from_image(self, expected_status, raw=False,
@@ -113,6 +113,7 @@ class BaseVolumeTestCase(test.TestCase):
         self.stubs.Set(brick_lvm.LVM, '_vg_exists', lambda x: True)
         self.stubs.Set(os.path, 'exists', lambda x: True)
         self.volume.driver.set_initialized()
+        self.volume.stats = {'allocated_capacity_gb': 0}
         # keep ordered record of what we execute
         self.called = []
 
@@ -193,6 +193,7 @@ class VolumeManager(manager.SchedulerDependentManager):
         self.configuration = Configuration(volume_manager_opts,
                                            config_group=service_name)
         self._tp = GreenPool()
+        self.stats = {}
 
         if not volume_driver:
             # Get from configuration, which will get the default
@@ -243,8 +244,13 @@ class VolumeManager(manager.SchedulerDependentManager):
         LOG.debug(_("Re-exporting %s volumes"), len(volumes))
 
         try:
+            sum = 0
+            self.stats.update({'allocated_capacity_gb': sum})
             for volume in volumes:
                 if volume['status'] in ['available', 'in-use']:
+                    # calculate allocated capacity for driver
+                    sum += volume['size']
+                    self.stats['allocated_capacity_gb'] = sum
                     self.driver.ensure_export(ctxt, volume)
                 elif volume['status'] == 'downloading':
                     LOG.info(_("volume %s stuck in a downloading state"),
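
The initial accounting piggybacks on the volume list that init_host()
already fetches for ensure_export(); only volumes that actually occupy
space are counted. The same arithmetic as a standalone sketch (the volume
records are made up):

# Only 'available' and 'in-use' volumes contribute to the Cinder-level
# allocated capacity computed at service start-up.
volumes = [
    {'status': 'available', 'size': 10},
    {'status': 'error', 'size': 5},    # skipped by the status check
    {'status': 'in-use', 'size': 20},
]
allocated = 0
for vol in volumes:
    if vol['status'] in ['available', 'in-use']:
        allocated += vol['size']
assert {'allocated_capacity_gb': allocated} == {'allocated_capacity_gb': 30}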
@@ -341,6 +347,8 @@ class VolumeManager(manager.SchedulerDependentManager):
 
         # Fetch created volume from storage
         volume_ref = flow_engine.storage.fetch('volume')
+        # Update volume stats
+        self.stats['allocated_capacity_gb'] += volume_ref['size']
         return volume_ref['id']
 
     @utils.require_driver_initialized
@@ -417,6 +425,7 @@ class VolumeManager(manager.SchedulerDependentManager):
         if reservations:
             QUOTAS.commit(context, reservations, project_id=project_id)
 
+        self.stats['allocated_capacity_gb'] -= volume_ref['size']
         self.publish_service_capabilities(context)
 
         return True
@@ -925,8 +934,9 @@ class VolumeManager(manager.SchedulerDependentManager):
         else:
             volume_stats = self.driver.get_volume_stats(refresh=True)
             if volume_stats:
-                # This will grab info about the host and queue it
-                # to be sent to the Schedulers.
+                # Append volume stats with 'allocated_capacity_gb'
+                volume_stats.update(self.stats)
+                # queue it to be sent to the Schedulers.
                 self.update_service_capabilities(volume_stats)
 
     def publish_service_capabilities(self, context):
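
This merge is exactly what the DriverImpact note warns about: dict.update()
lets the manager's value win on a key clash. A hedged sketch of the failure
mode (the driver stats and the 'backend_allocated_gb' key are hypothetical):

# If a driver also reported 'allocated_capacity_gb', update() would
# silently replace it with the manager's Cinder-level number, so a
# driver should namespace its own allocation stat under another key.
driver_stats = {
    'free_capacity_gb': 300,
    'backend_allocated_gb': 2100,  # hypothetical, safely distinct key
}
manager_stats = {'allocated_capacity_gb': 1748}
driver_stats.update(manager_stats)  # the manager's key always wins
assert driver_stats['allocated_capacity_gb'] == 1748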
@@ -1001,6 +1011,7 @@ class VolumeManager(manager.SchedulerDependentManager):
         QUOTAS.commit(context, reservations)
         self.db.volume_update(context, volume['id'], {'size': int(new_size),
                                                       'status': 'available'})
+        self.stats['allocated_capacity_gb'] += size_increase
         self._notify_about_volume_usage(
             context, volume, "resize.end",
             extra_usage_info={'size': int(new_size)})
@@ -996,6 +996,10 @@
 # numbers mean to stack vs spread. (floating point value)
 #capacity_weight_multiplier=1.0
 
+# Multiplier used for weighing volume capacity. Negative
+# numbers mean to stack vs spread. (floating point value)
+#allocated_capacity_weight_multiplier=-1.0
+
 
 #
 # Options defined in cinder.transfer.api
@@ -46,6 +46,7 @@ cinder.scheduler.filters =
    JsonFilter = cinder.openstack.common.scheduler.filters.json_filter:JsonFilter
    RetryFilter = cinder.scheduler.filters.retry_filter:RetryFilter
 cinder.scheduler.weights =
+    AllocatedCapacityWeigher = cinder.scheduler.weights.capacity:AllocatedCapacityWeigher
    CapacityWeigher = cinder.scheduler.weights.capacity:CapacityWeigher
    ChanceWeigher = cinder.scheduler.weights.chance:ChanceWeigher
 