Reduce config access in scheduler
Caching the configuration appears to make an impact on the time spent in the scheduler. These config values reduce the time spent from around 5ms to around 4ms per request, with 200 hosts. Change-Id: I433cbbdbef4ac3c01a3e2b6ed5cf092a5cce6372
This commit is contained in:
parent
93f29574c4
commit
956b37dcf0
|
@ -67,9 +67,10 @@ class BaseRamFilter(filters.BaseHostFilter):
|
|||
|
||||
class RamFilter(BaseRamFilter):
|
||||
"""Ram Filter with over subscription flag."""
|
||||
ram_allocation_ratio = CONF.ram_allocation_ratio
|
||||
|
||||
def _get_ram_allocation_ratio(self, host_state, filter_properties):
|
||||
return CONF.ram_allocation_ratio
|
||||
return self.ram_allocation_ratio
|
||||
|
||||
|
||||
class AggregateRamFilter(BaseRamFilter):
|
||||
|
|
|
@ -35,6 +35,7 @@ class DbDriver(api.ServiceGroupDriver):
|
|||
def __init__(self, *args, **kwargs):
|
||||
self.db_allowed = kwargs.get('db_allowed', True)
|
||||
self.conductor_api = conductor.API(use_local=self.db_allowed)
|
||||
self.service_down_time = CONF.service_down_time
|
||||
|
||||
def join(self, member_id, group_id, service=None):
|
||||
"""Join the given service with it's group."""
|
||||
|
@ -67,7 +68,7 @@ class DbDriver(api.ServiceGroupDriver):
|
|||
last_heartbeat = last_heartbeat.replace(tzinfo=None)
|
||||
# Timestamps in DB are UTC.
|
||||
elapsed = timeutils.delta_seconds(last_heartbeat, timeutils.utcnow())
|
||||
is_up = abs(elapsed) <= CONF.service_down_time
|
||||
is_up = abs(elapsed) <= self.service_down_time
|
||||
if not is_up:
|
||||
msg = _('Seems service is down. Last heartbeat was %(lhb)s. '
|
||||
'Elapsed time is %(el)s')
|
||||
|
|
|
@ -27,6 +27,7 @@ from nova.openstack.common import timeutils
|
|||
from nova.pci import pci_stats
|
||||
from nova.scheduler import filters
|
||||
from nova.scheduler.filters import extra_specs_ops
|
||||
from nova.scheduler.filters import ram_filter
|
||||
from nova.scheduler.filters import trusted_filter
|
||||
from nova import servicegroup
|
||||
from nova import test
|
||||
|
@ -499,7 +500,7 @@ class HostFiltersTestCase(test.NoDBTestCase):
|
|||
def test_ram_filter_fails_on_memory(self):
|
||||
self._stub_service_is_up(True)
|
||||
filt_cls = self.class_map['RamFilter']()
|
||||
self.flags(ram_allocation_ratio=1.0)
|
||||
ram_filter.RamFilter.ram_allocation_ratio = 1.0
|
||||
filter_properties = {'instance_type': {'memory_mb': 1024}}
|
||||
service = {'disabled': False}
|
||||
host = fakes.FakeHostState('host1', 'node1',
|
||||
|
@ -510,7 +511,7 @@ class HostFiltersTestCase(test.NoDBTestCase):
|
|||
def test_ram_filter_passes(self):
|
||||
self._stub_service_is_up(True)
|
||||
filt_cls = self.class_map['RamFilter']()
|
||||
self.flags(ram_allocation_ratio=1.0)
|
||||
ram_filter.RamFilter.ram_allocation_ratio = 1.0
|
||||
filter_properties = {'instance_type': {'memory_mb': 1024}}
|
||||
service = {'disabled': False}
|
||||
host = fakes.FakeHostState('host1', 'node1',
|
||||
|
@ -521,7 +522,7 @@ class HostFiltersTestCase(test.NoDBTestCase):
|
|||
def test_ram_filter_oversubscribe(self):
|
||||
self._stub_service_is_up(True)
|
||||
filt_cls = self.class_map['RamFilter']()
|
||||
self.flags(ram_allocation_ratio=2.0)
|
||||
ram_filter.RamFilter.ram_allocation_ratio = 2.0
|
||||
filter_properties = {'instance_type': {'memory_mb': 1024}}
|
||||
service = {'disabled': False}
|
||||
host = fakes.FakeHostState('host1', 'node1',
|
||||
|
|
Loading…
Reference in New Issue