Added compute node stats to HostState
Added compute node statistics to the HostState class for use by scheduler filters. These statistics are pre-computed on the compute side of the house and are of general interest to filter authors.

Change-Id: I6d979b1a9f26b8390c20b4ebbde3c9cb8f000396
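For a sense of how filter authors can consume the new HostState fields, here is a minimal sketch (not part of this commit) assuming the BaseHostFilter interface used by the filter scheduler of this era; the filter name and threshold are made up:

    # Hypothetical illustration, not part of this commit. Assumes the
    # BaseHostFilter interface (host_passes) used by the filter scheduler.
    from nova.scheduler import filters


    class SimpleIoOpsFilter(filters.BaseHostFilter):
        """Skip hosts already saturated with I/O-intensive operations."""

        max_io_ops = 8  # made-up threshold

        def host_passes(self, host_state, filter_properties):
            # num_io_ops is one of the stats this change adds to HostState.
            return host_state.num_io_ops < self.max_io_ops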
@@ -174,7 +174,11 @@ class FilterScheduler(driver.Scheduler):
         """Stuff things into filter_properties. Can be overridden in a
         subclass to add more data.
         """
-        pass
+        # Save useful information from the request spec for filter processing:
+        project_id = request_spec['instance_properties']['project_id']
+        os_type = request_spec['instance_properties']['os_type']
+        filter_properties['project_id'] = project_id
+        filter_properties['os_type'] = os_type
 
     def _max_attempts(self):
         max_attempts = FLAGS.scheduler_max_attempts
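To make the effect of the new populate_filter_properties() lines concrete, a small sketch with made-up values:

    # Sketch only: what the scheduler now stuffs into filter_properties.
    request_spec = {'instance_properties': {'project_id': 'p1',
                                            'os_type': 'Linux'}}
    filter_properties = {}
    # After populate_filter_properties(request_spec, filter_properties):
    #   filter_properties == {'project_id': 'p1', 'os_type': 'Linux'}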
@@ -19,6 +19,8 @@ Manage hosts in the current zone.
 
 import UserDict
 
+from nova.compute import task_states
+from nova.compute import vm_states
 from nova import db
 from nova import exception
 from nova import flags
@@ -111,6 +113,19 @@ class HostState(object):
         self.free_disk_mb = 0
         self.vcpus_total = 0
         self.vcpus_used = 0
+        # Valid vm types on this host: 'pv', 'hvm' or 'all'
+        if 'allowed_vm_type' in self.capabilities:
+            self.allowed_vm_type = self.capabilities['allowed_vm_type']
+        else:
+            self.allowed_vm_type = 'all'
+
+        # Additional host information from the compute node stats:
+        self.vm_states = {}
+        self.task_states = {}
+        self.num_instances = 0
+        self.num_instances_by_project = {}
+        self.num_instances_by_os_type = {}
+        self.num_io_ops = 0
 
         # Resource oversubscription values for the compute host:
         self.limits = {}
@@ -134,6 +149,40 @@ class HostState(object):
         self.vcpus_total = compute['vcpus']
         self.vcpus_used = compute['vcpus_used']
 
+        stats = compute.get('stats', [])
+        statmap = self._statmap(stats)
+
+        # Track number of instances on host
+        self.num_instances = int(statmap.get('num_instances', 0))
+
+        # Track number of instances by project_id
+        project_id_keys = [k for k in statmap.keys() if
+                k.startswith("num_proj_")]
+        for key in project_id_keys:
+            project_id = key[9:]
+            self.num_instances_by_project[project_id] = int(statmap[key])
+
+        # Track number of instances in certain vm_states
+        vm_state_keys = [k for k in statmap.keys() if k.startswith("num_vm_")]
+        for key in vm_state_keys:
+            vm_state = key[7:]
+            self.vm_states[vm_state] = int(statmap[key])
+
+        # Track number of instances in certain task_states
+        task_state_keys = [k for k in statmap.keys() if
+                k.startswith("num_task_")]
+        for key in task_state_keys:
+            task_state = key[9:]
+            self.task_states[task_state] = int(statmap[key])
+
+        # Track number of instances by os_type
+        os_keys = [k for k in statmap.keys() if k.startswith("num_os_type_")]
+        for key in os_keys:
+            os = key[12:]
+            self.num_instances_by_os_type[os] = int(statmap[key])
+
+        self.num_io_ops = int(statmap.get('io_workload', 0))
+
     def consume_from_instance(self, instance):
         """Incrementally update host state from an instance"""
         disk_mb = (instance['root_gb'] + instance['ephemeral_gb']) * 1024
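The compute node reports its stats as a list of key/value rows whose values arrive as strings, which is why every field is cast with int(). The slice offsets above are just the prefix lengths: len('num_proj_') == len('num_task_') == 9, len('num_vm_') == 7, len('num_os_type_') == 12. A standalone sketch with made-up rows, using the same dict-folding that _statmap() (added in the next hunk) performs:

    # Standalone sketch of the stats wire format (values are strings):
    stats = [{'key': 'num_instances', 'value': '5'},
             {'key': 'num_proj_12345', 'value': '3'},
             {'key': 'io_workload', 'value': '2'}]
    statmap = dict((st['key'], st['value']) for st in stats)
    # len('num_proj_') == 9, so key[9:] recovers the project id:
    assert 'num_proj_12345'[9:] == '12345'
    assert int(statmap['num_instances']) == 5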
@@ -143,6 +192,44 @@ class HostState(object):
         self.free_disk_mb -= disk_mb
         self.vcpus_used += vcpus
 
+        # Track number of instances on host
+        self.num_instances += 1
+
+        # Track number of instances by project_id
+        project_id = instance.get('project_id')
+        if project_id not in self.num_instances_by_project:
+            self.num_instances_by_project[project_id] = 0
+        self.num_instances_by_project[project_id] += 1
+
+        # Track number of instances in certain vm_states
+        vm_state = instance.get('vm_state', vm_states.BUILDING)
+        if vm_state not in self.vm_states:
+            self.vm_states[vm_state] = 0
+        self.vm_states[vm_state] += 1
+
+        # Track number of instances in certain task_states
+        task_state = instance.get('task_state')
+        if task_state not in self.task_states:
+            self.task_states[task_state] = 0
+        self.task_states[task_state] += 1
+
+        # Track number of instances by os_type
+        os_type = instance.get('os_type')
+        if os_type not in self.num_instances_by_os_type:
+            self.num_instances_by_os_type[os_type] = 0
+        self.num_instances_by_os_type[os_type] += 1
+
+        vm_state = instance.get('vm_state', vm_states.BUILDING)
+        task_state = instance.get('task_state')
+        if vm_state == vm_states.BUILDING or task_state in [
+                task_states.RESIZE_MIGRATING, task_states.REBUILDING,
+                task_states.RESIZE_PREP, task_states.IMAGE_SNAPSHOT,
+                task_states.IMAGE_BACKUP]:
+            self.num_io_ops += 1
+
+    def _statmap(self, stats):
+        return dict((st['key'], st['value']) for st in stats)
+
     def passes_filters(self, filter_fns, filter_properties):
         """Return whether or not this host passes filters."""
 
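A quick sanity sketch of consume_from_instance(): a single building instance bumps the per-project and per-state counters and, because BUILDING counts as I/O-intensive, num_io_ops as well. Values are made up; the HostState construction mirrors the new unit tests further down:

    # Sketch only; mirrors the new unit tests below.
    host = host_manager.HostState('host1', 'compute')
    host.consume_from_instance({'root_gb': 10, 'ephemeral_gb': 0,
                                'memory_mb': 512, 'vcpus': 1,
                                'project_id': 'p1',
                                'vm_state': vm_states.BUILDING,
                                'task_state': None, 'os_type': 'Linux'})
    # host.num_instances == 1
    # host.num_instances_by_project['p1'] == 1
    # host.vm_states[vm_states.BUILDING] == 1
    # host.num_io_ops == 1  (BUILDING counts as an I/O-intensive op)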
@@ -170,8 +257,9 @@ class HostState(object):
         return True
 
     def __repr__(self):
-        return ("host '%s': free_ram_mb:%s free_disk_mb:%s" %
-                (self.host, self.free_ram_mb, self.free_disk_mb))
+        return ("%s ram:%s disk:%s io_ops:%s instances:%s vm_type:%s" %
+                (self.host, self.free_ram_mb, self.free_disk_mb,
+                 self.num_io_ops, self.num_instances, self.allowed_vm_type))
 
 
 class HostManager(object):
@@ -49,9 +49,10 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
 
         uuid = 'fake-uuid1'
         fake_context = context.RequestContext('user', 'project')
+        instance_properties = {'project_id': 1, 'os_type': 'Linux'}
         request_spec = {'instance_type': {'memory_mb': 1, 'root_gb': 1,
                                           'ephemeral_gb': 0},
-                        'instance_properties': {'project_id': 1},
+                        'instance_properties': instance_properties,
                         'instance_uuids': [uuid]}
 
         self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
@@ -80,8 +81,9 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
         fake_context = context.RequestContext('user', 'project')
 
         uuid = 'fake-uuid1'
+        instance_properties = {'project_id': 1, 'os_type': 'Linux'}
         request_spec = {'instance_type': {'memory_mb': 1, 'local_gb': 1},
-                        'instance_properties': {'project_id': 1},
+                        'instance_properties': instance_properties,
                         'instance_uuids': [uuid]}
         self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
         self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
@@ -180,7 +182,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
                           'root_gb': 512,
                           'memory_mb': 512,
                           'ephemeral_gb': 0,
-                          'vcpus': 1}}
+                          'vcpus': 1,
+                          'os_type': 'Linux'}}
         self.mox.ReplayAll()
         weighted_hosts = sched._schedule(fake_context, 'compute',
                 request_spec, {})
@@ -245,7 +248,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
         self.flags(scheduler_max_attempts=1)
         sched = fakes.FakeFilterScheduler()
 
-        instance_properties = {}
+        instance_properties = {'project_id': '12345', 'os_type': 'Linux'}
         request_spec = dict(instance_properties=instance_properties)
         filter_properties = {}
 
@@ -260,7 +263,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
         self.flags(scheduler_max_attempts=2)
         sched = fakes.FakeFilterScheduler()
 
-        instance_properties = {}
+        instance_properties = {'project_id': '12345', 'os_type': 'Linux'}
         request_spec = dict(instance_properties=instance_properties)
         filter_properties = {}
 
@@ -275,7 +278,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
         self.flags(scheduler_max_attempts=2)
         sched = fakes.FakeFilterScheduler()
 
-        instance_properties = {}
+        instance_properties = {'project_id': '12345', 'os_type': 'Linux'}
         request_spec = dict(instance_properties=instance_properties)
 
         retry = dict(num_attempts=1)
@@ -292,7 +295,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
         self.flags(scheduler_max_attempts=2)
         sched = fakes.FakeFilterScheduler()
 
-        instance_properties = {}
+        instance_properties = {'project_id': '12345', 'os_type': 'Linux'}
        request_spec = dict(instance_properties=instance_properties)
 
         retry = dict(num_attempts=2)
@@ -17,6 +17,8 @@ Tests For HostManager
 """
 
 
+from nova.compute import task_states
+from nova.compute import vm_states
 from nova import db
 from nova import exception
 from nova.openstack.common import timeutils
@@ -67,7 +69,7 @@ class HostManagerTestCase(test.TestCase):
         fake_host1 = host_manager.HostState('host1', topic)
         fake_host2 = host_manager.HostState('host2', topic)
         hosts = [fake_host1, fake_host2]
-        filter_properties = 'fake_properties'
+        filter_properties = {'fake_prop': 'fake_val'}
 
         self.mox.StubOutWithMock(self.host_manager,
                 '_choose_host_filters')
@@ -247,3 +249,55 @@ class HostStateTestCase(test.TestCase):
         self.mox.ReplayAll()
         result = fake_host.passes_filters(filter_fns, filter_properties)
         self.assertTrue(result)
+
+    def test_stat_consumption_from_compute_node(self):
+        stats = [
+            dict(key='num_instances', value='5'),
+            dict(key='num_proj_12345', value='3'),
+            dict(key='num_proj_23456', value='1'),
+            dict(key='num_vm_%s' % vm_states.BUILDING, value='2'),
+            dict(key='num_vm_%s' % vm_states.SUSPENDED, value='1'),
+            dict(key='num_task_%s' % task_states.RESIZE_MIGRATING, value='1'),
+            dict(key='num_task_%s' % task_states.MIGRATING, value='2'),
+            dict(key='num_os_type_linux', value='4'),
+            dict(key='num_os_type_windoze', value='1'),
+            dict(key='io_workload', value='42'),
+        ]
+        compute = dict(stats=stats, memory_mb=0, free_disk_gb=0, local_gb=0,
+                       local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0)
+
+        host = host_manager.HostState("fakehost", "faketopic")
+        host.update_from_compute_node(compute)
+
+        self.assertEqual(5, host.num_instances)
+        self.assertEqual(3, host.num_instances_by_project['12345'])
+        self.assertEqual(1, host.num_instances_by_project['23456'])
+        self.assertEqual(2, host.vm_states[vm_states.BUILDING])
+        self.assertEqual(1, host.vm_states[vm_states.SUSPENDED])
+        self.assertEqual(1, host.task_states[task_states.RESIZE_MIGRATING])
+        self.assertEqual(2, host.task_states[task_states.MIGRATING])
+        self.assertEqual(4, host.num_instances_by_os_type['linux'])
+        self.assertEqual(1, host.num_instances_by_os_type['windoze'])
+        self.assertEqual(42, host.num_io_ops)
+
+    def test_stat_consumption_from_instance(self):
+        host = host_manager.HostState("fakehost", "faketopic")
+
+        instance = dict(root_gb=0, ephemeral_gb=0, memory_mb=0, vcpus=0,
+                        project_id='12345', vm_state=vm_states.BUILDING,
+                        task_state=task_states.SCHEDULING, os_type='Linux')
+        host.consume_from_instance(instance)
+
+        instance = dict(root_gb=0, ephemeral_gb=0, memory_mb=0, vcpus=0,
+                        project_id='12345', vm_state=vm_states.PAUSED,
+                        task_state=None, os_type='Linux')
+        host.consume_from_instance(instance)
+
+        self.assertEqual(2, host.num_instances)
+        self.assertEqual(2, host.num_instances_by_project['12345'])
+        self.assertEqual(1, host.vm_states[vm_states.BUILDING])
+        self.assertEqual(1, host.vm_states[vm_states.PAUSED])
+        self.assertEqual(1, host.task_states[task_states.SCHEDULING])
+        self.assertEqual(1, host.task_states[None])
+        self.assertEqual(2, host.num_instances_by_os_type['Linux'])
+        self.assertEqual(1, host.num_io_ops)