Updated scheduler and compute for multiple capabilities.
Part 1 of 6: blueprint general-bare-metal-provisioning-framework.

This patch updates the scheduler and compute code to support multiple capabilities. The feature is needed for bare-metal provisioning, which is implemented in later patches: one bare-metal nova-compute manages multiple bare-metal nodes on which instances are provisioned. A compute_nodes entry must be created in the Nova DB for each bare-metal node, so that the scheduler can choose an appropriate bare-metal node on which to provision an instance.

With this patch, nova-compute can register one service entry with multiple compute_node entries. Each node is given a distinct node name, which is stored in compute_node['hypervisor_hostname']. A new column, "node", is added to the "instances" table in the Nova DB to associate instances with compute_nodes: FilterScheduler writes the node name to that column when it provisions an instance, and nova-compute respects it when running or stopping instances and when calculating resources. Finally, 'capability' is extended from a dictionary to a list of dictionaries, so that the multiple capabilities of multiple nodes can be described.

Change-Id: I527febe4dbd887b2e6596ce7226c1ae3386e2ae6
Co-authored-by: Mikyung Kang <mkkang@isi.edu>
Co-authored-by: David Kang <dkang@isi.edu>
Co-authored-by: Ken Igarashi <igarashik@nttdocomo.co.jp>
Co-authored-by: Arata Notsu <notsu@virtualtech.jp>
parent 1ce7e23fa7
commit e12e31b10b
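For a sense of what the extended capability reporting carries, here is a minimal sketch (the node names and resource figures are made up for illustration): a bare-metal nova-compute that manages several nodes now reports one capability dict per node rather than a single dict for the whole service.

    # Hypothetical capability payload for a bare-metal nova-compute
    # managing three nodes; the field values are illustrative only.
    capabilities = [
        {'hypervisor_hostname': 'node1', 'memory_mb': 2048, 'local_gb': 100},
        {'hypervisor_hostname': 'node2', 'memory_mb': 4096, 'local_gb': 200},
        {'hypervisor_hostname': 'node3', 'memory_mb': 8192, 'local_gb': 400},
    ]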
nova/manager.py
@@ -240,6 +240,8 @@ class SchedulerDependentManager(Manager):

     def update_service_capabilities(self, capabilities):
         """Remember these capabilities to send on next periodic update."""
+        if not isinstance(capabilities, list):
+            capabilities = [capabilities]
         self.last_capabilities = capabilities

     @periodic_task
@@ -251,5 +253,8 @@ class SchedulerDependentManager(Manager):
         """
         if self.last_capabilities:
             LOG.debug(_('Notifying Schedulers of capabilities ...'))
-            self.scheduler_rpcapi.update_service_capabilities(context,
-                    self.service_name, self.host, self.last_capabilities)
+            for capability_item in self.last_capabilities:
+                self.scheduler_rpcapi.update_service_capabilities(context,
+                        self.service_name, self.host, capability_item)
+                # TODO(NTTdocomo): Make update_service_capabilities() accept a list
+                # of capabilities
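As a runnable model of the two hunks above (a standalone stand-in for Nova's SchedulerDependentManager, with the scheduler RPC replaced by a print): a single capability dict from an old-style caller is wrapped into a one-element list, and the periodic task then publishes each item with its own RPC call.

    # Standalone sketch; FakeManager and print() stand in for
    # SchedulerDependentManager and scheduler_rpcapi.
    class FakeManager(object):
        def update_service_capabilities(self, capabilities):
            if not isinstance(capabilities, list):
                capabilities = [capabilities]  # old single-dict callers
            self.last_capabilities = capabilities

        def publish_service_capabilities(self):
            for capability_item in self.last_capabilities:
                print('publishing: %s' % capability_item)  # one call per node

    m = FakeManager()
    m.update_service_capabilities([{'hypervisor_hostname': 'node1'},
                                   {'hypervisor_hostname': 'node2'}])
    m.publish_service_capabilities()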
nova/scheduler/driver.py
@@ -99,6 +99,15 @@ def instance_update_db(context, instance_uuid):
     return db.instance_update(context, instance_uuid, values)


+def db_instance_node_set(context, instance_uuid, node):
+    '''Set the node field of an Instance.
+
+    :returns: An Instance with the updated fields set properly.
+    '''
+    values = {'node': node}
+    return db.instance_update(context, instance_uuid, values)
+
+
 def cast_to_compute_host(context, host, method, **kwargs):
     """Cast request to a compute host queue"""

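The new helper is a thin wrapper around db.instance_update(). A minimal sketch of its effect, with the database faked by a dict so it runs outside Nova (the uuid and node name below are hypothetical):

    # Fake 'instances' table keyed by uuid.
    FAKE_INSTANCES = {'fake-uuid': {'host': 'host1', 'node': None}}

    def db_instance_node_set(context, instance_uuid, node):
        # mirrors the real helper: update only the 'node' column
        FAKE_INSTANCES[instance_uuid]['node'] = node
        return FAKE_INSTANCES[instance_uuid]

    print(db_instance_node_set(None, 'fake-uuid', 'node3'))
    # -> {'host': 'host1', 'node': 'node3'}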
nova/scheduler/filter_scheduler.py
@@ -134,7 +134,11 @@ class FilterScheduler(driver.Scheduler):
                 'scheduler.run_instance.scheduled', notifier.INFO,
                 payload)

-        updated_instance = driver.instance_update_db(context, instance_uuid)
+        # TODO(NTTdocomo): Combine the next two updates into one
+        driver.db_instance_node_set(context,
+                instance_uuid, weighted_host.host_state.nodename)
+        updated_instance = driver.instance_update_db(context,
+                instance_uuid)

         self._post_select_populate_filter_properties(filter_properties,
             weighted_host.host_state)
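To see where the nodename comes from, a hedged sketch of the objects involved (simplified stand-ins for the real classes): the chosen WeightedHost wraps a HostState, and its nodename is what gets written to the instance's new 'node' column.

    class HostState(object):
        def __init__(self, host, node):
            self.host = host
            self.nodename = node

    class WeightedHost(object):
        def __init__(self, weight, host_state):
            self.weight = weight
            self.host_state = host_state

    weighted_host = WeightedHost(1.0, HostState('host1', 'node2'))
    print(weighted_host.host_state.nodename)  # 'node2' -> instances.node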
nova/scheduler/host_manager.py
@@ -93,8 +93,9 @@ class HostState(object):
     previously used and lock down access.
     """

-    def __init__(self, host, capabilities=None, service=None):
+    def __init__(self, host, node, capabilities=None, service=None):
         self.host = host
+        self.nodename = node
         self.update_capabilities(capabilities, service)

     # Mutable available resources.
@@ -265,8 +266,8 @@ class HostState(object):
         return True

     def __repr__(self):
-        return ("%s ram:%s disk:%s io_ops:%s instances:%s vm_type:%s" %
-                (self.host, self.free_ram_mb, self.free_disk_mb,
+        return ("(%s, %s) ram:%s disk:%s io_ops:%s instances:%s vm_type:%s" %
+                (self.host, self.nodename, self.free_ram_mb, self.free_disk_mb,
                  self.num_io_ops, self.num_instances, self.allowed_vm_type))


@@ -277,7 +278,8 @@ class HostManager(object):
     host_state_cls = HostState

     def __init__(self):
-        self.service_states = {}  # { <host> : { <service> : { cap k : v }}}
+        # { (host, hypervisor_hostname) : { <service> : { cap k : v }}}
+        self.service_states = {}
         self.host_state_map = {}
         self.filter_classes = filters.get_filter_classes(
             CONF.scheduler_available_filters)
@@ -330,12 +332,13 @@ class HostManager(object):
                       'from %(host)s'), locals())
             return

+        state_key = (host, capabilities.get('hypervisor_hostname'))
         LOG.debug(_("Received %(service_name)s service update from "
-                    "%(host)s."), locals())
+                    "%(state_key)s.") % locals())
         # Copy the capabilities, so we don't modify the original dict
         capab_copy = dict(capabilities)
         capab_copy["timestamp"] = timeutils.utcnow()  # Reported time
-        self.service_states[host] = capab_copy
+        self.service_states[state_key] = capab_copy

     def get_all_host_states(self, context):
         """Returns a list of HostStates that represents all the hosts
@@ -351,16 +354,18 @@ class HostManager(object):
                 LOG.warn(_("No service for compute ID %s") % compute['id'])
                 continue
             host = service['host']
-            capabilities = self.service_states.get(host, None)
-            host_state = self.host_state_map.get(host)
+            node = compute.get('hypervisor_hostname')
+            state_key = (host, node)
+            capabilities = self.service_states.get(state_key, None)
+            host_state = self.host_state_map.get(state_key)
             if host_state:
                 host_state.update_capabilities(capabilities,
                                                dict(service.iteritems()))
             else:
-                host_state = self.host_state_cls(host,
+                host_state = self.host_state_cls(host, node,
                         capabilities=capabilities,
                         service=dict(service.iteritems()))
-                self.host_state_map[host] = host_state
+                self.host_state_map[state_key] = host_state
             host_state.update_from_compute_node(compute)

         return self.host_state_map.itervalues()
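The effect of the host_manager changes, as a runnable sketch with hypothetical payloads: capability state is now keyed by a (host, hypervisor_hostname) tuple, so one compute service can expose several schedulable nodes, while a service that reports no hypervisor_hostname simply gets a None node key.

    service_states = {}

    def update_service_capabilities(host, capabilities):
        state_key = (host, capabilities.get('hypervisor_hostname'))
        service_states[state_key] = dict(capabilities)

    update_service_capabilities('host1', {'hypervisor_hostname': 'node1'})
    update_service_capabilities('host1', {'hypervisor_hostname': 'node2'})
    update_service_capabilities('host2', {})  # non-bare-metal service

    print(sorted(service_states, key=str))
    # [('host1', 'node1'), ('host1', 'node2'), ('host2', None)]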
nova/tests/scheduler/fakes.py
@@ -29,38 +29,42 @@ COMPUTE_NODES = [
        dict(id=1, local_gb=1024, memory_mb=1024, vcpus=1,
             disk_available_least=512, free_ram_mb=512, vcpus_used=1,
             free_disk_mb=512, local_gb_used=0, updated_at=None,
-            service=dict(host='host1', disabled=False)),
+            service=dict(host='host1', disabled=False),
+            hypervisor_hostname='node1'),
        dict(id=2, local_gb=2048, memory_mb=2048, vcpus=2,
             disk_available_least=1024, free_ram_mb=1024, vcpus_used=2,
             free_disk_mb=1024, local_gb_used=0, updated_at=None,
-            service=dict(host='host2', disabled=True)),
+            service=dict(host='host2', disabled=True),
+            hypervisor_hostname='node2'),
        dict(id=3, local_gb=4096, memory_mb=4096, vcpus=4,
             disk_available_least=3072, free_ram_mb=3072, vcpus_used=1,
             free_disk_mb=3072, local_gb_used=0, updated_at=None,
-            service=dict(host='host3', disabled=False)),
+            service=dict(host='host3', disabled=False),
+            hypervisor_hostname='node3'),
        dict(id=4, local_gb=8192, memory_mb=8192, vcpus=8,
             disk_available_least=8192, free_ram_mb=8192, vcpus_used=0,
             free_disk_mb=8192, local_gb_used=0, updated_at=None,
-            service=dict(host='host4', disabled=False)),
+            service=dict(host='host4', disabled=False),
+            hypervisor_hostname='node4'),
        # Broken entry
        dict(id=5, local_gb=1024, memory_mb=1024, vcpus=1, service=None),
 ]

 INSTANCES = [
        dict(root_gb=512, ephemeral_gb=0, memory_mb=512, vcpus=1,
-            host='host1'),
+            host='host1', node='node1'),
        dict(root_gb=512, ephemeral_gb=0, memory_mb=512, vcpus=1,
-            host='host2'),
+            host='host2', node='node2'),
        dict(root_gb=512, ephemeral_gb=0, memory_mb=512, vcpus=1,
-            host='host2'),
+            host='host2', node='node2'),
        dict(root_gb=1024, ephemeral_gb=0, memory_mb=1024, vcpus=1,
-            host='host3'),
+            host='host3', node='node3'),
        # Broken host
        dict(root_gb=1024, ephemeral_gb=0, memory_mb=1024, vcpus=1,
             host=None),
        # No matching host
        dict(root_gb=1024, ephemeral_gb=0, memory_mb=1024, vcpus=1,
-            host='host5'),
+            host='host5', node='node5'),
 ]

@@ -96,8 +100,8 @@ class FakeHostManager(host_manager.HostManager):


 class FakeHostState(host_manager.HostState):
-    def __init__(self, host, attribute_dict):
-        super(FakeHostState, self).__init__(host)
+    def __init__(self, host, node, attribute_dict):
+        super(FakeHostState, self).__init__(host, node)
         for (key, val) in attribute_dict.iteritems():
             setattr(self, key, val)

nova/tests/scheduler/test_filter_scheduler.py
@@ -183,7 +183,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
         sched = fakes.FakeFilterScheduler()

         def _return_hosts(*args, **kwargs):
-            host_state = host_manager.HostState('host2')
+            host_state = host_manager.HostState('host2', 'node2')
             return [least_cost.WeightedHost(1.0, host_state=host_state)]

         self.stubs.Set(sched, '_schedule', _return_hosts)
@@ -209,7 +209,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
         self.assertEquals(len(fns), 1)
         weight, fn = fns[0]
         self.assertEquals(weight, -1.0)
-        hostinfo = host_manager.HostState('host')
+        hostinfo = host_manager.HostState('host', 'node')
         hostinfo.update_from_compute_node(dict(memory_mb=1000,
                 local_gb=0, vcpus=1, disk_available_least=1000,
                 free_disk_mb=1000, free_ram_mb=872, vcpus_used=0,
@@ -307,7 +307,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
         filter_properties = {'retry': retry}
         sched = fakes.FakeFilterScheduler()

-        host_state = host_manager.HostState('host')
+        host_state = host_manager.HostState('host', 'node')
         host_state.limits['vcpus'] = 5
         sched._post_select_populate_filter_properties(filter_properties,
                 host_state)
@@ -331,7 +331,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
         filter_properties = {'retry': retry}
         reservations = None

-        host = fakes.FakeHostState('host', {})
+        host = fakes.FakeHostState('host', 'node', {})
         weighted_host = least_cost.WeightedHost(1, host)
         hosts = [weighted_host]

nova/tests/scheduler/test_host_filters.py
@@ -291,7 +291,7 @@ class HostFiltersTestCase(test.TestCase):

     def test_all_host_filter(self):
         filt_cls = self.class_map['AllHostsFilter']()
-        host = fakes.FakeHostState('host1', {})
+        host = fakes.FakeHostState('host1', 'node1', {})
         self.assertTrue(filt_cls.host_passes(host, {}))

     def _stub_service_is_up(self, ret_value):
@@ -301,7 +301,7 @@ class HostFiltersTestCase(test.TestCase):

     def test_affinity_different_filter_passes(self):
         filt_cls = self.class_map['DifferentHostFilter']()
-        host = fakes.FakeHostState('host1', {})
+        host = fakes.FakeHostState('host1', 'node1', {})
         instance = fakes.FakeInstance(context=self.context,
                                       params={'host': 'host2'})
         instance_uuid = instance.uuid
@@ -314,7 +314,7 @@ class HostFiltersTestCase(test.TestCase):

     def test_affinity_different_filter_no_list_passes(self):
         filt_cls = self.class_map['DifferentHostFilter']()
-        host = fakes.FakeHostState('host1', {})
+        host = fakes.FakeHostState('host1', 'node1', {})
         instance = fakes.FakeInstance(context=self.context,
                                       params={'host': 'host2'})
         instance_uuid = instance.uuid
@@ -327,7 +327,7 @@ class HostFiltersTestCase(test.TestCase):

     def test_affinity_different_filter_fails(self):
         filt_cls = self.class_map['DifferentHostFilter']()
-        host = fakes.FakeHostState('host1', {})
+        host = fakes.FakeHostState('host1', 'node1', {})
         instance = fakes.FakeInstance(context=self.context,
                                       params={'host': 'host1'})
         instance_uuid = instance.uuid
@@ -340,7 +340,11 @@ class HostFiltersTestCase(test.TestCase):

     def test_affinity_different_filter_handles_none(self):
         filt_cls = self.class_map['DifferentHostFilter']()
-        host = fakes.FakeHostState('host1', {})
+        host = fakes.FakeHostState('host1', 'node1', {})
         instance = fakes.FakeInstance(context=self.context,
                                       params={'host': 'host2'})
         instance_uuid = instance.uuid
+
+        filter_properties = {'context': self.context.elevated(),
+                             'scheduler_hints': None}
+
@@ -348,7 +352,7 @@ class HostFiltersTestCase(test.TestCase):

     def test_affinity_same_filter_no_list_passes(self):
         filt_cls = self.class_map['SameHostFilter']()
-        host = fakes.FakeHostState('host1', {})
+        host = fakes.FakeHostState('host1', 'node1', {})
         instance = fakes.FakeInstance(context=self.context,
                                       params={'host': 'host1'})
         instance_uuid = instance.uuid
@@ -361,7 +365,7 @@ class HostFiltersTestCase(test.TestCase):

     def test_affinity_same_filter_passes(self):
         filt_cls = self.class_map['SameHostFilter']()
-        host = fakes.FakeHostState('host1', {})
+        host = fakes.FakeHostState('host1', 'node1', {})
         instance = fakes.FakeInstance(context=self.context,
                                       params={'host': 'host1'})
         instance_uuid = instance.uuid
@@ -374,7 +378,7 @@ class HostFiltersTestCase(test.TestCase):

     def test_affinity_same_filter_fails(self):
         filt_cls = self.class_map['SameHostFilter']()
-        host = fakes.FakeHostState('host1', {})
+        host = fakes.FakeHostState('host1', 'node1', {})
         instance = fakes.FakeInstance(context=self.context,
                                       params={'host': 'host2'})
         instance_uuid = instance.uuid
@@ -387,7 +391,11 @@ class HostFiltersTestCase(test.TestCase):

     def test_affinity_same_filter_handles_none(self):
         filt_cls = self.class_map['SameHostFilter']()
-        host = fakes.FakeHostState('host1', {})
+        host = fakes.FakeHostState('host1', 'node1', {})
         instance = fakes.FakeInstance(context=self.context,
                                       params={'host': 'host2'})
         instance_uuid = instance.uuid
+
+        filter_properties = {'context': self.context.elevated(),
+                             'scheduler_hints': None}
+
@@ -395,7 +403,7 @@ class HostFiltersTestCase(test.TestCase):

     def test_affinity_simple_cidr_filter_passes(self):
         filt_cls = self.class_map['SimpleCIDRAffinityFilter']()
-        host = fakes.FakeHostState('host1', {})
+        host = fakes.FakeHostState('host1', 'node1', {})
         host.capabilities = {'host_ip': '10.8.1.1'}

         affinity_ip = "10.8.1.100"
@@ -409,7 +417,7 @@ class HostFiltersTestCase(test.TestCase):

     def test_affinity_simple_cidr_filter_fails(self):
         filt_cls = self.class_map['SimpleCIDRAffinityFilter']()
-        host = fakes.FakeHostState('host1', {})
+        host = fakes.FakeHostState('host1', 'node1', {})
         host.capabilities = {'host_ip': '10.8.1.1'}

         affinity_ip = "10.8.1.100"
@@ -423,7 +431,7 @@ class HostFiltersTestCase(test.TestCase):

     def test_affinity_simple_cidr_filter_handles_none(self):
         filt_cls = self.class_map['SimpleCIDRAffinityFilter']()
-        host = fakes.FakeHostState('host1', {})
+        host = fakes.FakeHostState('host1', 'node1', {})

         affinity_ip = flags.FLAGS.my_ip.split('.')[0:3]
         affinity_ip.append('100')
@@ -440,7 +448,7 @@ class HostFiltersTestCase(test.TestCase):
         filter_properties = {'instance_type': {'memory_mb': 1024}}
         capabilities = {'enabled': True}
         service = {'disabled': False}
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'free_ram_mb': 1024, 'capabilities': capabilities,
                  'service': service})
         self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -456,7 +464,7 @@ class HostFiltersTestCase(test.TestCase):

         capabilities = {'enabled': True}
         service = {'disabled': False}
-        host = fakes.FakeHostState('fake_host',
+        host = fakes.FakeHostState('fake_host', 'fake_node',
                 {'capabilities': capabilities,
                  'service': service})
         #True since empty
@@ -482,7 +490,7 @@ class HostFiltersTestCase(test.TestCase):
                             'instance_type': {'name': 'fake2'}}
         capabilities = {'enabled': True}
         service = {'disabled': False}
-        host = fakes.FakeHostState('fake_host',
+        host = fakes.FakeHostState('fake_host', 'fake_node',
                 {'capabilities': capabilities,
                  'service': service})
         #True since no aggregates
@@ -501,7 +509,7 @@ class HostFiltersTestCase(test.TestCase):
         filter_properties = {'instance_type': {'memory_mb': 1024}}
         capabilities = {'enabled': True}
         service = {'disabled': False}
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'free_ram_mb': 1023, 'total_usable_ram_mb': 1024,
                  'capabilities': capabilities, 'service': service})
         self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -513,7 +521,7 @@ class HostFiltersTestCase(test.TestCase):
         filter_properties = {'instance_type': {'memory_mb': 1024}}
         capabilities = {'enabled': True}
         service = {'disabled': False}
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'free_ram_mb': 1024, 'total_usable_ram_mb': 1024,
                  'capabilities': capabilities, 'service': service})
         self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -525,7 +533,7 @@ class HostFiltersTestCase(test.TestCase):
         filter_properties = {'instance_type': {'memory_mb': 1024}}
         capabilities = {'enabled': True}
         service = {'disabled': False}
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'free_ram_mb': -1024, 'total_usable_ram_mb': 2048,
                  'capabilities': capabilities, 'service': service})
         self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -539,7 +547,7 @@ class HostFiltersTestCase(test.TestCase):
                                                'ephemeral_gb': 1}}
         capabilities = {'enabled': True}
         service = {'disabled': False}
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13,
                  'capabilities': capabilities, 'service': service})
         self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -552,7 +560,7 @@ class HostFiltersTestCase(test.TestCase):
                                                'ephemeral_gb': 1}}
         capabilities = {'enabled': True}
         service = {'disabled': False}
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13,
                  'capabilities': capabilities, 'service': service})
         self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -566,7 +574,7 @@ class HostFiltersTestCase(test.TestCase):
         capabilities = {'enabled': True}
         service = {'disabled': False}
         # 1GB used... so 119GB allowed...
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12,
                  'capabilities': capabilities, 'service': service})
         self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -581,7 +589,7 @@ class HostFiltersTestCase(test.TestCase):
         capabilities = {'enabled': True}
         service = {'disabled': False}
         # 1GB used... so 119GB allowed...
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12,
                  'capabilities': capabilities, 'service': service})
         self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -592,7 +600,7 @@ class HostFiltersTestCase(test.TestCase):
         filter_properties = {'instance_type': {'memory_mb': 1024}}
         capabilities = {'enabled': True}
         service = {'disabled': True}
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'free_ram_mb': 1024, 'capabilities': capabilities,
                  'service': service})
         self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -603,7 +611,7 @@ class HostFiltersTestCase(test.TestCase):
         filter_properties = {'instance_type': {'memory_mb': 1024}}
         capabilities = {'enabled': True}
         service = {'disabled': False}
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'free_ram_mb': 1024, 'capabilities': capabilities,
                  'service': service})
         self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -614,7 +622,7 @@ class HostFiltersTestCase(test.TestCase):
         filter_properties = {'instance_type': {'memory_mb': 1024}}
         capabilities = {'enabled': False}
         service = {'disabled': False}
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'free_ram_mb': 1024, 'capabilities': capabilities,
                  'service': service})
         self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -625,7 +633,7 @@ class HostFiltersTestCase(test.TestCase):
         filter_properties = {}
         capabilities = {'enabled': False}
         service = {'disabled': False}
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'free_ram_mb': 1024, 'capabilities': capabilities,
                  'service': service})
         self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -640,7 +648,7 @@ class HostFiltersTestCase(test.TestCase):
         capabilities = {'enabled': True,
                         'supported_instances': [
                             ('x86_64', 'kvm', 'hvm')]}
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'capabilities': capabilities})
         self.assertTrue(filt_cls.host_passes(host, filter_properties))

@@ -654,7 +662,7 @@ class HostFiltersTestCase(test.TestCase):
         capabilities = {'enabled': True,
                         'supported_instances': [
                             ('x86_64', 'kvm', 'hvm')]}
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'capabilities': capabilities})
         self.assertFalse(filt_cls.host_passes(host, filter_properties))

@@ -667,7 +675,7 @@ class HostFiltersTestCase(test.TestCase):
         capabilities = {'enabled': True,
                         'supported_instances': [
                             ('x86_64', 'kvm', 'hvm')]}
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'capabilities': capabilities})
         self.assertTrue(filt_cls.host_passes(host, filter_properties))

@@ -680,7 +688,7 @@ class HostFiltersTestCase(test.TestCase):
         capabilities = {'enabled': True,
                         'supported_instances': [
                             ('x86_64', 'xen', 'xen')]}
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'capabilities': capabilities})
         self.assertFalse(filt_cls.host_passes(host, filter_properties))

@@ -691,7 +699,7 @@ class HostFiltersTestCase(test.TestCase):
         capabilities = {'enabled': True,
                         'supported_instances': [
                             ('x86_64', 'kvm', 'hvm')]}
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'capabilities': capabilities})
         self.assertTrue(filt_cls.host_passes(host, filter_properties))

@@ -703,7 +711,7 @@ class HostFiltersTestCase(test.TestCase):
                      'vm_mode': 'hvm'}}
         filter_properties = {'request_spec': {'image': img_props}}
         capabilities = {'enabled': True}
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'capabilities': capabilities})
         self.assertFalse(filt_cls.host_passes(host, filter_properties))

@@ -715,7 +723,7 @@ class HostFiltersTestCase(test.TestCase):
         service = {'disabled': False}
         filter_properties = {'instance_type': {'memory_mb': 1024,
                                                'extra_specs': especs}}
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'free_ram_mb': 1024, 'capabilities': capabilities,
                  'service': service})
         assertion = self.assertTrue if passes else self.assertFalse
@@ -740,7 +748,7 @@ class HostFiltersTestCase(test.TestCase):

         filter_properties = {'context': self.context, 'instance_type':
                                  {'memory_mb': 1024}}
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'capabilities': capabilities})
         self.assertTrue(filt_cls.host_passes(host, filter_properties))

@@ -760,7 +768,8 @@ class HostFiltersTestCase(test.TestCase):
         self._create_aggregate_with_host(name='fake2', metadata=emeta)
         filter_properties = {'context': self.context,
             'instance_type': {'memory_mb': 1024, 'extra_specs': especs}}
-        host = fakes.FakeHostState('host1', {'free_ram_mb': 1024})
+        host = fakes.FakeHostState('host1', 'node1',
+                                   {'free_ram_mb': 1024})
         assertion = self.assertTrue if passes else self.assertFalse
         assertion(filt_cls.host_passes(host, filter_properties))

@@ -774,7 +783,8 @@ class HostFiltersTestCase(test.TestCase):
                                            metadata={'opt2': '2'})
         filter_properties = {'context': self.context, 'instance_type':
             {'memory_mb': 1024, 'extra_specs': extra_specs}}
-        host = fakes.FakeHostState('host1', {'free_ram_mb': 1024})
+        host = fakes.FakeHostState('host1', 'node1',
+                                   {'free_ram_mb': 1024})
         db.aggregate_host_delete(self.context.elevated(), agg2.id, 'host1')
         self.assertFalse(filt_cls.host_passes(host, filter_properties))

@@ -800,7 +810,7 @@ class HostFiltersTestCase(test.TestCase):
                 'instance_properties': {'image_ref': 'isolated'}
             }
         }
-        host = fakes.FakeHostState('non-isolated', {})
+        host = fakes.FakeHostState('non-isolated', 'node', {})
         self.assertFalse(filt_cls.host_passes(host, filter_properties))

     def test_isolated_hosts_fails_non_isolated_on_isolated(self):
@@ -811,7 +821,7 @@ class HostFiltersTestCase(test.TestCase):
                 'instance_properties': {'image_ref': 'non-isolated'}
             }
         }
-        host = fakes.FakeHostState('isolated', {})
+        host = fakes.FakeHostState('isolated', 'node', {})
         self.assertFalse(filt_cls.host_passes(host, filter_properties))

     def test_isolated_hosts_passes_isolated_on_isolated(self):
@@ -822,7 +832,7 @@ class HostFiltersTestCase(test.TestCase):
                 'instance_properties': {'image_ref': 'isolated'}
             }
         }
-        host = fakes.FakeHostState('isolated', {})
+        host = fakes.FakeHostState('isolated', 'node', {})
         self.assertTrue(filt_cls.host_passes(host, filter_properties))

     def test_isolated_hosts_passes_non_isolated_on_non_isolated(self):
@@ -833,7 +843,7 @@ class HostFiltersTestCase(test.TestCase):
                 'instance_properties': {'image_ref': 'non-isolated'}
             }
         }
-        host = fakes.FakeHostState('non-isolated', {})
+        host = fakes.FakeHostState('non-isolated', 'node', {})
         self.assertTrue(filt_cls.host_passes(host, filter_properties))

     def test_json_filter_passes(self):
@@ -843,7 +853,7 @@ class HostFiltersTestCase(test.TestCase):
                                                'ephemeral_gb': 0},
                              'scheduler_hints': {'query': self.json_query}}
         capabilities = {'enabled': True}
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'free_ram_mb': 1024,
                  'free_disk_mb': 200 * 1024,
                  'capabilities': capabilities})
@@ -855,7 +865,7 @@ class HostFiltersTestCase(test.TestCase):
                                                'root_gb': 200,
                                                'ephemeral_gb': 0}}
         capabilities = {'enabled': True}
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'free_ram_mb': 0,
                  'free_disk_mb': 0,
                  'capabilities': capabilities})
@@ -868,7 +878,7 @@ class HostFiltersTestCase(test.TestCase):
                                                'ephemeral_gb': 0},
                              'scheduler_hints': {'query': self.json_query}}
         capabilities = {'enabled': True}
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'free_ram_mb': 1023,
                  'free_disk_mb': 200 * 1024,
                  'capabilities': capabilities})
@@ -881,7 +891,7 @@ class HostFiltersTestCase(test.TestCase):
                                                'ephemeral_gb': 0},
                              'scheduler_hints': {'query': self.json_query}}
         capabilities = {'enabled': True}
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'free_ram_mb': 1024,
                  'free_disk_mb': (200 * 1024) - 1,
                  'capabilities': capabilities})
@@ -898,7 +908,7 @@ class HostFiltersTestCase(test.TestCase):
                                                'ephemeral_gb': 0},
                              'scheduler_hints': {'query': json_query}}
         capabilities = {'enabled': False}
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'free_ram_mb': 1024,
                  'free_disk_mb': 200 * 1024,
                  'capabilities': capabilities})
@@ -915,7 +925,7 @@ class HostFiltersTestCase(test.TestCase):
                              'scheduler_hints': {'query': json_query}}
         capabilities = {'enabled': True}
         service = {'disabled': True}
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'free_ram_mb': 1024,
                  'free_disk_mb': 200 * 1024,
                  'capabilities': capabilities})
@@ -943,7 +953,7 @@ class HostFiltersTestCase(test.TestCase):
         # Passes
         capabilities = {'enabled': True, 'opt1': 'match'}
         service = {'disabled': False}
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'free_ram_mb': 10,
                  'free_disk_mb': 200,
                  'capabilities': capabilities,
@@ -953,7 +963,7 @@ class HostFiltersTestCase(test.TestCase):
         # Passes
         capabilities = {'enabled': True, 'opt1': 'match'}
         service = {'disabled': False}
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'free_ram_mb': 40,
                  'free_disk_mb': 400,
                  'capabilities': capabilities,
@@ -963,7 +973,7 @@ class HostFiltersTestCase(test.TestCase):
         # Fails due to capabilities being disabled
         capabilities = {'enabled': False, 'opt1': 'match'}
         service = {'disabled': False}
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'free_ram_mb': 40,
                  'free_disk_mb': 400,
                  'capabilities': capabilities,
@@ -973,7 +983,7 @@ class HostFiltersTestCase(test.TestCase):
         # Fails due to being exact memory/disk we don't want
         capabilities = {'enabled': True, 'opt1': 'match'}
         service = {'disabled': False}
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'free_ram_mb': 30,
                  'free_disk_mb': 300,
                  'capabilities': capabilities,
@@ -983,7 +993,7 @@ class HostFiltersTestCase(test.TestCase):
         # Fails due to memory lower but disk higher
         capabilities = {'enabled': True, 'opt1': 'match'}
         service = {'disabled': False}
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'free_ram_mb': 20,
                  'free_disk_mb': 400,
                  'capabilities': capabilities,
@@ -993,7 +1003,7 @@ class HostFiltersTestCase(test.TestCase):
         # Fails due to capabilities 'opt1' not equal
         capabilities = {'enabled': True, 'opt1': 'no-match'}
         service = {'enabled': True}
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'free_ram_mb': 20,
                  'free_disk_mb': 400,
                  'capabilities': capabilities,
@@ -1002,7 +1012,7 @@ class HostFiltersTestCase(test.TestCase):

     def test_json_filter_basic_operators(self):
         filt_cls = self.class_map['JsonFilter']()
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'capabilities': {'enabled': True}})
         # (operator, arguments, expected_result)
         ops_to_test = [
@@ -1071,14 +1081,14 @@ class HostFiltersTestCase(test.TestCase):
                 'query': jsonutils.dumps(raw),
             },
         }
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'capabilities': {'enabled': True}})
         self.assertRaises(KeyError,
                 filt_cls.host_passes, host, filter_properties)

     def test_json_filter_empty_filters_pass(self):
         filt_cls = self.class_map['JsonFilter']()
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'capabilities': {'enabled': True}})

         raw = []
@@ -1098,7 +1108,7 @@ class HostFiltersTestCase(test.TestCase):

     def test_json_filter_invalid_num_arguments_fails(self):
         filt_cls = self.class_map['JsonFilter']()
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'capabilities': {'enabled': True}})

         raw = ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]]
@@ -1119,7 +1129,7 @@ class HostFiltersTestCase(test.TestCase):

     def test_json_filter_unknown_variable_ignored(self):
         filt_cls = self.class_map['JsonFilter']()
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'capabilities': {'enabled': True}})

         raw = ['=', '$........', 1, 1]
@@ -1142,7 +1152,7 @@ class HostFiltersTestCase(test.TestCase):
         self._stub_service_is_up(True)
         filt_cls = self.class_map['TrustedFilter']()
         filter_properties = {'instance_type': {'memory_mb': 1024}}
-        host = fakes.FakeHostState('host1', {})
+        host = fakes.FakeHostState('host1', 'node1', {})
         self.assertTrue(filt_cls.host_passes(host, filter_properties))

     def test_trusted_filter_trusted_and_trusted_passes(self):
@@ -1153,7 +1163,7 @@ class HostFiltersTestCase(test.TestCase):
         extra_specs = {'trust:trusted_host': 'trusted'}
         filter_properties = {'instance_type': {'memory_mb': 1024,
                                                'extra_specs': extra_specs}}
-        host = fakes.FakeHostState('host1', {})
+        host = fakes.FakeHostState('host1', 'node1', {})
         self.assertTrue(filt_cls.host_passes(host, filter_properties))

     def test_trusted_filter_trusted_and_untrusted_fails(self):
@@ -1164,7 +1174,7 @@ class HostFiltersTestCase(test.TestCase):
         extra_specs = {'trust:trusted_host': 'trusted'}
         filter_properties = {'instance_type': {'memory_mb': 1024,
                                                'extra_specs': extra_specs}}
-        host = fakes.FakeHostState('host1', {})
+        host = fakes.FakeHostState('host1', 'node1', {})
         self.assertFalse(filt_cls.host_passes(host, filter_properties))

     def test_trusted_filter_untrusted_and_trusted_fails(self):
@@ -1175,7 +1185,7 @@ class HostFiltersTestCase(test.TestCase):
         extra_specs = {'trust:trusted_host': 'untrusted'}
         filter_properties = {'instance_type': {'memory_mb': 1024,
                                                'extra_specs': extra_specs}}
-        host = fakes.FakeHostState('host1', {})
+        host = fakes.FakeHostState('host1', 'node1', {})
         self.assertFalse(filt_cls.host_passes(host, filter_properties))

     def test_trusted_filter_untrusted_and_untrusted_passes(self):
@@ -1186,28 +1196,28 @@ class HostFiltersTestCase(test.TestCase):
         extra_specs = {'trust:trusted_host': 'untrusted'}
         filter_properties = {'instance_type': {'memory_mb': 1024,
                                                'extra_specs': extra_specs}}
-        host = fakes.FakeHostState('host1', {})
+        host = fakes.FakeHostState('host1', 'node1', {})
         self.assertTrue(filt_cls.host_passes(host, filter_properties))

     def test_core_filter_passes(self):
         filt_cls = self.class_map['CoreFilter']()
         filter_properties = {'instance_type': {'vcpus': 1}}
         self.flags(cpu_allocation_ratio=2)
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'vcpus_total': 4, 'vcpus_used': 7})
         self.assertTrue(filt_cls.host_passes(host, filter_properties))

     def test_core_filter_fails_safe(self):
         filt_cls = self.class_map['CoreFilter']()
         filter_properties = {'instance_type': {'vcpus': 1}}
-        host = fakes.FakeHostState('host1', {})
+        host = fakes.FakeHostState('host1', 'node1', {})
         self.assertTrue(filt_cls.host_passes(host, filter_properties))

     def test_core_filter_fails(self):
         filt_cls = self.class_map['CoreFilter']()
         filter_properties = {'instance_type': {'vcpus': 1}}
         self.flags(cpu_allocation_ratio=2)
-        host = fakes.FakeHostState('host1',
+        host = fakes.FakeHostState('host1', 'node1',
                 {'vcpus_total': 4, 'vcpus_used': 8})
         self.assertFalse(filt_cls.host_passes(host, filter_properties))

@@ -1227,27 +1237,29 @@ class HostFiltersTestCase(test.TestCase):
         filt_cls = self.class_map['AvailabilityZoneFilter']()
         service = {'availability_zone': 'nova'}
         request = self._make_zone_request('nova')
-        host = fakes.FakeHostState('host1', {'service': service})
+        host = fakes.FakeHostState('host1', 'node1',
+                                   {'service': service})
         self.assertTrue(filt_cls.host_passes(host, request))

     def test_availability_zone_filter_different(self):
         filt_cls = self.class_map['AvailabilityZoneFilter']()
         service = {'availability_zone': 'nova'}
         request = self._make_zone_request('bad')
-        host = fakes.FakeHostState('host1', {'service': service})
+        host = fakes.FakeHostState('host1', 'node1',
+                                   {'service': service})
         self.assertFalse(filt_cls.host_passes(host, request))

     def test_retry_filter_disabled(self):
         """Test case where retry/re-scheduling is disabled"""
         filt_cls = self.class_map['RetryFilter']()
-        host = fakes.FakeHostState('host1', {})
+        host = fakes.FakeHostState('host1', 'node1', {})
         filter_properties = {}
         self.assertTrue(filt_cls.host_passes(host, filter_properties))

     def test_retry_filter_pass(self):
         """Host not previously tried"""
         filt_cls = self.class_map['RetryFilter']()
-        host = fakes.FakeHostState('host1', {})
+        host = fakes.FakeHostState('host1', 'node1', {})
         retry = dict(num_attempts=1, hosts=['host2', 'host3'])
         filter_properties = dict(retry=retry)
         self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -1255,7 +1267,7 @@ class HostFiltersTestCase(test.TestCase):
     def test_retry_filter_fail(self):
         """Host was already tried"""
         filt_cls = self.class_map['RetryFilter']()
-        host = fakes.FakeHostState('host1', {})
+        host = fakes.FakeHostState('host1', 'node1', {})
         retry = dict(num_attempts=1, hosts=['host3', 'host1'])
         filter_properties = dict(retry=retry)
         self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -1263,25 +1275,29 @@ class HostFiltersTestCase(test.TestCase):
     def test_filter_num_iops_passes(self):
         self.flags(max_io_ops_per_host=8)
         filt_cls = self.class_map['IoOpsFilter']()
-        host = fakes.FakeHostState('host1', {'num_io_ops': 7})
+        host = fakes.FakeHostState('host1', 'node1',
+                                   {'num_io_ops': 7})
         filter_properties = {}
         self.assertTrue(filt_cls.host_passes(host, filter_properties))

     def test_filter_num_iops_fails(self):
         self.flags(max_io_ops_per_host=8)
         filt_cls = self.class_map['IoOpsFilter']()
-        host = fakes.FakeHostState('host1', {'num_io_ops': 8})
+        host = fakes.FakeHostState('host1', 'node1',
+                                   {'num_io_ops': 8})

     def test_filter_num_instances_passes(self):
         self.flags(max_instances_per_host=5)
         filt_cls = self.class_map['NumInstancesFilter']()
-        host = fakes.FakeHostState('host1', {'num_instances': 4})
+        host = fakes.FakeHostState('host1', 'node1',
+                                   {'num_instances': 4})
         filter_properties = {}
         self.assertTrue(filt_cls.host_passes(host, filter_properties))

     def test_filter_num_instances_fails(self):
         self.flags(max_instances_per_host=5)
         filt_cls = self.class_map['NumInstancesFilter']()
-        host = fakes.FakeHostState('host1', {'num_instances': 5})
+        host = fakes.FakeHostState('host1', 'node1',
+                                   {'num_instances': 5})
         filter_properties = {}
         self.assertFalse(filt_cls.host_passes(host, filter_properties))
nova/tests/scheduler/test_host_manager.py
@@ -44,6 +44,10 @@ class HostManagerTestCase(test.TestCase):
         super(HostManagerTestCase, self).setUp()
         self.host_manager = host_manager.HostManager()

+    def tearDown(self):
+        timeutils.clear_time_override()
+        super(HostManagerTestCase, self).tearDown()
+
     def test_choose_host_filters_not_found(self):
         self.flags(scheduler_default_filters='ComputeFilterClass3')
         self.host_manager.filter_classes = [ComputeFilterClass1,
@@ -64,8 +68,8 @@ class HostManagerTestCase(test.TestCase):

     def test_filter_hosts(self):
         filters = ['fake-filter1', 'fake-filter2']
-        fake_host1 = host_manager.HostState('host1')
-        fake_host2 = host_manager.HostState('host2')
+        fake_host1 = host_manager.HostState('host1', 'node1')
+        fake_host2 = host_manager.HostState('host2', 'node2')
         hosts = [fake_host1, fake_host2]
         filter_properties = {'fake_prop': 'fake_val'}

@@ -94,8 +98,9 @@ class HostManagerTestCase(test.TestCase):
         timeutils.utcnow().AndReturn(31339)

         host1_compute_capabs = dict(free_memory=1234, host_memory=5678,
-                                    timestamp=1)
-        host2_compute_capabs = dict(free_memory=8756, timestamp=1)
+                                    timestamp=1, hypervisor_hostname='node1')
+        host2_compute_capabs = dict(free_memory=8756, timestamp=1,
+                                    hypervisor_hostname='node2')

         self.mox.ReplayAll()
         self.host_manager.update_service_capabilities('compute', 'host1',
@@ -109,8 +114,27 @@ class HostManagerTestCase(test.TestCase):
         host1_compute_capabs['timestamp'] = 31337
         host2_compute_capabs['timestamp'] = 31339

-        expected = {'host1': host1_compute_capabs,
-                    'host2': host2_compute_capabs}
+        expected = {('host1', 'node1'): host1_compute_capabs,
+                    ('host2', 'node2'): host2_compute_capabs}
         self.assertDictMatch(service_states, expected)

+    def test_update_service_capabilities_node_key(self):
+        service_states = self.host_manager.service_states
+        self.assertDictMatch(service_states, {})
+
+        host1_cap = {'hypervisor_hostname': 'host1-hvhn'}
+        host2_cap = {}
+
+        timeutils.set_time_override(31337)
+        self.host_manager.update_service_capabilities('compute', 'host1',
+                                                      host1_cap)
+        timeutils.set_time_override(31338)
+        self.host_manager.update_service_capabilities('compute', 'host2',
+                                                      host2_cap)
+        host1_cap['timestamp'] = 31337
+        host2_cap['timestamp'] = 31338
+        expected = {('host1', 'host1-hvhn'): host1_cap,
+                    ('host2', None): host2_cap}
+        self.assertDictMatch(service_states, expected)
+
     def test_get_all_host_states(self):
@@ -133,20 +157,30 @@ class HostManagerTestCase(test.TestCase):
         for i in xrange(4):
             compute_node = fakes.COMPUTE_NODES[i]
             host = compute_node['service']['host']
-            self.assertEqual(host_states_map[host].service,
+            node = compute_node['hypervisor_hostname']
+            state_key = (host, node)
+            self.assertEqual(host_states_map[state_key].service,
                              compute_node['service'])
-        self.assertEqual(host_states_map['host1'].free_ram_mb, 512)
+        self.assertEqual(host_states_map[('host1', 'node1')].free_ram_mb,
+                         512)
         # 511GB
-        self.assertEqual(host_states_map['host1'].free_disk_mb, 524288)
-        self.assertEqual(host_states_map['host2'].free_ram_mb, 1024)
+        self.assertEqual(host_states_map[('host1', 'node1')].free_disk_mb,
+                         524288)
+        self.assertEqual(host_states_map[('host2', 'node2')].free_ram_mb,
+                         1024)
         # 1023GB
-        self.assertEqual(host_states_map['host2'].free_disk_mb, 1048576)
-        self.assertEqual(host_states_map['host3'].free_ram_mb, 3072)
+        self.assertEqual(host_states_map[('host2', 'node2')].free_disk_mb,
+                         1048576)
+        self.assertEqual(host_states_map[('host3', 'node3')].free_ram_mb,
+                         3072)
         # 3071GB
-        self.assertEqual(host_states_map['host3'].free_disk_mb, 3145728)
-        self.assertEqual(host_states_map['host4'].free_ram_mb, 8192)
+        self.assertEqual(host_states_map[('host3', 'node3')].free_disk_mb,
+                         3145728)
+        self.assertEqual(host_states_map[('host4', 'node4')].free_ram_mb,
+                         8192)
         # 8191GB
-        self.assertEqual(host_states_map['host4'].free_disk_mb, 8388608)
+        self.assertEqual(host_states_map[('host4', 'node4')].free_disk_mb,
+                         8388608)


 class HostStateTestCase(test.TestCase):
@@ -156,7 +190,7 @@ class HostStateTestCase(test.TestCase):
     # in HostManagerTestCase.test_get_all_host_states()

     def test_host_state_passes_filters_passes(self):
-        fake_host = host_manager.HostState('host1')
+        fake_host = host_manager.HostState('host1', 'node1')
         filter_properties = {}

         cls1 = ComputeFilterClass1()
@@ -173,7 +207,7 @@ class HostStateTestCase(test.TestCase):
         self.assertTrue(result)

     def test_host_state_passes_filters_passes_with_ignore(self):
-        fake_host = host_manager.HostState('host1')
+        fake_host = host_manager.HostState('host1', 'node1')
         filter_properties = {'ignore_hosts': ['host2']}

         cls1 = ComputeFilterClass1()
@@ -190,7 +224,7 @@ class HostStateTestCase(test.TestCase):
         self.assertTrue(result)

     def test_host_state_passes_filters_fails(self):
-        fake_host = host_manager.HostState('host1')
+        fake_host = host_manager.HostState('host1', 'node1')
         filter_properties = {}

         cls1 = ComputeFilterClass1()
@@ -207,7 +241,7 @@ class HostStateTestCase(test.TestCase):
         self.assertFalse(result)

     def test_host_state_passes_filters_fails_from_ignore(self):
-        fake_host = host_manager.HostState('host1')
+        fake_host = host_manager.HostState('host1', 'node1')
         filter_properties = {'ignore_hosts': ['host1']}

         cls1 = ComputeFilterClass1()
@@ -224,7 +258,7 @@ class HostStateTestCase(test.TestCase):
         self.assertFalse(result)

     def test_host_state_passes_filters_skipped_from_force(self):
-        fake_host = host_manager.HostState('host1')
+        fake_host = host_manager.HostState('host1', 'node1')
         filter_properties = {'force_hosts': ['host1']}

         cls1 = ComputeFilterClass1()
@@ -257,7 +291,7 @@ class HostStateTestCase(test.TestCase):
                 local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
                 updated_at=None)

-        host = host_manager.HostState("fakehost")
+        host = host_manager.HostState("fakehost", "fakenode")
         host.update_from_compute_node(compute)

         self.assertEqual(5, host.num_instances)
@@ -272,7 +306,7 @@ class HostStateTestCase(test.TestCase):
         self.assertEqual(42, host.num_io_ops)

     def test_stat_consumption_from_instance(self):
-        host = host_manager.HostState("fakehost")
+        host = host_manager.HostState("fakehost", "fakenode")

         instance = dict(root_gb=0, ephemeral_gb=0, memory_mb=0, vcpus=0,
                         project_id='12345', vm_state=vm_states.BUILDING,
nova/tests/scheduler/test_least_cost.py
@@ -95,7 +95,7 @@ class TestWeightedHost(test.TestCase):
         self.assertDictMatch(host.to_dict(), expected)

     def test_dict_conversion_with_host_state(self):
-        host_state = host_manager.HostState('somehost')
+        host_state = host_manager.HostState('somehost', None)
         host = least_cost.WeightedHost('someweight', host_state)
         expected = {'weight': 'someweight',
                     'host': 'somehost'}
nova/tests/test_hypervapi.py
@@ -155,7 +155,7 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
         super(HyperVAPITestCase, self).tearDown()

     def test_get_available_resource(self):
-        dic = self._conn.get_available_resource()
+        dic = self._conn.get_available_resource(None)

         self.assertEquals(dic['hypervisor_hostname'], platform.node())

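The last hunk reflects a driver-interface change that goes with the above: get_available_resource() now takes a node argument (the Hyper-V test passes None). A hedged sketch of how a single-node driver can treat it (an illustrative stand-in, not the real Hyper-V driver):

    class FakeSingleNodeDriver(object):
        def get_available_resource(self, nodename):
            # a driver managing exactly one node can ignore nodename
            return {'hypervisor_hostname': 'fake-host', 'memory_mb': 4096}

    print(FakeSingleNodeDriver().get_available_resource(None))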