trunk merge after 2b hit
This commit is contained in:
0
nova/tests/scheduler/__init__.py
Normal file
0
nova/tests/scheduler/__init__.py
Normal file
206
nova/tests/scheduler/test_host_filter.py
Normal file
206
nova/tests/scheduler/test_host_filter.py
Normal file
@@ -0,0 +1,206 @@
|
||||
# Copyright 2011 OpenStack LLC.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
Tests For Scheduler Host Filters.
|
||||
"""
|
||||
|
||||
import json
|
||||
|
||||
from nova import exception
|
||||
from nova import flags
|
||||
from nova import test
|
||||
from nova.scheduler import host_filter
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
|
||||
|
||||
class FakeZoneManager:
    """Bare stand-in for the scheduler's ZoneManager.

    Tests attach a ``service_states`` dict directly instead of going
    through the real manager's constructor.
    """
|
||||
|
||||
|
||||
class HostFilterTestCase(test.TestCase):
    """Test case for host filters."""

    def _host_caps(self, multiplier):
        # Returns host capabilities in the following way:
        # host1 = memory:free 10 (100max)
        #         disk:available 100 (1000max)
        # hostN = memory:free 10 + 10N
        #         disk:available 100 + 100N
        # in other words: hostN has more resources than host0
        # which means ... don't go above 10 hosts.
        return {'host_name-description': 'XenServer %s' % multiplier,
                'host_hostname': 'xs-%s' % multiplier,
                'host_memory_total': 100,
                'host_memory_overhead': 10,
                'host_memory_free': 10 + multiplier * 10,
                'host_memory_free-computed': 10 + multiplier * 10,
                'host_other-config': {},
                'host_ip_address': '192.168.1.%d' % (100 + multiplier),
                'host_cpu_info': {},
                'disk_available': 100 + multiplier * 100,
                'disk_total': 1000,
                'disk_used': 0,
                'host_uuid': 'xxx-%d' % multiplier,
                'host_name-label': 'xs-%s' % multiplier}

    def setUp(self):
        # Fixed: call the parent setUp so base-class fixtures (stubs,
        # flag handling) are initialized -- LeastCostSchedulerTestCase
        # in this same package already does this.
        super(HostFilterTestCase, self).setUp()
        self.old_flag = FLAGS.default_host_filter
        FLAGS.default_host_filter = \
            'nova.scheduler.host_filter.AllHostsFilter'
        self.instance_type = dict(name='tiny',
                                  memory_mb=50,
                                  vcpus=10,
                                  local_gb=500,
                                  flavorid=1,
                                  swap=500,
                                  rxtx_quota=30000,
                                  rxtx_cap=200)

        # Ten fake compute hosts with linearly increasing resources.
        self.zone_manager = FakeZoneManager()
        states = {}
        for x in xrange(10):
            states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)}
        self.zone_manager.service_states = states

    def tearDown(self):
        FLAGS.default_host_filter = self.old_flag
        super(HostFilterTestCase, self).tearDown()

    def test_choose_filter(self):
        # Test default filter ...
        hf = host_filter.choose_host_filter()
        self.assertEquals(hf._full_name(),
                          'nova.scheduler.host_filter.AllHostsFilter')
        # Test valid filter ...
        hf = host_filter.choose_host_filter(
            'nova.scheduler.host_filter.InstanceTypeFilter')
        self.assertEquals(hf._full_name(),
                          'nova.scheduler.host_filter.InstanceTypeFilter')
        # Test invalid filter ... (assertRaises replaces the old
        # try/self.fail/except boilerplate)
        self.assertRaises(exception.SchedulerHostFilterNotFound,
                          host_filter.choose_host_filter,
                          'does not exist')

    def test_all_host_filter(self):
        hf = host_filter.AllHostsFilter()
        cooked = hf.instance_type_to_filter(self.instance_type)
        hosts = hf.filter_hosts(self.zone_manager, cooked)
        self.assertEquals(10, len(hosts))
        for host, capabilities in hosts:
            self.assertTrue(host.startswith('host'))

    def test_instance_type_filter(self):
        hf = host_filter.InstanceTypeFilter()
        # filter all hosts that can support 50 ram and 500 disk
        name, cooked = hf.instance_type_to_filter(self.instance_type)
        self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter',
                          name)
        hosts = hf.filter_hosts(self.zone_manager, cooked)
        self.assertEquals(6, len(hosts))
        just_hosts = [host for host, caps in hosts]
        just_hosts.sort()
        self.assertEquals('host05', just_hosts[0])
        self.assertEquals('host10', just_hosts[5])

    def test_json_filter(self):
        hf = host_filter.JsonFilter()
        # filter all hosts that can support 50 ram and 500 disk
        name, cooked = hf.instance_type_to_filter(self.instance_type)
        self.assertEquals('nova.scheduler.host_filter.JsonFilter', name)
        hosts = hf.filter_hosts(self.zone_manager, cooked)
        self.assertEquals(6, len(hosts))
        just_hosts = [host for host, caps in hosts]
        just_hosts.sort()
        self.assertEquals('host05', just_hosts[0])
        self.assertEquals('host10', just_hosts[5])

        # Try some custom queries

        raw = ['or',
                   ['and',
                       ['<', '$compute.host_memory_free', 30],
                       ['<', '$compute.disk_available', 300],
                   ],
                   ['and',
                       ['>', '$compute.host_memory_free', 70],
                       ['>', '$compute.disk_available', 700],
                   ],
              ]
        cooked = json.dumps(raw)
        hosts = hf.filter_hosts(self.zone_manager, cooked)

        self.assertEquals(5, len(hosts))
        just_hosts = [host for host, caps in hosts]
        just_hosts.sort()
        for index, host in zip([1, 2, 8, 9, 10], just_hosts):
            self.assertEquals('host%02d' % index, host)

        raw = ['not',
                  ['=', '$compute.host_memory_free', 30],
              ]
        cooked = json.dumps(raw)
        hosts = hf.filter_hosts(self.zone_manager, cooked)

        self.assertEquals(9, len(hosts))
        just_hosts = [host for host, caps in hosts]
        just_hosts.sort()
        for index, host in zip([1, 2, 4, 5, 6, 7, 8, 9, 10], just_hosts):
            self.assertEquals('host%02d' % index, host)

        raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100]
        cooked = json.dumps(raw)
        hosts = hf.filter_hosts(self.zone_manager, cooked)

        self.assertEquals(5, len(hosts))
        just_hosts = [host for host, caps in hosts]
        just_hosts.sort()
        for index, host in zip([2, 4, 6, 8, 10], just_hosts):
            self.assertEquals('host%02d' % index, host)

        # Try some bogus input ... (assertRaises replaces the old
        # try/self.fail/except KeyError boilerplate, which also bound
        # an unused 'e' via py2-only 'except KeyError, e' syntax)
        raw = ['unknown command', ]
        cooked = json.dumps(raw)
        self.assertRaises(KeyError, hf.filter_hosts,
                          self.zone_manager, cooked)

        self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps([])))
        self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps({})))
        self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps(
            ['not', True, False, True, False])))

        # NOTE(review): the extra positional args here are swallowed by
        # json.dumps (skipkeys, ensure_ascii, ...), so the query is just
        # the string 'not' -- which the filter rejects with KeyError.
        self.assertRaises(KeyError, hf.filter_hosts,
                          self.zone_manager,
                          json.dumps('not', True, False, True, False))

        self.assertFalse(hf.filter_hosts(self.zone_manager,
            json.dumps(['=', '$foo', 100])))
        self.assertFalse(hf.filter_hosts(self.zone_manager,
            json.dumps(['=', '$.....', 100])))
        self.assertFalse(hf.filter_hosts(self.zone_manager,
            json.dumps(
                ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]])))

        self.assertFalse(hf.filter_hosts(self.zone_manager,
            json.dumps(['=', {}, ['>', '$missing....foo']])))
|
||||
144
nova/tests/scheduler/test_least_cost_scheduler.py
Normal file
144
nova/tests/scheduler/test_least_cost_scheduler.py
Normal file
@@ -0,0 +1,144 @@
|
||||
# Copyright 2011 OpenStack LLC.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
Tests For Least Cost Scheduler
|
||||
"""
|
||||
|
||||
from nova import flags
|
||||
from nova import test
|
||||
from nova.scheduler import least_cost
|
||||
from nova.tests.scheduler import test_zone_aware_scheduler
|
||||
|
||||
MB = 1024 * 1024
|
||||
FLAGS = flags.FLAGS
|
||||
|
||||
|
||||
class FakeHost(object):
    """Minimal host record: an id plus the two metrics cost fns read."""

    def __init__(self, host_id, free_ram, io):
        """Record the host id, free RAM, and current I/O load."""
        self.id, self.free_ram, self.io = host_id, free_ram, io
|
||||
|
||||
|
||||
class WeightedSumTestCase(test.TestCase):
    """Unit tests for least_cost.weighted_sum()."""

    def test_empty_domain(self):
        """No hosts and no cost functions yields an empty cost list."""
        self.assertEqual([], least_cost.weighted_sum([], []))

    def test_basic_costing(self):
        """Two weighted cost functions combine into one score per host."""
        hosts = [FakeHost(1, 512 * MB, 100),
                 FakeHost(2, 256 * MB, 400),
                 FakeHost(3, 512 * MB, 100)]

        weighted_fns = [
            (1, lambda h: h.free_ram),  # Fill-first, free_ram is a *cost*
            (2, lambda h: h.io),        # Avoid high I/O
        ]

        costs = least_cost.weighted_sum(
            domain=hosts, weighted_fns=weighted_fns)

        # Each 256 MB unit of free-ram contributes 0.5 points by way of:
        #   cost = weight * (score/max_score) = 1 * (256/512) = 0.5
        # Each 100 iops of IO adds 0.5 points by way of:
        #   cost = 2 * (100/400) = 2 * 0.25 = 0.5
        self.assertEqual([1.5, 2.5, 1.5], costs)
|
||||
|
||||
|
||||
class LeastCostSchedulerTestCase(test.TestCase):
    """Exercise LeastCostScheduler's flag-driven cost-function weighing."""

    def setUp(self):
        super(LeastCostSchedulerTestCase, self).setUp()

        class FakeZoneManager:
            pass

        fake_zm = FakeZoneManager()
        fake_zm.service_states = \
            test_zone_aware_scheduler.fake_zone_manager_service_states(
                num_hosts=10)

        self.sched = least_cost.LeastCostScheduler()
        self.sched.zone_manager = fake_zm

    def tearDown(self):
        super(LeastCostSchedulerTestCase, self).tearDown()

    def assertWeights(self, expected, num, request_spec, hosts):
        """Weigh hosts and compare (approximately) against expected."""
        weighted = self.sched.weigh_hosts(num, request_spec, hosts)
        self.assertDictListMatch(weighted, expected, approx_equal=True)

    def test_no_hosts(self):
        """No candidate hosts yields an empty weighting."""
        num = 1
        request_spec = {}
        self.assertWeights([], num, request_spec, [])

    def test_noop_cost_fn(self):
        """noop cost fn scores every host the same: the configured weight."""
        FLAGS.least_cost_scheduler_cost_functions = [
            'nova.scheduler.least_cost.noop_cost_fn',
        ]
        FLAGS.noop_cost_fn_weight = 1

        num = 1
        request_spec = {}
        hosts = self.sched.filter_hosts(num, request_spec)

        expected = [dict(weight=1, hostname=hostname)
                    for hostname, caps in hosts]
        self.assertWeights(expected, num, request_spec, hosts)

    def test_cost_fn_weights(self):
        """Doubling the flag weight doubles every host's score."""
        FLAGS.least_cost_scheduler_cost_functions = [
            'nova.scheduler.least_cost.noop_cost_fn',
        ]
        FLAGS.noop_cost_fn_weight = 2

        num = 1
        request_spec = {}
        hosts = self.sched.filter_hosts(num, request_spec)

        expected = [dict(weight=2, hostname=hostname)
                    for hostname, caps in hosts]
        self.assertWeights(expected, num, request_spec, hosts)

    def test_fill_first_cost_fn(self):
        """fill-first cost grows with each host's free ram."""
        FLAGS.least_cost_scheduler_cost_functions = [
            'nova.scheduler.least_cost.fill_first_cost_fn',
        ]
        FLAGS.fill_first_cost_fn_weight = 1

        num = 1
        request_spec = {}
        hosts = self.sched.filter_hosts(num, request_spec)

        # Costs are normalized so over 10 hosts, each host with increasing
        # free ram will cost 1/N more. Since the lowest cost host has some
        # free ram, we add in the 1/N for the base_cost.
        expected = [dict(weight=0.1 + (0.1 * idx), hostname=hostname)
                    for idx, (hostname, caps) in enumerate(hosts)]
        self.assertWeights(expected, num, request_spec, hosts)
|
||||
296
nova/tests/scheduler/test_zone_aware_scheduler.py
Normal file
296
nova/tests/scheduler/test_zone_aware_scheduler.py
Normal file
@@ -0,0 +1,296 @@
|
||||
# Copyright 2011 OpenStack LLC.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
Tests For Zone Aware Scheduler.
|
||||
"""
|
||||
|
||||
from nova import exception
|
||||
from nova import test
|
||||
from nova.scheduler import driver
|
||||
from nova.scheduler import zone_aware_scheduler
|
||||
from nova.scheduler import zone_manager
|
||||
|
||||
|
||||
def _host_caps(multiplier):
|
||||
# Returns host capabilities in the following way:
|
||||
# host1 = memory:free 10 (100max)
|
||||
# disk:available 100 (1000max)
|
||||
# hostN = memory:free 10 + 10N
|
||||
# disk:available 100 + 100N
|
||||
# in other words: hostN has more resources than host0
|
||||
# which means ... don't go above 10 hosts.
|
||||
return {'host_name-description': 'XenServer %s' % multiplier,
|
||||
'host_hostname': 'xs-%s' % multiplier,
|
||||
'host_memory_total': 100,
|
||||
'host_memory_overhead': 10,
|
||||
'host_memory_free': 10 + multiplier * 10,
|
||||
'host_memory_free-computed': 10 + multiplier * 10,
|
||||
'host_other-config': {},
|
||||
'host_ip_address': '192.168.1.%d' % (100 + multiplier),
|
||||
'host_cpu_info': {},
|
||||
'disk_available': 100 + multiplier * 100,
|
||||
'disk_total': 1000,
|
||||
'disk_used': 0,
|
||||
'host_uuid': 'xxx-%d' % multiplier,
|
||||
'host_name-label': 'xs-%s' % multiplier}
|
||||
|
||||
|
||||
def fake_zone_manager_service_states(num_hosts):
    """Build a ZoneManager-style service_states dict for num_hosts hosts.

    Keys are 'host01'..'hostNN'; each maps to the compute capabilities
    from _host_caps, which grow linearly with the host index.
    """
    return dict(('host%02d' % (idx + 1), {'compute': _host_caps(idx)})
                for idx in xrange(num_hosts))
|
||||
|
||||
|
||||
class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler):
    """ZoneAwareScheduler with canned filtering and weighing for tests."""

    def filter_hosts(self, num, specs):
        # NOTE(sirp): this is returning [(hostname, services)]
        return self.zone_manager.service_states.items()

    def weigh_hosts(self, num, specs, hosts):
        """Give every host the same fake weight."""
        fake_weight = 99
        return [dict(weight=fake_weight, name=hostname)
                for hostname, caps in hosts]
|
||||
|
||||
|
||||
class FakeZoneManager(zone_manager.ZoneManager):
    """ZoneManager fake preloaded with three hosts of increasing RAM."""

    def __init__(self):
        self.service_states = {}
        for idx in (1, 2, 3):
            self.service_states['host%d' % idx] = {
                'compute': {'ram': idx * 1000},
            }
|
||||
|
||||
|
||||
class FakeEmptyZoneManager(zone_manager.ZoneManager):
    """ZoneManager fake that knows about no services at all."""

    def __init__(self):
        self.service_states = {}
|
||||
|
||||
|
||||
def fake_empty_call_zone_method(context, method, specs):
    """Cross-zone call stub: every child zone reports nothing."""
    return []
|
||||
|
||||
|
||||
# Module-level flag the fake_* stubs below flip to True when invoked;
# each test resets it to False before exercising the scheduler.
# (As the original author noted, mox stubs would be a cleaner way to
# record calls.)
was_called = False
|
||||
|
||||
|
||||
def fake_provision_resource(context, item, instance_id, request_spec, kwargs):
    """Stub for ZoneAwareScheduler._provision_resource.

    Records the call by flipping the module-level ``was_called`` flag;
    does no real provisioning.
    """
    global was_called
    was_called = True
|
||||
|
||||
|
||||
def fake_ask_child_zone_to_create_instance(context, zone_info,
                                           request_spec, kwargs):
    """Stub for ZoneAwareScheduler._ask_child_zone_to_create_instance.

    Flips the module-level ``was_called`` flag instead of contacting a
    child zone.
    """
    global was_called
    was_called = True
|
||||
|
||||
|
||||
def fake_provision_resource_locally(context, item, instance_id, kwargs):
    """Stub for ZoneAwareScheduler._provision_resource_locally.

    Flips the module-level ``was_called`` flag; does no local provisioning.
    """
    global was_called
    was_called = True
|
||||
|
||||
|
||||
def fake_provision_resource_from_blob(context, item, instance_id,
                                      request_spec, kwargs):
    """Stub for ZoneAwareScheduler._provision_resource_from_blob.

    Flips the module-level ``was_called`` flag instead of decoding and
    acting on a blob.
    """
    global was_called
    was_called = True
|
||||
|
||||
|
||||
def fake_decrypt_blob_returns_local_info(blob):
    """Pretend the blob decrypted to host-local placement info."""
    return {'foo': True}  # values aren't important.
|
||||
|
||||
|
||||
def fake_decrypt_blob_returns_child_info(blob):
    """Pretend the blob decrypted to child-zone routing info."""
    # Values aren't important -- only the keys are inspected.
    return {'child_zone': True, 'child_blob': True}
|
||||
|
||||
|
||||
def fake_call_zone_method(context, method, specs):
    """Canned cross-zone reply: three zones, four weighted blobs each.

    Each zone includes exactly one low-weight (i.e. attractive) entry.
    """
    zone1 = [dict(weight=1, blob='AAAAAAA'),
             dict(weight=111, blob='BBBBBBB'),
             dict(weight=112, blob='CCCCCCC'),
             dict(weight=113, blob='DDDDDDD')]
    zone2 = [dict(weight=120, blob='EEEEEEE'),
             dict(weight=2, blob='FFFFFFF'),
             dict(weight=122, blob='GGGGGGG'),
             dict(weight=123, blob='HHHHHHH')]
    zone3 = [dict(weight=130, blob='IIIIIII'),
             dict(weight=131, blob='JJJJJJJ'),
             dict(weight=132, blob='KKKKKKK'),
             dict(weight=3, blob='LLLLLLL')]
    return [('zone1', zone1), ('zone2', zone2), ('zone3', zone3)]
|
||||
|
||||
|
||||
class ZoneAwareSchedulerTestCase(test.TestCase):
    """Test case for Zone Aware Scheduler."""

    def test_zone_aware_scheduler(self):
        """
        Create a nested set of FakeZones, ensure that a select call returns the
        appropriate build plan.
        """
        sched = FakeZoneAwareScheduler()
        self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method)

        zm = FakeZoneManager()
        sched.set_zone_manager(zm)

        fake_context = {}
        build_plan = sched.select(fake_context, {})

        # 3 local hosts (FakeZoneManager) + 4 entries from each of the
        # 3 child zones (fake_call_zone_method) = 15 plan items.
        self.assertEqual(15, len(build_plan))

        # Only the locally-weighed hosts carry a 'name' key.
        hostnames = [plan_item['name']
                     for plan_item in build_plan if 'name' in plan_item]
        self.assertEqual(3, len(hostnames))

    def test_empty_zone_aware_scheduler(self):
        """
        Ensure empty hosts & child_zones result in NoValidHosts exception.
        """
        sched = FakeZoneAwareScheduler()
        self.stubs.Set(sched, '_call_zone_method', fake_empty_call_zone_method)

        zm = FakeEmptyZoneManager()
        sched.set_zone_manager(zm)

        fake_context = {}
        self.assertRaises(driver.NoValidHost, sched.schedule_run_instance,
                          fake_context, 1,
                          dict(host_filter=None,
                               request_spec={'instance_type': {}}))

    def test_schedule_do_not_schedule_with_hint(self):
        """
        Check the local/child zone routing in the run_instance() call.
        If the zone_blob hint was passed in, don't re-schedule.
        """
        global was_called
        sched = FakeZoneAwareScheduler()
        was_called = False
        self.stubs.Set(sched, '_provision_resource', fake_provision_resource)
        request_spec = {
            'instance_properties': {},
            'instance_type': {},
            'filter_driver': 'nova.scheduler.host_filter.AllHostsFilter',
            'blob': "Non-None blob data",
        }

        result = sched.schedule_run_instance(None, 1, request_spec)
        self.assertEquals(None, result)
        self.assertTrue(was_called)

    def test_provision_resource_local(self):
        """Provision a resource locally or remotely."""
        global was_called
        sched = FakeZoneAwareScheduler()
        was_called = False
        self.stubs.Set(sched, '_provision_resource_locally',
                       fake_provision_resource_locally)

        # A 'hostname' key routes provisioning down the local path.
        request_spec = {'hostname': "foo"}
        sched._provision_resource(None, request_spec, 1, request_spec, {})
        self.assertTrue(was_called)

    def test_provision_resource_remote(self):
        """Provision a resource locally or remotely."""
        global was_called
        sched = FakeZoneAwareScheduler()
        was_called = False
        self.stubs.Set(sched, '_provision_resource_from_blob',
                       fake_provision_resource_from_blob)

        # No 'hostname' key: provisioning is delegated to the blob path.
        request_spec = {}
        sched._provision_resource(None, request_spec, 1, request_spec, {})
        self.assertTrue(was_called)

    def test_provision_resource_from_blob_empty(self):
        """Provision a resource locally or remotely given no hints."""
        # Fixed: dropped an unused 'global was_called' declaration and an
        # unused 'request_spec' local -- this test only checks the raise.
        sched = FakeZoneAwareScheduler()
        self.assertRaises(zone_aware_scheduler.InvalidBlob,
                          sched._provision_resource_from_blob,
                          None, {}, 1, {}, {})

    def test_provision_resource_from_blob_with_local_blob(self):
        """
        Provision a resource locally or remotely when blob hint passed in.
        """
        global was_called
        sched = FakeZoneAwareScheduler()
        was_called = False
        # Decrypt yields local info (no 'child_zone' key), so the local
        # provisioning path must be taken.
        self.stubs.Set(sched, '_decrypt_blob',
                       fake_decrypt_blob_returns_local_info)
        self.stubs.Set(sched, '_provision_resource_locally',
                       fake_provision_resource_locally)

        request_spec = {'blob': "Non-None blob data"}

        sched._provision_resource_from_blob(None, request_spec, 1,
                                            request_spec, {})
        self.assertTrue(was_called)

    def test_provision_resource_from_blob_with_child_blob(self):
        """
        Provision a resource locally or remotely when child blob hint
        passed in.
        """
        global was_called
        sched = FakeZoneAwareScheduler()
        # Decrypt yields child-zone info, so the request must be handed
        # off to the child zone.
        self.stubs.Set(sched, '_decrypt_blob',
                       fake_decrypt_blob_returns_child_info)
        was_called = False
        self.stubs.Set(sched, '_ask_child_zone_to_create_instance',
                       fake_ask_child_zone_to_create_instance)

        request_spec = {'blob': "Non-None blob data"}

        sched._provision_resource_from_blob(None, request_spec, 1,
                                            request_spec, {})
        self.assertTrue(was_called)

    def test_provision_resource_from_blob_with_immediate_child_blob(self):
        """
        Provision a resource locally or remotely when blob hint passed in
        from an immediate child.
        """
        global was_called
        sched = FakeZoneAwareScheduler()
        was_called = False
        self.stubs.Set(sched, '_ask_child_zone_to_create_instance',
                       fake_ask_child_zone_to_create_instance)

        # 'child_blob'/'child_zone' keys bypass decryption entirely.
        request_spec = {'child_blob': True, 'child_zone': True}

        sched._provision_resource_from_blob(None, request_spec, 1,
                                            request_spec, {})
        self.assertTrue(was_called)
|
||||
Reference in New Issue
Block a user