Merge with trunk
@@ -270,8 +270,10 @@ DEFINE_list('region_list',
DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake')
DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID')
DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key')
DEFINE_integer('glance_port', 9292, 'glance port')
DEFINE_string('glance_host', '$my_ip', 'glance host')
# NOTE(sirp): my_ip interpolation doesn't work within nested structures
DEFINE_list('glance_api_servers',
            ['127.0.0.1:9292'],
            'list of glance api servers available to nova (host:port)')
DEFINE_integer('s3_port', 3333, 's3 port')
DEFINE_string('s3_host', '$my_ip', 's3 host (for infrastructure)')
DEFINE_string('s3_dmz', '$my_ip', 's3 dmz ip (for instances)')
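Background on the new flag: glance_api_servers stores endpoints as plain 'host:port' strings because, as the NOTE(sirp) comment says, $my_ip interpolation does not work inside nested structures. A minimal sketch of how such an entry could be consumed; the helper name below is illustrative, not part of this change:

# Illustrative only: split a "host:port" entry taken from a list flag such as
# glance_api_servers. pick_glance_api_server is a hypothetical helper.
import random

def pick_glance_api_server(api_servers):
    """Return (host, port) chosen at random from ['host:port', ...]."""
    host, port_str = random.choice(api_servers).split(':')
    return host, int(port_str)

# Example: pick_glance_api_server(['127.0.0.1:9292']) -> ('127.0.0.1', 9292)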
@@ -111,7 +111,8 @@ def _process(func, zone):
    return func(nova, zone)


def call_zone_method(context, method, errors_to_ignore=None, *args, **kwargs):
def call_zone_method(context, method_name, errors_to_ignore=None,
                     novaclient_collection_name='zones', *args, **kwargs):
    """Returns a list of (zone, call_result) objects."""
    if not isinstance(errors_to_ignore, (list, tuple)):
        # This will also handle the default None
@@ -131,18 +132,16 @@ def call_zone_method(context, method, errors_to_ignore=None, *args, **kwargs):
            #TODO (dabo) - add logic for failure counts per zone,
            # with escalation after a given number of failures.
            continue
        zone_method = getattr(nova.zones, method)
        novaclient_collection = getattr(nova, novaclient_collection_name)
        collection_method = getattr(novaclient_collection, method_name)

        def _error_trap(*args, **kwargs):
            try:
                return zone_method(*args, **kwargs)
                return collection_method(*args, **kwargs)
            except Exception as e:
                if type(e) in errors_to_ignore:
                    return None
                # TODO (dabo) - want to be able to re-raise here.
                # Returning a string now; raising was causing issues.
                # raise e
                return "ERROR", "%s" % e
                raise

        res = pool.spawn(_error_trap, *args, **kwargs)
        results.append((zone, res))
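The hunk above generalizes call_zone_method from the hard-coded nova.zones collection to any novaclient collection looked up by name. A simplified, self-contained sketch of that getattr-based dispatch, using stand-in classes rather than novaclient itself:

# FakeCollection/FakeClient are stand-ins for a novaclient connection; only the
# lookup-then-call pattern matches the change above.
class FakeCollection(object):
    def list(self):
        return ['result']

class FakeClient(object):
    zones = FakeCollection()

def call_collection_method(client, method_name, collection_name='zones'):
    collection = getattr(client, collection_name)    # e.g. client.zones
    method = getattr(collection, method_name)        # e.g. client.zones.list
    return method()

print(call_collection_method(FakeClient(), 'list'))  # ['result']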
@@ -91,6 +91,7 @@ class ZoneAwareScheduler(driver.Scheduler):
        image_id = instance_properties['image_id']
        meta = instance_properties['metadata']
        flavor_id = instance_type['flavorid']
        reservation_id = instance_properties['reservation_id']

        files = kwargs['injected_files']
        ipgroup = None  # Not supported in OS API ... yet
@@ -99,7 +100,8 @@ class ZoneAwareScheduler(driver.Scheduler):
        child_blob = zone_info['child_blob']
        zone = db.zone_get(context, child_zone)
        url = zone.api_url
        LOG.debug(_("Forwarding instance create call to child zone %(url)s")
        LOG.debug(_("Forwarding instance create call to child zone %(url)s"
            ". ReservationID=%(reservation_id)s")
            % locals())
        nova = None
        try:
@@ -110,7 +112,7 @@ class ZoneAwareScheduler(driver.Scheduler):
            "to talk to zone at %(url)s.") % locals())

        nova.servers.create(name, image_id, flavor_id, ipgroup, meta, files,
            child_blob)
            child_blob, reservation_id=reservation_id)

    def _provision_resource_from_blob(self, context, item, instance_id,
                                      request_spec, kwargs):
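These hunks thread the parent's reservation_id through to the child zone's create call so instances built remotely share one reservation. A hedged, self-contained sketch of the idea; ChildZoneClient is a stand-in for the novaclient connection used above, not nova's code:

# Stand-in client: records which reservation the forwarded create belongs to.
class ChildZoneClient(object):
    def create(self, name, image_id, flavor_id, **kwargs):
        return {'name': name, 'reservation_id': kwargs.get('reservation_id')}

def forward_create(client, name, image_id, flavor_id, reservation_id):
    # Same shape as the nova.servers.create(...) call above, with
    # reservation_id threaded through to the child zone.
    return client.create(name, image_id, flavor_id,
                         reservation_id=reservation_id)

print(forward_create(ChildZoneClient(), 'vm1', 1, 1, 'r-abc123'))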
@@ -133,11 +133,11 @@ class HostFilterTestCase(test.TestCase):
        raw = ['or',
                   ['and',
                       ['<', '$compute.host_memory_free', 30],
                       ['<', '$compute.disk_available', 300]
                       ['<', '$compute.disk_available', 300],
                   ],
                   ['and',
                       ['>', '$compute.host_memory_free', 70],
                       ['>', '$compute.disk_available', 700]
                       ['>', '$compute.disk_available', 700],
                   ]
              ]
        cooked = json.dumps(raw)
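The test data above uses the JSON query grammar accepted by the host filter: boolean operators wrapping comparisons against '$compute.<capability>' values. A simplified evaluator for that grammar, written only for illustration and not taken from the filter's actual implementation:

# Evaluates ['or'|'and', subquery, ...] and ['<'|'>', '$compute.<key>', value]
# against a dict of host capabilities.
def evaluate(query, capabilities):
    op, args = query[0], query[1:]
    if op == 'or':
        return any(evaluate(q, capabilities) for q in args)
    if op == 'and':
        return all(evaluate(q, capabilities) for q in args)
    key = args[0].replace('$compute.', '')
    value = capabilities[key]
    return value < args[1] if op == '<' else value > args[1]

caps = {'host_memory_free': 80, 'disk_available': 800}
query = ['or',
         ['and', ['<', '$compute.host_memory_free', 30],
                 ['<', '$compute.disk_available', 300]],
         ['and', ['>', '$compute.host_memory_free', 70],
                 ['>', '$compute.disk_available', 700]]]
print(evaluate(query, caps))  # True: the second 'and' branch matches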
@@ -183,12 +183,12 @@ class HostFilterTestCase(test.TestCase):
        self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps([])))
        self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps({})))
        self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps(
                ['not', True, False, True, False]
                ['not', True, False, True, False],
            )))

        try:
            hf.filter_hosts(self.zone_manager, json.dumps(
                'not', True, False, True, False
                'not', True, False, True, False,
            ))
            self.fail("Should give KeyError")
        except KeyError, e:
@@ -44,7 +44,7 @@ class WeightedSumTestCase(test.TestCase):
        hosts = [
            FakeHost(1, 512 * MB, 100),
            FakeHost(2, 256 * MB, 400),
            FakeHost(3, 512 * MB, 100)
            FakeHost(3, 512 * MB, 100),
        ]

        weighted_fns = [
@@ -96,7 +96,7 @@ class LeastCostSchedulerTestCase(test.TestCase):

    def test_noop_cost_fn(self):
        FLAGS.least_cost_scheduler_cost_functions = [
            'nova.scheduler.least_cost.noop_cost_fn'
            'nova.scheduler.least_cost.noop_cost_fn',
        ]
        FLAGS.noop_cost_fn_weight = 1

@@ -110,7 +110,7 @@ class LeastCostSchedulerTestCase(test.TestCase):

    def test_cost_fn_weights(self):
        FLAGS.least_cost_scheduler_cost_functions = [
            'nova.scheduler.least_cost.noop_cost_fn'
            'nova.scheduler.least_cost.noop_cost_fn',
        ]
        FLAGS.noop_cost_fn_weight = 2

@@ -124,7 +124,7 @@ class LeastCostSchedulerTestCase(test.TestCase):

    def test_fill_first_cost_fn(self):
        FLAGS.least_cost_scheduler_cost_functions = [
            'nova.scheduler.least_cost.fill_first_cost_fn'
            'nova.scheduler.least_cost.fill_first_cost_fn',
        ]
        FLAGS.fill_first_cost_fn_weight = 1

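These tests configure cost functions and weights for the least-cost scheduler. Roughly, each cost function scores every candidate host, the scores are multiplied by their weights and summed, and the cheapest host wins. A simplified sketch of that weighted-sum selection, not the least_cost module itself:

def weighted_sum(hosts, weighted_fns):
    """hosts: list of host dicts; weighted_fns: list of (weight, fn) pairs."""
    totals = [0.0] * len(hosts)
    for weight, fn in weighted_fns:
        for i, host in enumerate(hosts):
            totals[i] += weight * fn(host)
    best_index = min(range(len(hosts)), key=lambda i: totals[i])
    return hosts[best_index], totals

def noop_cost_fn(host):
    return 1  # every host costs the same

def fill_first_cost_fn(host):
    return host['free_mem']  # prefer hosts with the least free memory

hosts = [{'name': 'h1', 'free_mem': 512}, {'name': 'h2', 'free_mem': 256}]
print(weighted_sum(hosts, [(1, fill_first_cost_fn)])[0]['name'])  # h2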
@@ -201,7 +201,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
            'instance_properties': {},
            'instance_type': {},
            'filter_driver': 'nova.scheduler.host_filter.AllHostsFilter',
            'blob': "Non-None blob data"
            'blob': "Non-None blob data",
        }

        result = sched.schedule_run_instance(None, 1, request_spec)
@@ -115,6 +115,18 @@ class CloudTestCase(test.TestCase):
                                  public_ip=address)
        db.floating_ip_destroy(self.context, address)

    def test_allocate_address(self):
        address = "10.10.10.10"
        allocate = self.cloud.allocate_address
        db.floating_ip_create(self.context,
                              {'address': address,
                               'host': self.network.host})
        self.assertEqual(allocate(self.context)['publicIp'], address)
        db.floating_ip_destroy(self.context, address)
        self.assertRaises(exception.NoMoreFloatingIps,
                          allocate,
                          self.context)

    def test_associate_disassociate_address(self):
        """Verifies associate runs cleanly without raising an exception"""
        address = "10.10.10.10"
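The new test_allocate_address checks both the happy path and that allocation raises NoMoreFloatingIps once the pool is exhausted. A minimal stand-alone sketch of that behaviour; the exception class and helper here are local stand-ins, not nova's implementations:

class NoMoreFloatingIps(Exception):
    pass

def allocate_address(pool):
    """Pop and return the next free floating IP, or raise when exhausted."""
    if not pool:
        raise NoMoreFloatingIps("no more floating ips available")
    return pool.pop(0)

pool = ["10.10.10.10"]
print(allocate_address(pool))   # 10.10.10.10
try:
    allocate_address(pool)      # second call raises, as the test expects
except NoMoreFloatingIps as e:
    print("raised: %s" % e)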
@@ -331,7 +331,7 @@ class XenAPIVMTestCase(test.TestCase):

    def check_vm_params_for_linux(self):
        self.assertEquals(self.vm['platform']['nx'], 'false')
        self.assertEquals(self.vm['PV_args'], 'clocksource=jiffies')
        self.assertEquals(self.vm['PV_args'], '')
        self.assertEquals(self.vm['PV_bootloader'], 'pygrub')

        # check that these are not set