From bfff03f74c2b4195986bf0c0d30250b97e11dbad Mon Sep 17 00:00:00 2001
From: John Tran
Date: Fri, 18 Mar 2011 11:49:11 -0700
Subject: [PATCH 01/60] created api endpoint to allow uploading of public key

---
 nova/tests/test_cloud.py | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index cf8ee7ef..03b1ad2f 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -279,6 +279,22 @@ class CloudTestCase(test.TestCase):
         self.assertTrue(filter(lambda k: k['keyName'] == 'test1', keys))
         self.assertTrue(filter(lambda k: k['keyName'] == 'test2', keys))
 
+    def test_import_public_key(self):
+        result = self.cloud.import_public_key(self.context,
+            'testimportkey', 'mytestpubkey', 'mytestfprint')
+        self.assertTrue(result)
+        keydata = db.key_pair_get(self.context,
+                                  self.context.user.id,
+                                  'testimportkey')
+        print "PUBLIC_KEY:"
+        file = open('/tmp/blah', 'w')
+        file.write(keydata['public_key'])
+        file.close()
+        print keydata['public_key']
+        self.assertEqual('mytestpubkey', keydata['public_key'])
+        self.assertEqual('mytestfprint', keydata['fingerprint'])
+        self.assertTrue(1)
+
     def test_delete_key_pair(self):
         self._create_key('test')
         self.cloud.delete_key_pair(self.context, 'test')

From 4c8178a75a74e5c327a06824e1c7792f2e9ff0e1 Mon Sep 17 00:00:00 2001
From: John Tran
Date: Fri, 18 Mar 2011 12:17:40 -0700
Subject: [PATCH 02/60] cleaned up tests stubs that were accidentally checked in

---
 nova/tests/test_cloud.py | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)

diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index 03b1ad2f..3a266c99 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -281,19 +281,15 @@ class CloudTestCase(test.TestCase):
     def test_import_public_key(self):
         result = self.cloud.import_public_key(self.context,
-            'testimportkey', 'mytestpubkey', 'mytestfprint')
+                                              'testimportkey',
+                                              'mytestpubkey',
+                                              'mytestfprint')
         self.assertTrue(result)
         keydata = db.key_pair_get(self.context,
                                   self.context.user.id,
                                   'testimportkey')
-        print "PUBLIC_KEY:"
-        file = open('/tmp/blah', 'w')
-        file.write(keydata['public_key'])
-        file.close()
-        print keydata['public_key']
         self.assertEqual('mytestpubkey', keydata['public_key'])
         self.assertEqual('mytestfprint', keydata['fingerprint'])
 
     def test_delete_key_pair(self):
         self._create_key('test')
         self.cloud.delete_key_pair(self.context, 'test')

From 0f22d5871d519a649455d3de642ad719b443f978 Mon Sep 17 00:00:00 2001
From: John Tran
Date: Mon, 21 Mar 2011 14:35:19 -0700
Subject: [PATCH 03/60] if fingerprint data not provided, added logic to calculate it using the pub key.

---
 nova/tests/public_key/dummy.fingerprint | 1 +
 nova/tests/public_key/dummy.pub | 1 +
 nova/tests/test_cloud.py | 32 +++++++++++++++++++------
 3 files changed, 27 insertions(+), 7 deletions(-)
 create mode 100644 nova/tests/public_key/dummy.fingerprint
 create mode 100644 nova/tests/public_key/dummy.pub

diff --git a/nova/tests/public_key/dummy.fingerprint b/nova/tests/public_key/dummy.fingerprint
new file mode 100644
index 00000000..715bca27
--- /dev/null
+++ b/nova/tests/public_key/dummy.fingerprint
@@ -0,0 +1 @@
+1c:87:d1:d9:32:fd:62:3c:78:2b:c0:ad:c0:15:88:df
diff --git a/nova/tests/public_key/dummy.pub b/nova/tests/public_key/dummy.pub
new file mode 100644
index 00000000..d4cf2bc0
--- /dev/null
+++ b/nova/tests/public_key/dummy.pub
@@ -0,0 +1 @@
+ssh-dss AAAAB3NzaC1kc3MAAACBAMGJlY9XEIm2X234pdO5yFWMp2JuOQx8U0E815IVXhmKxYCBK9ZakgZOIQmPbXoGYyV+mziDPp6HJ0wKYLQxkwLEFr51fAZjWQvRss0SinURRuLkockDfGFtD4pYJthekr/rlqMKlBSDUSpGq8jUWW60UJ18FGooFpxR7ESqQRx/AAAAFQC96LRglaUeeP+E8U/yblEJocuiWwAAAIA3XiMR8Skiz/0aBm5K50SeQznQuMJTyzt9S9uaz5QZWiFu69hOyGSFGw8fqgxEkXFJIuHobQQpGYQubLW0NdaYRqyE/Vud3JUJUb8Texld6dz8vGemyB5d1YvtSeHIo8/BGv2msOqR3u5AZTaGCBD9DhpSGOKHEdNjTtvpPd8S8gAAAIBociGZ5jf09iHLVENhyXujJbxfGRPsyNTyARJfCOGl0oFV6hEzcQyw8U/ePwjgvjc2UizMWLl8tsb2FXKHRdc2v+ND3Us+XqKQ33X3ADP4FZ/+Oj213gMyhCmvFTP0u5FmHog9My4CB7YcIWRuUR42WlhQ2IfPvKwUoTk3R+T6Og== www-data@mk
diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index 3a266c99..c49a39ed 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -280,16 +280,34 @@ class CloudTestCase(test.TestCase):
         self.assertTrue(filter(lambda k: k['keyName'] == 'test2', keys))
 
     def test_import_public_key(self):
-        result = self.cloud.import_public_key(self.context,
-                                              'testimportkey',
-                                              'mytestpubkey',
-                                              'mytestfprint')
-        self.assertTrue(result)
+        # test when user provides all values
+        result1 = self.cloud.import_public_key(self.context,
+                                               'testimportkey1',
+                                               'mytestpubkey',
+                                               'mytestfprint')
+        self.assertTrue(result1)
         keydata = db.key_pair_get(self.context,
-                                  self.context.user.id,
-                                  'testimportkey')
+                                  self.context.user.id,
+                                  'testimportkey1')
         self.assertEqual('mytestpubkey', keydata['public_key'])
         self.assertEqual('mytestfprint', keydata['fingerprint'])
+        # test when user omits fingerprint
+        pubkey_path = os.path.join(os.path.dirname(__file__), 'public_key')
+        f = open(pubkey_path + '/dummy.pub', 'r')
+        dummypub = f.readline().rstrip()
+        f.close
+        f = open(pubkey_path + '/dummy.fingerprint', 'r')
+        dummyfprint = f.readline().rstrip()
+        f.close
+        result2 = self.cloud.import_public_key(self.context,
+                                               'testimportkey2',
+                                               dummypub)
+        self.assertTrue(result2)
+        keydata = db.key_pair_get(self.context,
+                                  self.context.user.id,
+                                  'testimportkey2')
+        self.assertEqual(dummypub, keydata['public_key'])
+        self.assertEqual(dummyfprint, keydata['fingerprint'])
 
     def test_delete_key_pair(self):
         self._create_key('test')
         self.cloud.delete_key_pair(self.context, 'test')

From b3b03a75e02f015062de2ff9ad0c7f2ad42e5b6c Mon Sep 17 00:00:00 2001
From: John Tran
Date: Wed, 23 Mar 2011 11:16:22 -0700
Subject: [PATCH 04/60] added myself to authors file

---
 Authors | 1 +
 1 file changed, 1 insertion(+)

diff --git a/Authors b/Authors
index 7993955e..c1e16489 100644
--- a/Authors
+++ b/Authors
@@ -28,6 +28,7 @@ Jesse Andrews
 Joe Heck
 Joel Moore
 John Dewey
+John Tran
 Jonathan Bryce
 Jordan Rinke
 Josh Durgin

From d924c7392021c7d3bff038b8b0163fc8e4175fd2 Mon Sep 17 00:00:00 2001
From: Renuka Apte
Date: Tue, 12 Apr 2011 15:20:30 -0700
Subject: [PATCH 05/60] Minor fixes

---
 Authors | 1 +
 1 file changed, 1 insertion(+)

diff --git a/Authors b/Authors
index eccf38a4..b6da7a43 100644
--- a/Authors
+++ b/Authors
@@ -56,6 +56,7 @@ Nachi Ueno
 Naveed Massjouni
 Nirmal Ranganathan
 Paul Voccio
+Renuka Apte
 Ricardo Carrillo Cruz
 Rick Clark
 Rick Harris

From dc74b7864d7cff6fea657f78eec4b46808185540 Mon Sep 17 00:00:00 2001
From: Brian Lamar
Date: Fri, 22 Apr 2011 12:47:09 -0400
Subject: [PATCH 06/60] Created new libvirt directory, moved libvirt_conn.py to libvirt/connection.py, moved libvirt templates, broke out firewall and network utilities.

---
 nova/tests/test_virt.py | 47 +++++++++++++++++++++--------------------
 1 file changed, 24 insertions(+), 23 deletions(-)

diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py
index 0a0c7a95..d770f2c1 100644
--- a/nova/tests/test_virt.py
+++ b/nova/tests/test_virt.py
@@ -34,7 +34,8 @@ from nova.auth import manager
 from nova.compute import manager as compute_manager
 from nova.compute import power_state
 from nova.db.sqlalchemy import models
-from nova.virt import libvirt_conn
+from nova.virt.libvirt import connection
+from nova.virt.libvirt import firewall
 
 libvirt = None
 FLAGS = flags.FLAGS
@@ -64,7 +65,7 @@ class CacheConcurrencyTestCase(test.TestCase):
 
     def test_same_fname_concurrency(self):
         """Ensures that the same fname cache runs at a sequentially"""
-        conn = libvirt_conn.LibvirtConnection
+        conn = connection.LibvirtConnection
         wait1 = eventlet.event.Event()
         done1 = eventlet.event.Event()
         eventlet.spawn(conn._cache_image, _concurrency,
@@ -85,7 +86,7 @@ class CacheConcurrencyTestCase(test.TestCase):
 
     def test_different_fname_concurrency(self):
         """Ensures that two different fname caches are concurrent"""
-        conn = libvirt_conn.LibvirtConnection
+        conn = connection.LibvirtConnection
         wait1 = eventlet.event.Event()
         done1 = eventlet.event.Event()
         eventlet.spawn(conn._cache_image, _concurrency,
@@ -106,7 +107,7 @@ class LibvirtConnTestCase(test.TestCase):
     def setUp(self):
         super(LibvirtConnTestCase, self).setUp()
-        libvirt_conn._late_load_cheetah()
+        connection._late_load_cheetah()
         self.flags(fake_call=True)
         self.manager = manager.AuthManager()
@@ -152,8 +153,8 @@ class LibvirtConnTestCase(test.TestCase):
             return False
         global libvirt
         libvirt = __import__('libvirt')
-        libvirt_conn.libvirt = __import__('libvirt')
-        libvirt_conn.libxml2 = __import__('libxml2')
+        connection.libvirt = __import__('libvirt')
+        connection.libxml2 = __import__('libxml2')
         return True
 
     def create_fake_libvirt_mock(self, **kwargs):
@@ -163,7 +164,7 @@ class LibvirtConnTestCase(test.TestCase):
         class FakeLibvirtConnection(object):
             pass
 
-        # A fake libvirt_conn.IptablesFirewallDriver
+        # A fake connection.IptablesFirewallDriver
         class FakeIptablesFirewallDriver(object):
 
             def __init__(self, **kwargs):
@@ -179,11 +180,11 @@ class LibvirtConnTestCase(test.TestCase):
         for key, val in kwargs.items():
             fake.__setattr__(key, val)
 
-        # Inevitable mocks for libvirt_conn.LibvirtConnection
-        self.mox.StubOutWithMock(libvirt_conn.utils, 'import_class')
-        libvirt_conn.utils.import_class(mox.IgnoreArg()).AndReturn(fakeip)
-        self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn')
-        libvirt_conn.LibvirtConnection._conn = fake
+        # Inevitable mocks for connection.LibvirtConnection
+        self.mox.StubOutWithMock(connection.utils, 'import_class')
+        connection.utils.import_class(mox.IgnoreArg()).AndReturn(fakeip)
+        self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
+        connection.LibvirtConnection._conn = fake
 
     def create_service(self, **kwargs):
         service_ref = {'host': kwargs.get('host',
'dummy'), @@ -247,7 +248,7 @@ class LibvirtConnTestCase(test.TestCase): 'instance_id': instance_ref['id']}) self.flags(libvirt_type='lxc') - conn = libvirt_conn.LibvirtConnection(True) + conn = connection.LibvirtConnection(True) uri = conn.get_uri() self.assertEquals(uri, 'lxc:///') @@ -359,7 +360,7 @@ class LibvirtConnTestCase(test.TestCase): for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): FLAGS.libvirt_type = libvirt_type - conn = libvirt_conn.LibvirtConnection(True) + conn = connection.LibvirtConnection(True) uri = conn.get_uri() self.assertEquals(uri, expected_uri) @@ -386,7 +387,7 @@ class LibvirtConnTestCase(test.TestCase): FLAGS.libvirt_uri = testuri for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): FLAGS.libvirt_type = libvirt_type - conn = libvirt_conn.LibvirtConnection(True) + conn = connection.LibvirtConnection(True) uri = conn.get_uri() self.assertEquals(uri, testuri) db.instance_destroy(user_context, instance_ref['id']) @@ -410,13 +411,13 @@ class LibvirtConnTestCase(test.TestCase): self.create_fake_libvirt_mock(getVersion=getVersion, getType=getType, listDomainsID=listDomainsID) - self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, + self.mox.StubOutWithMock(connection.LibvirtConnection, 'get_cpu_info') - libvirt_conn.LibvirtConnection.get_cpu_info().AndReturn('cpuinfo') + connection.LibvirtConnection.get_cpu_info().AndReturn('cpuinfo') # Start test self.mox.ReplayAll() - conn = libvirt_conn.LibvirtConnection(False) + conn = connection.LibvirtConnection(False) conn.update_available_resource(self.context, 'dummy') service_ref = db.service_get(self.context, service_ref['id']) compute_node = service_ref['compute_node'][0] @@ -450,7 +451,7 @@ class LibvirtConnTestCase(test.TestCase): self.create_fake_libvirt_mock() self.mox.ReplayAll() - conn = libvirt_conn.LibvirtConnection(False) + conn = connection.LibvirtConnection(False) self.assertRaises(exception.Invalid, conn.update_available_resource, self.context, 'dummy') @@ -485,7 +486,7 @@ class LibvirtConnTestCase(test.TestCase): # Start test self.mox.ReplayAll() try: - conn = libvirt_conn.LibvirtConnection(False) + conn = connection.LibvirtConnection(False) conn.firewall_driver.setattr('setup_basic_filtering', fake_none) conn.firewall_driver.setattr('prepare_instance_filter', fake_none) conn.firewall_driver.setattr('instance_filter_exists', fake_none) @@ -534,7 +535,7 @@ class LibvirtConnTestCase(test.TestCase): # Start test self.mox.ReplayAll() - conn = libvirt_conn.LibvirtConnection(False) + conn = connection.LibvirtConnection(False) self.assertRaises(libvirt.libvirtError, conn._live_migration, self.context, instance_ref, 'dest', '', @@ -569,7 +570,7 @@ class IptablesFirewallTestCase(test.TestCase): class FakeLibvirtConnection(object): pass self.fake_libvirt_connection = FakeLibvirtConnection() - self.fw = libvirt_conn.IptablesFirewallDriver( + self.fw = firewall.IptablesFirewallDriver( get_connection=lambda: self.fake_libvirt_connection) def tearDown(self): @@ -746,7 +747,7 @@ class NWFilterTestCase(test.TestCase): self.fake_libvirt_connection = Mock() - self.fw = libvirt_conn.NWFilterFirewall( + self.fw = firewall.NWFilterFirewall( lambda: self.fake_libvirt_connection) def tearDown(self): From 05c40ad8d1229d9d62590403ce9fc81f8bc04c21 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Fri, 22 Apr 2011 15:26:45 -0400 Subject: [PATCH 07/60] Renamed test_virt.py to test_libvirt.py as per suggestion. 
--- nova/tests/{test_virt.py => test_libvirt.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename nova/tests/{test_virt.py => test_libvirt.py} (100%) diff --git a/nova/tests/test_virt.py b/nova/tests/test_libvirt.py similarity index 100% rename from nova/tests/test_virt.py rename to nova/tests/test_libvirt.py From 74aff1dbd29bd872d95791300e83bb73f19257be Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Wed, 11 May 2011 06:28:07 -0700 Subject: [PATCH 08/60] First cut with tests passing --- nova/scheduler/api.py | 6 ++ nova/scheduler/zone_aware_scheduler.py | 88 ++++++++++++++++++++++++++ 2 files changed, 94 insertions(+) create mode 100644 nova/scheduler/zone_aware_scheduler.py diff --git a/nova/scheduler/api.py b/nova/scheduler/api.py index 816ae551..d8a0025e 100644 --- a/nova/scheduler/api.py +++ b/nova/scheduler/api.py @@ -81,6 +81,12 @@ def get_zone_capabilities(context): return _call_scheduler('get_zone_capabilities', context=context) +def select(context, specs=None): + """Returns a list of hosts.""" + return _call_scheduler('select', context=context, + params={"specs": specs}) + + def update_service_capabilities(context, service_name, host, capabilities): """Send an update to all the scheduler services informing them of the capabilities of this service.""" diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py new file mode 100644 index 00000000..b849e8de --- /dev/null +++ b/nova/scheduler/zone_aware_scheduler.py @@ -0,0 +1,88 @@ +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +The Zone Aware Scheduler is a base class Scheduler for creating instances +across zones. There are two expansion points to this class for: +1. Assigning Weights to hosts for requested instances +2. Filtering Hosts based on required instance capabilities +""" + +import operator + +from nova import log as logging +from nova.scheduler import api + +LOG = logging.getLogger('nova.scheduler.zone_aware_scheduler') + + +class ZoneAwareScheduler(object): + """Base class for creating Zone Aware Schedulers.""" + + def _call_zone_method(self, context, method, specs): + """Call novaclient zone method. Broken out for testing.""" + return api.call_zone_method(context, method, specs=specs) + + def select(self, context, *args, **kwargs): + """Select returns a list of weights and zone/host information + corresponding to the best hosts to service the request. Any + child zone information has been encrypted so as not to reveal + anything about the children.""" + return self._schedule(context, "compute", *args, **kwargs) + + def schedule(self, context, topic, *args, **kwargs): + """The schedule() contract requires we return the one + best-suited host for this request. + """ + res = self._schedule(context, topic, *args, **kwargs) + return res[0] + + def _schedule(self, context, topic, *args, **kwargs): + """Returns a list of hosts that meet the required specs, + ordered by their fitness. 
+ """ + # Filter local hosts based on requirements ... + host_list = self.filter_hosts() + + # then weigh the selected hosts. + # weighted = [ { 'weight':#, 'name':host, ...}, ] + weighted = self.weight_hosts(host_list) + + # Next, tack on the best weights from the child zones ... + child_results = self._call_zone_method(context, "select", + specs=specs) + for child_zone, result in child_results: + for weighting in result: + # Remember the child_zone so we can get back to + # it later if needed. This implicitly builds a zone + # path structure. + host_dict = { + "weight": weighting["weight"], + "child_zone": child_zone, + "child_blob": weighting["blob"]} + weighted.append(host_dict) + + weighted.sort(key=operator.itemgetter('weight')) + return weighted + + def filter_hosts(self): + """Derived classes must override this method and return + a list of hosts in [?] format.""" + raise NotImplemented() + + def weigh_hosts(self, hosts): + """Derived classes must override this method and return + a lists of hosts in [?] format.""" + raise NotImplemented() From bc1968e728f0d9f6656e9af37c0d788fcf4a9a8d Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 11 May 2011 13:50:24 -0400 Subject: [PATCH 09/60] Updated the value of the nova-manager libvirt_type --- bin/nova-manage | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/nova-manage b/bin/nova-manage index 2f6af6e2..3eb5f035 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -97,7 +97,7 @@ flags.DECLARE('vlan_start', 'nova.network.manager') flags.DECLARE('vpn_start', 'nova.network.manager') flags.DECLARE('fixed_range_v6', 'nova.network.manager') flags.DECLARE('images_path', 'nova.image.local') -flags.DECLARE('libvirt_type', 'nova.virt.libvirt_conn') +flags.DECLARE('libvirt_type', 'nova.virt.libvirt.connection') flags.DEFINE_flag(flags.HelpFlag()) flags.DEFINE_flag(flags.HelpshortFlag()) flags.DEFINE_flag(flags.HelpXMLFlag()) From 91b8ac3f980f74ba0de610ce55f7444d12213f90 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Wed, 11 May 2011 11:12:31 -0700 Subject: [PATCH 10/60] start of zone_aware_scheduler test --- nova/scheduler/api.py | 39 ++++++++++++++++++++++++++ nova/scheduler/zone_aware_scheduler.py | 31 ++++++++++++-------- 2 files changed, 58 insertions(+), 12 deletions(-) diff --git a/nova/scheduler/api.py b/nova/scheduler/api.py index d8a0025e..55f8e0a6 100644 --- a/nova/scheduler/api.py +++ b/nova/scheduler/api.py @@ -111,6 +111,45 @@ def _process(func, zone): return func(nova, zone) +def call_zone_method(context, method, errors_to_ignore=None, *args, **kwargs): + """Returns a list of (zone, call_result) objects.""" + if not isinstance(errors_to_ignore, (list, tuple)): + # This will also handle the default None + errors_to_ignore = [errors_to_ignore] + + pool = greenpool.GreenPool() + results = [] + for zone in db.zone_get_all(context): + try: + nova = novaclient.OpenStack(zone.username, zone.password, + zone.api_url) + nova.authenticate() + except novaclient.exceptions.BadRequest, e: + url = zone.api_url + LOG.warn(_("Failed request to zone; URL=%(url)s: %(e)s") + % locals()) + #TODO (dabo) - add logic for failure counts per zone, + # with escalation after a given number of failures. + continue + zone_method = getattr(nova.zones, method) + + def _error_trap(*args, **kwargs): + try: + return zone_method(*args, **kwargs) + except Exception as e: + if type(e) in errors_to_ignore: + return None + # TODO (dabo) - want to be able to re-raise here. + # Returning a string now; raising was causing issues. 
+ # raise e + return "ERROR", "%s" % e + + res = pool.spawn(_error_trap, *args, **kwargs) + results.append((zone, res)) + pool.waitall() + return [(zone.id, res.wait()) for zone, res in results] + + def child_zone_helper(zone_list, func): """Fire off a command to each zone in the list. The return is [novaclient return objects] from each child zone. diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index b849e8de..b85cdfe6 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -24,11 +24,12 @@ import operator from nova import log as logging from nova.scheduler import api +from nova.scheduler import driver LOG = logging.getLogger('nova.scheduler.zone_aware_scheduler') -class ZoneAwareScheduler(object): +class ZoneAwareScheduler(driver.Scheduler): """Base class for creating Zone Aware Schedulers.""" def _call_zone_method(self, context, method, specs): @@ -42,23 +43,29 @@ class ZoneAwareScheduler(object): anything about the children.""" return self._schedule(context, "compute", *args, **kwargs) - def schedule(self, context, topic, *args, **kwargs): + def schedule(self, context, topic, *args, **kwargs): """The schedule() contract requires we return the one best-suited host for this request. """ res = self._schedule(context, topic, *args, **kwargs) + # TODO(sirp): should this be a host object rather than a weight-dict? return res[0] def _schedule(self, context, topic, *args, **kwargs): """Returns a list of hosts that meet the required specs, ordered by their fitness. """ + + #TODO(sandy): extract these from args. + num_instances = 1 + specs = {} + # Filter local hosts based on requirements ... - host_list = self.filter_hosts() + host_list = self.filter_hosts(num_instances, specs) # then weigh the selected hosts. # weighted = [ { 'weight':#, 'name':host, ...}, ] - weighted = self.weight_hosts(host_list) + weighted = self.weigh_hosts(num_instances, specs, host_list) # Next, tack on the best weights from the child zones ... child_results = self._call_zone_method(context, "select", @@ -77,12 +84,12 @@ class ZoneAwareScheduler(object): weighted.sort(key=operator.itemgetter('weight')) return weighted - def filter_hosts(self): - """Derived classes must override this method and return - a list of hosts in [?] format.""" - raise NotImplemented() + def filter_hosts(self, num, specs): + """Derived classes must override this method and return + a list of hosts in [?] format.""" + raise NotImplemented() - def weigh_hosts(self, hosts): - """Derived classes must override this method and return - a lists of hosts in [?] format.""" - raise NotImplemented() + def weigh_hosts(self, num, specs, hosts): + """Derived classes must override this method and return + a lists of hosts in [?] format.""" + raise NotImplemented() From 0087f66d8aa4ec412265f49c512d2909fb8e5033 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Wed, 11 May 2011 11:43:58 -0700 Subject: [PATCH 11/60] NoValidHost exception test --- nova/scheduler/zone_aware_scheduler.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index b85cdfe6..8285ec57 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -49,6 +49,8 @@ class ZoneAwareScheduler(driver.Scheduler): """ res = self._schedule(context, topic, *args, **kwargs) # TODO(sirp): should this be a host object rather than a weight-dict? 
+ if not res: + raise driver.NoValidHost(_('No hosts were available')) return res[0] def _schedule(self, context, topic, *args, **kwargs): @@ -64,7 +66,7 @@ class ZoneAwareScheduler(driver.Scheduler): host_list = self.filter_hosts(num_instances, specs) # then weigh the selected hosts. - # weighted = [ { 'weight':#, 'name':host, ...}, ] + # weighted = [{weight=weight, name=hostname}, ...] weighted = self.weigh_hosts(num_instances, specs, host_list) # Next, tack on the best weights from the child zones ... @@ -86,10 +88,10 @@ class ZoneAwareScheduler(driver.Scheduler): def filter_hosts(self, num, specs): """Derived classes must override this method and return - a list of hosts in [?] format.""" + a list of hosts in [(hostname, capability_dict)] format.""" raise NotImplemented() def weigh_hosts(self, num, specs, hosts): """Derived classes must override this method and return - a lists of hosts in [?] format.""" + a lists of hosts in [(weight, hostname)] format.""" raise NotImplemented() From b9dfbe6eb6527f5f0ebea30dbb87829b59702e91 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 11 May 2011 14:46:31 -0400 Subject: [PATCH 12/60] Updated MANIFEST for template move. --- MANIFEST.in | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/MANIFEST.in b/MANIFEST.in index e7a6e7da..fc449275 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -24,8 +24,7 @@ include nova/console/xvp.conf.template include nova/db/sqlalchemy/migrate_repo/migrate.cfg include nova/db/sqlalchemy/migrate_repo/README include nova/virt/interfaces.template -include nova/virt/libvirt*.xml.template -include nova/virt/cpuinfo.xml.template +include nova/virt/libvirt/*.template include nova/tests/CA/ include nova/tests/CA/cacert.pem include nova/tests/CA/private/ From 3e494c104a52f6a848f8ba969de9f80eb32d31e0 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Wed, 11 May 2011 12:45:22 -0700 Subject: [PATCH 13/60] messing around with the flow of create() and specs --- nova/scheduler/zone_aware_scheduler.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index 8285ec57..07f86450 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -36,6 +36,24 @@ class ZoneAwareScheduler(driver.Scheduler): """Call novaclient zone method. Broken out for testing.""" return api.call_zone_method(context, method, specs=specs) + def schedule_run_instance(self, context, topic='compute', specs=None, + *args, **kwargs): + """This method is called from nova.compute.api to provision + an instance. However we need to look at the parameters being + passed in to see if this is a request to: + 1. Create a Build Plan and then provision, or + 2. Use the Build Plan information in the request parameters + to simply create the instance (either in this zone or + a child zone).""" + + if 'blob' in specs: + return self.provision_instance(context, topic, specs) + + # Create build plan and provision ... + build_plan = self.select(context, specs) + for item in build_plan: + self.provision_instance(context, topic, item) + def select(self, context, *args, **kwargs): """Select returns a list of weights and zone/host information corresponding to the best hosts to service the request. 
Any From 0f2c1a6f4c79349445871d99bb6dcccdb989d2d1 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Thu, 12 May 2011 20:07:54 -0500 Subject: [PATCH 14/60] Adding basic tests for call_zone_method --- nova/tests/test_scheduler.py | 61 +++++++++++++++++++++++++++++++++--- 1 file changed, 57 insertions(+), 4 deletions(-) diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py index 968ef9d6..54b3f80f 100644 --- a/nova/tests/test_scheduler.py +++ b/nova/tests/test_scheduler.py @@ -912,7 +912,8 @@ class SimpleDriverTestCase(test.TestCase): class FakeZone(object): - def __init__(self, api_url, username, password): + def __init__(self, id, api_url, username, password): + self.id = id self.api_url = api_url self.username = username self.password = password @@ -920,7 +921,7 @@ class FakeZone(object): def zone_get_all(context): return [ - FakeZone('http://example.com', 'bob', 'xxx'), + FakeZone(1, 'http://example.com', 'bob', 'xxx'), ] @@ -1037,7 +1038,7 @@ class FakeNovaClient(object): class DynamicNovaClientTest(test.TestCase): def test_issue_novaclient_command_found(self): - zone = FakeZone('http://example.com', 'bob', 'xxx') + zone = FakeZone(1, 'http://example.com', 'bob', 'xxx') self.assertEquals(api._issue_novaclient_command( FakeNovaClient(FakeServerCollection()), zone, "servers", "get", 100).a, 10) @@ -1051,7 +1052,7 @@ class DynamicNovaClientTest(test.TestCase): zone, "servers", "pause", 100), None) def test_issue_novaclient_command_not_found(self): - zone = FakeZone('http://example.com', 'bob', 'xxx') + zone = FakeZone(1, 'http://example.com', 'bob', 'xxx') self.assertEquals(api._issue_novaclient_command( FakeNovaClient(FakeEmptyServerCollection()), zone, "servers", "get", 100), None) @@ -1063,3 +1064,55 @@ class DynamicNovaClientTest(test.TestCase): self.assertEquals(api._issue_novaclient_command( FakeNovaClient(FakeEmptyServerCollection()), zone, "servers", "any", "name"), None) + + +class FakeZonesProxy(object): + def do_something(*args, **kwargs): + return 42 + + def raises_exception(*args, **kwargs): + raise Exception('testing') + + +class FakeNovaClientOpenStack(object): + def __init__(self, *args, **kwargs): + self.zones = FakeZonesProxy() + + def authenticate(self): + pass + + +class CallZoneMethodTest(test.TestCase): + def setUp(self): + super(CallZoneMethodTest, self).setUp() + self.stubs = stubout.StubOutForTesting() + self.stubs.Set(db, 'zone_get_all', zone_get_all) + self.stubs.Set(novaclient, 'OpenStack', FakeNovaClientOpenStack) + + def tearDown(self): + self.stubs.UnsetAll() + super(CallZoneMethodTest, self).tearDown() + + def test_call_zone_method(self): + context = {} + method = 'do_something' + results = api.call_zone_method(context, method) + expected = [(1, 42)] + self.assertEqual(expected, results) + + def test_call_zone_method_not_present(self): + context = {} + method = 'not_present' + self.assertRaises(AttributeError, api.call_zone_method, + context, method) + + def test_call_zone_method_generates_exception(self): + context = {} + method = 'raises_exception' + results = api.call_zone_method(context, method) + + # FIXME(sirp): for now the _error_trap code is catching errors and + # converting them to a ("ERROR", "string") tuples. The code (and this + # test) should eventually handle real exceptions. 
+ expected = [(1, ('ERROR', 'testing'))] + self.assertEqual(expected, results) From 5fa8101e567f1e1d181056354c146a065c7804b3 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Thu, 12 May 2011 18:44:22 -0700 Subject: [PATCH 15/60] pep8 --- nova/tests/test_zone_aware_scheduler.py | 119 ++++++++++++++++++++++++ 1 file changed, 119 insertions(+) create mode 100644 nova/tests/test_zone_aware_scheduler.py diff --git a/nova/tests/test_zone_aware_scheduler.py b/nova/tests/test_zone_aware_scheduler.py new file mode 100644 index 00000000..fdcde34c --- /dev/null +++ b/nova/tests/test_zone_aware_scheduler.py @@ -0,0 +1,119 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For Zone Aware Scheduler. +""" + +from nova import test +from nova.scheduler import driver +from nova.scheduler import zone_aware_scheduler +from nova.scheduler import zone_manager + + +class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler): + def filter_hosts(self, num, specs): + # NOTE(sirp): this is returning [(hostname, services)] + return self.zone_manager.service_states.items() + + def weigh_hosts(self, num, specs, hosts): + fake_weight = 99 + weighted = [] + for hostname, caps in hosts: + weighted.append(dict(weight=fake_weight, name=hostname)) + return weighted + + +class FakeZoneManager(zone_manager.ZoneManager): + def __init__(self): + self.service_states = { + 'host1': { + 'compute': {'ram': 1000} + }, + 'host2': { + 'compute': {'ram': 2000} + }, + 'host3': { + 'compute': {'ram': 3000} + } + } + + +class FakeEmptyZoneManager(zone_manager.ZoneManager): + def __init__(self): + self.service_states = {} + + +def fake_empty_call_zone_method(context, method, specs): + return [] + + +def fake_call_zone_method(context, method, specs): + return [ + ('zone1', [ + dict(weight=1, blob='AAAAAAA'), + dict(weight=111, blob='BBBBBBB'), + dict(weight=112, blob='CCCCCCC'), + dict(weight=113, blob='DDDDDDD'), + ]), + ('zone2', [ + dict(weight=120, blob='EEEEEEE'), + dict(weight=2, blob='FFFFFFF'), + dict(weight=122, blob='GGGGGGG'), + dict(weight=123, blob='HHHHHHH'), + ]), + ('zone3', [ + dict(weight=130, blob='IIIIIII'), + dict(weight=131, blob='JJJJJJJ'), + dict(weight=132, blob='KKKKKKK'), + dict(weight=3, blob='LLLLLLL'), + ]), + ] + + +class ZoneAwareSchedulerTestCase(test.TestCase): + """Test case for Zone Aware Scheduler.""" + + def test_zone_aware_scheduler(self): + """ + Create a nested set of FakeZones, ensure that a select call returns the + appropriate build plan. 
+ """ + sched = FakeZoneAwareScheduler() + self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method) + + zm = FakeZoneManager() + sched.set_zone_manager(zm) + + fake_context = {} + build_plan = sched.select(fake_context, {}) + + self.assertEqual(15, len(build_plan)) + + hostnames = [plan_item['name'] + for plan_item in build_plan if 'name' in plan_item] + self.assertEqual(3, len(hostnames)) + + def test_empty_zone_aware_scheduler(self): + """ + Ensure empty hosts & child_zones result in NoValidHosts exception. + """ + sched = FakeZoneAwareScheduler() + self.stubs.Set(sched, '_call_zone_method', fake_empty_call_zone_method) + + zm = FakeEmptyZoneManager() + sched.set_zone_manager(zm) + + fake_context = {} + self.assertRaises(driver.NoValidHost, sched.schedule, fake_context, {}) From fffb7787306e31dbe0fa4666f2d7f931d79d77f0 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Fri, 13 May 2011 06:12:18 -0700 Subject: [PATCH 16/60] fixup based on Lorin's feedback --- nova/scheduler/zone_aware_scheduler.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index 07f86450..b3d230bd 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -36,7 +36,7 @@ class ZoneAwareScheduler(driver.Scheduler): """Call novaclient zone method. Broken out for testing.""" return api.call_zone_method(context, method, specs=specs) - def schedule_run_instance(self, context, topic='compute', specs=None, + def schedule_run_instance(self, context, topic='compute', specs={}, *args, **kwargs): """This method is called from nova.compute.api to provision an instance. However we need to look at the parameters being @@ -54,6 +54,10 @@ class ZoneAwareScheduler(driver.Scheduler): for item in build_plan: self.provision_instance(context, topic, item) + def provision_instance(context, topic, item): + """Create the requested instance in this Zone or a child zone.""" + pass + def select(self, context, *args, **kwargs): """Select returns a list of weights and zone/host information corresponding to the best hosts to service the request. 
Any @@ -111,5 +115,5 @@ class ZoneAwareScheduler(driver.Scheduler): def weigh_hosts(self, num, specs, hosts): """Derived classes must override this method and return - a lists of hosts in [(weight, hostname)] format.""" + a lists of hosts in [{weight, hostname}] format.""" raise NotImplemented() From e6d2222f513b52618ad2bd2134f842d0f309988e Mon Sep 17 00:00:00 2001 From: Eldar Nugaev Date: Mon, 16 May 2011 18:14:09 +0400 Subject: [PATCH 17/60] Added response about error in nova-manage project operations --- bin/nova-manage | 43 +++++++++++++++++++++++++++++++++++-------- 1 file changed, 35 insertions(+), 8 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index a36ec86d..155ab592 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -362,27 +362,47 @@ class ProjectCommands(object): def add(self, project_id, user_id): """Adds user to project arguments: project_id user_id""" - self.manager.add_to_project(user_id, project_id) + try: + self.manager.add_to_project(user_id, project_id) + except exception.UserNotFound, e: + print e + raise def create(self, name, project_manager, description=None): """Creates a new project arguments: name project_manager [description]""" - self.manager.create_project(name, project_manager, description) + try: + self.manager.create_project(name, project_manager, description) + except exception.UserNotFound, e: + print e + raise def modify(self, name, project_manager, description=None): """Modifies a project arguments: name project_manager [description]""" - self.manager.modify_project(name, project_manager, description) - + try: + self.manager.modify_project(name, project_manager, description) + except exception.UserNotFound, e: + print e + raise + def delete(self, name): """Deletes an existing project arguments: name""" - self.manager.delete_project(name) + try: + self.manager.delete_project(name) + except exception.ProjectNotFound, e: + print e + raise def environment(self, project_id, user_id, filename='novarc'): """Exports environment variables to an sourcable file arguments: project_id user_id [filename='novarc]""" - rc = self.manager.get_environment_rc(user_id, project_id) + try: + rc = self.manager.get_environment_rc(user_id, project_id) + except (exception.UserNotFound, exception.ProjectNotFound), e: + print e + raise with open(filename, 'w') as f: f.write(rc) @@ -400,7 +420,7 @@ class ProjectCommands(object): quo = {'project_id': project_id, key: value} try: db.quota_update(ctxt, project_id, quo) - except exception.NotFound: + except exception.ProjectQuotaNotFound: db.quota_create(ctxt, quo) project_quota = quota.get_quota(ctxt, project_id) for key, value in project_quota.iteritems(): @@ -409,7 +429,11 @@ class ProjectCommands(object): def remove(self, project_id, user_id): """Removes user from project arguments: project_id user_id""" - self.manager.remove_from_project(user_id, project_id) + try: + self.manager.remove_from_project(user_id, project_id) + except (exception.UserNotFound, exception.ProjectNotFound), e: + print e + raise def scrub(self, project_id): """Deletes data associated with project @@ -428,6 +452,9 @@ class ProjectCommands(object): zip_file = self.manager.get_credentials(user_id, project_id) with open(filename, 'w') as f: f.write(zip_file) + except (exception.UserNotFound, exception.ProjectNotFound), e: + print e + raise except db.api.NoMoreNetworks: print _('No more networks available. 
If this is a new ' 'installation, you need\nto call something like this:\n\n' From 04abb75adfce476c962e0b55417e1c488dda819a Mon Sep 17 00:00:00 2001 From: Eldar Nugaev Date: Mon, 16 May 2011 18:17:15 +0400 Subject: [PATCH 18/60] Pep8 cleaning --- bin/nova-manage | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/nova-manage b/bin/nova-manage index 155ab592..f1214ff3 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -385,7 +385,7 @@ class ProjectCommands(object): except exception.UserNotFound, e: print e raise - + def delete(self, name): """Deletes an existing project arguments: name""" From 861f42f7bf7a1ec92cc5b59de754553d2587505d Mon Sep 17 00:00:00 2001 From: Eldar Nugaev Date: Mon, 16 May 2011 22:40:16 +0400 Subject: [PATCH 19/60] style fixing --- bin/nova-manage | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index f1214ff3..a42dabf8 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -364,8 +364,8 @@ class ProjectCommands(object): arguments: project_id user_id""" try: self.manager.add_to_project(user_id, project_id) - except exception.UserNotFound, e: - print e + except exception.UserNotFound as ex: + print ex raise def create(self, name, project_manager, description=None): @@ -373,8 +373,8 @@ class ProjectCommands(object): arguments: name project_manager [description]""" try: self.manager.create_project(name, project_manager, description) - except exception.UserNotFound, e: - print e + except exception.UserNotFound as ex: + print ex raise def modify(self, name, project_manager, description=None): @@ -382,8 +382,8 @@ class ProjectCommands(object): arguments: name project_manager [description]""" try: self.manager.modify_project(name, project_manager, description) - except exception.UserNotFound, e: - print e + except exception.UserNotFound as ex: + print ex raise def delete(self, name): @@ -391,8 +391,8 @@ class ProjectCommands(object): arguments: name""" try: self.manager.delete_project(name) - except exception.ProjectNotFound, e: - print e + except exception.ProjectNotFound as ex: + print ex raise def environment(self, project_id, user_id, filename='novarc'): @@ -400,8 +400,8 @@ class ProjectCommands(object): arguments: project_id user_id [filename='novarc]""" try: rc = self.manager.get_environment_rc(user_id, project_id) - except (exception.UserNotFound, exception.ProjectNotFound), e: - print e + except (exception.UserNotFound, exception.ProjectNotFound) as ex: + print ex raise with open(filename, 'w') as f: f.write(rc) @@ -431,8 +431,8 @@ class ProjectCommands(object): arguments: project_id user_id""" try: self.manager.remove_from_project(user_id, project_id) - except (exception.UserNotFound, exception.ProjectNotFound), e: - print e + except (exception.UserNotFound, exception.ProjectNotFound) as ex: + print ex raise def scrub(self, project_id): @@ -452,8 +452,8 @@ class ProjectCommands(object): zip_file = self.manager.get_credentials(user_id, project_id) with open(filename, 'w') as f: f.write(zip_file) - except (exception.UserNotFound, exception.ProjectNotFound), e: - print e + except (exception.UserNotFound, exception.ProjectNotFound) as ex: + print ex raise except db.api.NoMoreNetworks: print _('No more networks available. 
If this is a new ' From 21c2328dcfbc2562b75dd457cf043ced98e341c7 Mon Sep 17 00:00:00 2001 From: Mark Washenberger Date: Tue, 17 May 2011 15:36:00 -0400 Subject: [PATCH 21/60] support unlimited quotas in nova-manage and flags --- bin/nova-manage | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/bin/nova-manage b/bin/nova-manage index c95b216c..09b89b0b 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -397,12 +397,16 @@ class ProjectCommands(object): arguments: project_id [key] [value]""" ctxt = context.get_admin_context() if key: + if value.lower() == 'unlimited': + value = None try: db.quota_update(ctxt, project_id, key, value) except exception.NotFound: db.quota_create(ctxt, project_id, key, value) project_quota = quota.get_quota(ctxt, project_id) for key, value in project_quota.iteritems(): + if value is None: + value = 'unlimited' print '%s: %s' % (key, value) def remove(self, project_id, user_id): From 3abceeecd2adf34dd85c6f5d025f036d7169468c Mon Sep 17 00:00:00 2001 From: Mark Washenberger Date: Thu, 19 May 2011 14:08:15 -0400 Subject: [PATCH 22/60] waldon's naming feedback --- bin/nova-manage | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/nova-manage b/bin/nova-manage index ae168001..9bd18e59 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -403,7 +403,7 @@ class ProjectCommands(object): db.quota_update(ctxt, project_id, key, value) except exception.NotFound: db.quota_create(ctxt, project_id, key, value) - project_quota = quota.get_quota(ctxt, project_id) + project_quota = quota.get_project_quotas(ctxt, project_id) for key, value in project_quota.iteritems(): if value is None: value = 'unlimited' From e5f99e24140a44b93b0d97d8f3488044e643e300 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 19 May 2011 16:25:57 -0400 Subject: [PATCH 23/60] Moved back templates and fixed pep8 issue. Template move was due to breaking packaging with template moves. That will need to happen in a later merge. --- MANIFEST.in | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/MANIFEST.in b/MANIFEST.in index fc449275..e7a6e7da 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -24,7 +24,8 @@ include nova/console/xvp.conf.template include nova/db/sqlalchemy/migrate_repo/migrate.cfg include nova/db/sqlalchemy/migrate_repo/README include nova/virt/interfaces.template -include nova/virt/libvirt/*.template +include nova/virt/libvirt*.xml.template +include nova/virt/cpuinfo.xml.template include nova/tests/CA/ include nova/tests/CA/cacert.pem include nova/tests/CA/private/ From 27b71e79e12f3dbb27e40fccf0128f0f214ac324 Mon Sep 17 00:00:00 2001 From: Andrey Brindeyev Date: Fri, 20 May 2011 17:57:04 +0400 Subject: [PATCH 24/60] Addressing bug #785763. Usual default for maximum number of DHCP leases in dnsmasq is 150. This prevents instances to obtain IP addresses from DHCP in case we have more than 150 in our network. Adding myself to Authors. --- Authors | 1 + 1 file changed, 1 insertion(+) diff --git a/Authors b/Authors index 546c9091..6741c81f 100644 --- a/Authors +++ b/Authors @@ -1,4 +1,5 @@ Alex Meade +Andrey Brindeyev Andy Smith Andy Southgate Anne Gentle From 7a3ecc4015d1d2198d0fa0662aecc06ba7cc5e9c Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Fri, 20 May 2011 21:21:04 +0200 Subject: [PATCH 25/60] Include data files for public key tests in the tarball. 
--- MANIFEST.in | 1 + 1 file changed, 1 insertion(+) diff --git a/MANIFEST.in b/MANIFEST.in index e7a6e7da..4e145de7 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -35,6 +35,7 @@ include nova/tests/bundle/1mb.manifest.xml include nova/tests/bundle/1mb.no_kernel_or_ramdisk.manifest.xml include nova/tests/bundle/1mb.part.0 include nova/tests/bundle/1mb.part.1 +include nova/tests/public_key/* include nova/tests/db/nova.austin.sqlite include plugins/xenapi/README include plugins/xenapi/etc/xapi.d/plugins/objectstore From 713978756ce5ba29f444eec6478ffa177c85f232 Mon Sep 17 00:00:00 2001 From: "Dave Walker (Daviey)" Date: Sat, 21 May 2011 13:00:22 +0100 Subject: [PATCH 28/60] Added myself to Authors --- Authors | 1 + 1 file changed, 1 insertion(+) diff --git a/Authors b/Authors index 6741c81f..e7bddd21 100644 --- a/Authors +++ b/Authors @@ -17,6 +17,7 @@ Christian Berendt Chuck Short Cory Wright Dan Prince +Dave Walker David Pravec Dean Troyer Devin Carlen From ebeaa9e4dd3d2140bb7cd07bba455b126c508a97 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Mon, 23 May 2011 14:38:37 -0500 Subject: [PATCH 29/60] initial fudging in of swap disk --- nova/tests/xenapi/stubs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 4833ccb0..d9306900 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -37,7 +37,7 @@ def stubout_instance_snapshot(stubs): sr_ref=sr_ref, sharable=False) vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) vdi_uuid = vdi_rec['uuid'] - return vdi_uuid + return dict(primary_vdi_uuid=vdi_uuid, swap_vdi_uuid=None) stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image) From 1eb1bde49694ff143e9948dcc558a472285123a1 Mon Sep 17 00:00:00 2001 From: "Dave Walker (Daviey)" Date: Mon, 23 May 2011 22:15:10 +0100 Subject: [PATCH 30/60] Added test case for attempting to create a duplicate keypair --- nova/tests/test_api.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py index 97f401b8..7c0331ef 100644 --- a/nova/tests/test_api.py +++ b/nova/tests/test_api.py @@ -224,6 +224,29 @@ class ApiEc2TestCase(test.TestCase): self.manager.delete_project(project) self.manager.delete_user(user) + def test_create_duplicate_key_pair(self): + """Test that, after successfully generating a keypair, + requesting a second keypair with the same name fails sanely""" + self.expect_http() + self.mox.ReplayAll() + keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \ + for x in range(random.randint(4, 8))) + user = self.manager.create_user('fake', 'fake', 'fake') + project = self.manager.create_project('fake', 'fake', 'fake') + # NOTE(vish): create depends on pool, so call helper directly + self.ec2.create_key_pair('test') + + try: + self.ec2.create_key_pair('test') + except EC2ResponseError, e: + if e.code == 'KeyPairExists': + pass + else: + self.fail("Unexpected EC2ResponseError: %s " + "(expected KeyPairExists)" % e.code) + else: + self.fail('Exception not raised.') + def test_get_all_security_groups(self): """Test that we can retrieve security groups""" self.expect_http() From fc60fe8cbe2eaf1c0a8afb16e87980eaa2370929 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Mon, 23 May 2011 16:51:28 -0500 Subject: [PATCH 31/60] fix tests, have glance plugin return json encoded string of vdi uuids --- nova/tests/xenapi/stubs.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/nova/tests/xenapi/stubs.py 
b/nova/tests/xenapi/stubs.py index d9306900..9f6f6431 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -17,6 +17,7 @@ """Stubouts, mocks and fixtures for the test suite""" import eventlet +import json from nova.virt import xenapi_conn from nova.virt.xenapi import fake from nova.virt.xenapi import volume_utils @@ -37,7 +38,7 @@ def stubout_instance_snapshot(stubs): sr_ref=sr_ref, sharable=False) vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) vdi_uuid = vdi_rec['uuid'] - return dict(primary_vdi_uuid=vdi_uuid, swap_vdi_uuid=None) + return {'primary_vdi_uuid': vdi_uuid} stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image) @@ -132,10 +133,16 @@ class FakeSessionForVMTests(fake.SessionBase): def __init__(self, uri): super(FakeSessionForVMTests, self).__init__(uri) - def host_call_plugin(self, _1, _2, _3, _4, _5): + def host_call_plugin(self, _1, _2, plugin, method, _5): sr_ref = fake.get_all('SR')[0] vdi_ref = fake.create_vdi('', False, sr_ref, False) vdi_rec = fake.get_record('VDI', vdi_ref) + if plugin == "glance" and method == "download_vhd": + swap_vdi_ref = fake.create_vdi('', False, sr_ref, False) + swap_vdi_rec = fake.get_record('VDI', swap_vdi_ref) + return '%s' % json.dumps( + {'primary_vdi_uuid': vdi_rec['uuid'], + 'swap_vdi_uuid': swap_vdi_rec['uuid']}) return '%s' % vdi_rec['uuid'] def VM_start(self, _1, ref, _2, _3): From db61c0c5d60694fb9dac5478dc741b53e380bebc Mon Sep 17 00:00:00 2001 From: termie Date: Tue, 24 May 2011 13:19:09 -0700 Subject: [PATCH 32/60] Properly reparse flags when adding dynamic flags --- nova/flags.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/flags.py b/nova/flags.py index 32cb6efa..7304700f 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -110,7 +110,7 @@ class FlagValues(gflags.FlagValues): return name in self.__dict__['__dirty'] def ClearDirty(self): - self.__dict__['__is_dirty'] = [] + self.__dict__['__dirty'] = [] def WasAlreadyParsed(self): return self.__dict__['__was_already_parsed'] @@ -119,11 +119,11 @@ class FlagValues(gflags.FlagValues): if '__stored_argv' not in self.__dict__: return new_flags = FlagValues(self) - for k in self.__dict__['__dirty']: + for k in self.FlagDict().iterkeys(): new_flags[k] = gflags.FlagValues.__getitem__(self, k) new_flags(self.__dict__['__stored_argv']) - for k in self.__dict__['__dirty']: + for k in new_flags.FlagDict().iterkeys(): setattr(self, k, getattr(new_flags, k)) self.ClearDirty() From d608b6e6bd31d9496da6e4ba4a42687fa33295fa Mon Sep 17 00:00:00 2001 From: termie Date: Tue, 24 May 2011 13:19:09 -0700 Subject: [PATCH 33/60] add a test from vish and fix the issues --- nova/flags.py | 1 + nova/tests/test_flags.py | 14 ++++++++++++++ 2 files changed, 15 insertions(+) diff --git a/nova/flags.py b/nova/flags.py index 7304700f..9eaac559 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -122,6 +122,7 @@ class FlagValues(gflags.FlagValues): for k in self.FlagDict().iterkeys(): new_flags[k] = gflags.FlagValues.__getitem__(self, k) + new_flags.Reset() new_flags(self.__dict__['__stored_argv']) for k in new_flags.FlagDict().iterkeys(): setattr(self, k, getattr(new_flags, k)) diff --git a/nova/tests/test_flags.py b/nova/tests/test_flags.py index 707300fc..05319d91 100644 --- a/nova/tests/test_flags.py +++ b/nova/tests/test_flags.py @@ -91,6 +91,20 @@ class FlagsTestCase(test.TestCase): self.assert_('runtime_answer' in self.global_FLAGS) self.assertEqual(self.global_FLAGS.runtime_answer, 60) + def test_long_vs_short_flags(self): + 
flags.DEFINE_string('duplicate_answer_long', 'val', 'desc', + flag_values=self.global_FLAGS) + argv = ['flags_test', '--duplicate_answer=60', 'extra_arg'] + args = self.global_FLAGS(argv) + + self.assert_('duplicate_answer' not in self.global_FLAGS) + self.assert_(self.global_FLAGS.duplicate_answer_long, 60) + + flags.DEFINE_integer('duplicate_answer', 60, 'desc', + flag_values=self.global_FLAGS) + self.assertEqual(self.global_FLAGS.duplicate_answer, 60) + self.assertEqual(self.global_FLAGS.duplicate_answer_long, 'val') + def test_flag_leak_left(self): self.assertEqual(FLAGS.flags_unittest, 'foo') FLAGS.flags_unittest = 'bar' From 2878a53bb8bb7dee6e30a8a374f96f8a4572183d Mon Sep 17 00:00:00 2001 From: termie Date: Tue, 24 May 2011 13:19:09 -0700 Subject: [PATCH 34/60] make fake_flags set defaults instead of runtime values --- bin/nova-dhcpbridge | 7 +++++++ nova/tests/fake_flags.py | 28 ++++++++++++++-------------- nova/tests/real_flags.py | 26 -------------------------- 3 files changed, 21 insertions(+), 40 deletions(-) delete mode 100644 nova/tests/real_flags.py diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index f42dfd6b..5926b97d 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -108,6 +108,13 @@ def main(): interface = os.environ.get('DNSMASQ_INTERFACE', FLAGS.dnsmasq_interface) if int(os.environ.get('TESTING', '0')): from nova.tests import fake_flags + + #if FLAGS.fake_rabbit: + # LOG.debug(_("leasing ip")) + # network_manager = utils.import_object(FLAGS.network_manager) + ## reload(fake_flags) + # from nova.tests import fake_flags + action = argv[1] if action in ['add', 'del', 'old']: mac = argv[2] diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py index 5d7ca98b..ecefc464 100644 --- a/nova/tests/fake_flags.py +++ b/nova/tests/fake_flags.py @@ -21,24 +21,24 @@ from nova import flags FLAGS = flags.FLAGS flags.DECLARE('volume_driver', 'nova.volume.manager') -FLAGS.volume_driver = 'nova.volume.driver.FakeISCSIDriver' -FLAGS.connection_type = 'fake' -FLAGS.fake_rabbit = True +FLAGS['volume_driver'].SetDefault('nova.volume.driver.FakeISCSIDriver') +FLAGS['connection_type'].SetDefault('fake') +FLAGS['fake_rabbit'].SetDefault(True) flags.DECLARE('auth_driver', 'nova.auth.manager') -FLAGS.auth_driver = 'nova.auth.dbdriver.DbDriver' +FLAGS['auth_driver'].SetDefault('nova.auth.dbdriver.DbDriver') flags.DECLARE('network_size', 'nova.network.manager') flags.DECLARE('num_networks', 'nova.network.manager') flags.DECLARE('fake_network', 'nova.network.manager') -FLAGS.network_size = 8 -FLAGS.num_networks = 2 -FLAGS.fake_network = True -FLAGS.image_service = 'nova.image.local.LocalImageService' +FLAGS['network_size'].SetDefault(8) +FLAGS['num_networks'].SetDefault(2) +FLAGS['fake_network'].SetDefault(True) +FLAGS['image_service'].SetDefault('nova.image.local.LocalImageService') flags.DECLARE('num_shelves', 'nova.volume.driver') flags.DECLARE('blades_per_shelf', 'nova.volume.driver') flags.DECLARE('iscsi_num_targets', 'nova.volume.driver') -FLAGS.num_shelves = 2 -FLAGS.blades_per_shelf = 4 -FLAGS.iscsi_num_targets = 8 -FLAGS.verbose = True -FLAGS.sqlite_db = "tests.sqlite" -FLAGS.use_ipv6 = True +FLAGS['num_shelves'].SetDefault(2) +FLAGS['blades_per_shelf'].SetDefault(4) +FLAGS['iscsi_num_targets'].SetDefault(8) +FLAGS['verbose'].SetDefault(True) +FLAGS['sqlite_db'].SetDefault("tests.sqlite") +FLAGS['use_ipv6'].SetDefault(True) diff --git a/nova/tests/real_flags.py b/nova/tests/real_flags.py deleted file mode 100644 index 71da0499..00000000 --- 
a/nova/tests/real_flags.py +++ /dev/null @@ -1,26 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from nova import flags - -FLAGS = flags.FLAGS - -FLAGS.connection_type = 'libvirt' -FLAGS.fake_rabbit = False -FLAGS.fake_network = False -FLAGS.verbose = False From ebf75e3798ad6aa1575df311a89afb3dd55bd7ac Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:42:24 -0700 Subject: [PATCH 37/60] add support to rpc for multicall --- nova/rpc.py | 99 +++++++++++++++++++++++++++++++----------- nova/tests/test_rpc.py | 17 ++++++++ 2 files changed, 90 insertions(+), 26 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index 2116f22c..04198a4a 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -32,8 +32,11 @@ import uuid from carrot import connection as carrot_connection from carrot import messaging +import eventlet from eventlet import greenpool from eventlet import greenthread +from eventlet import queue + from nova import context from nova import exception @@ -131,7 +134,8 @@ class Consumer(messaging.Consumer): self.connection = Connection.recreate() self.backend = self.connection.create_backend() self.declare() - super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks) + return super(Consumer, self).fetch( + no_ack, auto_ack, enable_callbacks) if self.failed_connection: LOG.error(_('Reconnected to queue')) self.failed_connection = False @@ -347,8 +351,9 @@ def _unpack_context(msg): if key.startswith('_context_'): value = msg.pop(key) context_dict[key[9:]] = value + context_dict['msg_id'] = msg.pop('_msg_id', None) LOG.debug(_('unpacked context: %s'), context_dict) - return context.RequestContext.from_dict(context_dict) + return RpcContext.from_dict(context_dict) def _pack_context(msg, context): @@ -365,26 +370,27 @@ def _pack_context(msg, context): msg.update(context) -def call(context, topic, msg): - """Sends a message on a topic and wait for a response.""" +class RpcContext(context.RequestContext): + def __init__(self, *args, **kwargs): + msg_id = kwargs.pop('msg_id', None) + self.msg_id = msg_id + super(RpcContext, self).__init__(*args, **kwargs) + + def reply(self, *args, **kwargs): + msg_reply(self.msg_id, *args, **kwargs) + + +def multicall(context, topic, msg): + """Make a call that returns multiple times.""" LOG.debug(_('Making asynchronous call on %s ...'), topic) msg_id = uuid.uuid4().hex msg.update({'_msg_id': msg_id}) LOG.debug(_('MSG_ID is %s') % (msg_id)) _pack_context(msg, context) - class WaitMessage(object): - def __call__(self, data, message): - """Acks message and sets result.""" - message.ack() - if data['failure']: - self.result = RemoteError(*data['failure']) - else: - self.result = data['result'] - - wait_msg = WaitMessage() conn = Connection.instance() consumer = DirectConsumer(connection=conn, msg_id=msg_id) + wait_msg = MulticallWaiter(consumer) 
consumer.register_callback(wait_msg) conn = Connection.instance() @@ -392,18 +398,59 @@ def call(context, topic, msg): publisher.send(msg) publisher.close() - try: - consumer.wait(limit=1) - except StopIteration: - pass - consumer.close() - # NOTE(termie): this is a little bit of a change from the original - # non-eventlet code where returning a Failure - # instance from a deferred call is very similar to - # raising an exception - if isinstance(wait_msg.result, Exception): - raise wait_msg.result - return wait_msg.result + return wait_msg + + +class MulticallWaiter(object): + def __init__(self, consumer): + self._consumer = consumer + self._results = queue.Queue() + self._closed = False + + def close(self): + self._closed = True + self._consumer.close() + + def __call__(self, data, message): + """Acks message and sets result.""" + message.ack() + if data['failure']: + self._results.put(RemoteError(*data['failure'])) + else: + self._results.put(data['result']) + + def __iter__(self): + return self.wait() + + def wait(self): + # TODO(termie): This is probably really a much simpler issue but am + # trying to solve the problem quickly. This works but + # I'd prefer to dig in and do it the best way later on. + + def _waiter(): + while not self._closed: + try: + self._consumer.wait(limit=1) + except StopIteration: + pass + eventlet.spawn(_waiter) + + while True: + result = self._results.get() + if isinstance(result, Exception): + raise result + if result == None: + self.close() + raise StopIteration + yield result + + +def call(context, topic, msg): + """Sends a message on a topic and wait for a response.""" + rv = multicall(context, topic, msg) + for x in rv: + rv.close() + return x def cast(context, topic, msg): diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index 44d7c91e..92ddfcff 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -49,6 +49,17 @@ class RpcTestCase(test.TestCase): "args": {"value": value}}) self.assertEqual(value, result) + def test_multicall_succeed_three_times(self): + """Get a value through rpc call""" + value = 42 + result = rpc.multicall(self.context, + 'test', + {"method": "echo_three_times", + "args": {"value": value}}) + + for x in result: + self.assertEqual(value, x) + def test_context_passed(self): """Makes sure a context is passed through rpc call""" value = 42 @@ -126,6 +137,12 @@ class TestReceiver(object): LOG.debug(_("Received %s"), context) return context.to_dict() + @staticmethod + def echo_three_times(context, value): + context.reply(value) + context.reply(value) + context.reply(value) + @staticmethod def fail(context, value): """Raises an exception with the value sent in""" From 7eb27bdc028354e5f72f3391fbe1b062aac6ceb9 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:42:24 -0700 Subject: [PATCH 38/60] make the test more expicit --- nova/tests/test_rpc.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index 92ddfcff..acab3e75 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -56,9 +56,10 @@ class RpcTestCase(test.TestCase): 'test', {"method": "echo_three_times", "args": {"value": value}}) - + i = 0 for x in result: - self.assertEqual(value, x) + self.assertEqual(value + i, x) + i += 1 def test_context_passed(self): """Makes sure a context is passed through rpc call""" @@ -140,8 +141,8 @@ class TestReceiver(object): @staticmethod def echo_three_times(context, value): context.reply(value) - context.reply(value) - 
context.reply(value) + context.reply(value + 1) + context.reply(value + 2) @staticmethod def fail(context, value): From 0bb2db220529030b14df5d8c93fa440cb94c4286 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:42:24 -0700 Subject: [PATCH 39/60] add commented out unworking code for yield-based returns --- nova/rpc.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/nova/rpc.py b/nova/rpc.py index 04198a4a..f43291c4 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -201,6 +201,11 @@ class AdapterConsumer(Consumer): try: rval = node_func(context=ctxt, **node_args) if msg_id: + # TODO(termie): re-enable when fix the yielding issue + #if hasattr(rval, 'send'): + # logging.error('rval! %s', rval) + # for x in rval: + # msg_reply(msg_id, x, None) msg_reply(msg_id, rval, None) except Exception as e: logging.exception('Exception during message handling') From f9e1125b4fafbe8a35646c5d32c8c87688ff0271 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: [PATCH 40/60] Add a connection pool for rpc cast/call Use the same rabbit connection for all topic listening and wait to be notified vs doing a 0.1 second poll for each. --- nova/rpc.py | 96 +++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 72 insertions(+), 24 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index f43291c4..62590ca9 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -35,9 +35,9 @@ from carrot import messaging import eventlet from eventlet import greenpool from eventlet import greenthread +from eventlet import pools from eventlet import queue - from nova import context from nova import exception from nova import fakerabbit @@ -92,6 +92,11 @@ class Connection(carrot_connection.BrokerConnection): pass return cls.instance() +class Pool(pools.Pool): + def create(self): + return Connection.instance(new=True) + +ConnectionPool = Pool(max_size=20) class Consumer(messaging.Consumer): """Consumer base class. @@ -163,21 +168,9 @@ class AdapterConsumer(Consumer): self.pool = greenpool.GreenPool(FLAGS.rpc_thread_pool_size) super(AdapterConsumer, self).__init__(connection=connection, topic=topic) + self.register_callback(self.process_data) - def receive(self, *args, **kwargs): - self.pool.spawn_n(self._receive, *args, **kwargs) - - @exception.wrap_exception - def _receive(self, message_data, message): - """Magically looks for a method on the proxy object and calls it. - - Message data should be a dictionary with two keys: - method: string representing the method to call - args: dictionary of arg: value - - Example: {'method': 'echo', 'args': {'value': 42}} - - """ + def process_data(self, message_data, message): LOG.debug(_('received %s') % message_data) msg_id = message_data.pop('_msg_id', None) @@ -194,6 +187,19 @@ class AdapterConsumer(Consumer): LOG.warn(_('no method for message: %s') % message_data) msg_reply(msg_id, _('No method for message: %s') % message_data) return + self.pool.spawn_n(self._process_data, msg_id, ctxt, method, args) + + @exception.wrap_exception + def _process_data(self, msg_id, ctxt, method, args): + """Magically looks for a method on the proxy object and calls it. 
+ + Message data should be a dictionary with two keys: + method: string representing the method to call + args: dictionary of arg: value + + Example: {'method': 'echo', 'args': {'value': 42}} + + """ node_func = getattr(self.proxy, str(method)) node_args = dict((str(k), v) for k, v in args.iteritems()) @@ -214,11 +220,6 @@ class AdapterConsumer(Consumer): return -class Publisher(messaging.Publisher): - """Publisher base class.""" - pass - - class TopicAdapterConsumer(AdapterConsumer): """Consumes messages on a specific topic.""" @@ -251,6 +252,50 @@ class FanoutAdapterConsumer(AdapterConsumer): topic=topic, proxy=proxy) +class ConsumerSet(object): + """Groups consumers to listen on together on a single connection""" + + def __init__(self, conn, consumer_list): + self.consumer_list = set(consumer_list) + self.consumer_set = None + self.init(conn) + + def init(self, conn): + if not conn: + conn = Connection.instance(new=True) + if self.consumer_set: + self.consumer_set.close() + self.consumer_set = messaging.ConsumerSet(conn) + for consumer in self.consumer_list: + consumer.connection = conn + # consumer.backend is set for us + self.consumer_set.add_consumer(consumer) + + def reconnect(self): + self.init(None) + + def wait(self, limit=None): + while True: + it = self.consumer_set.iterconsume(limit=limit) + while True: + try: + it.next() + except StopIteration: + return + except Exception as e: + LOG.error(_("Received exception %s " % str(e) + \ + "while processing consumer")) + fuck + self.reconnect() + # Break to outer loop + break + + +class Publisher(messaging.Publisher): + """Publisher base class.""" + pass + + class TopicPublisher(Publisher): """Publishes messages on a specific topic.""" @@ -315,7 +360,7 @@ def msg_reply(msg_id, reply=None, failure=None): LOG.error(_("Returning exception %s to caller"), message) LOG.error(tb) failure = (failure[0].__name__, str(failure[1]), tb) - conn = Connection.instance() + conn = ConnectionPool.get() publisher = DirectPublisher(connection=conn, msg_id=msg_id) try: publisher.send({'result': reply, 'failure': failure}) @@ -324,7 +369,9 @@ def msg_reply(msg_id, reply=None, failure=None): {'result': dict((k, repr(v)) for k, v in reply.__dict__.iteritems()), 'failure': failure}) + publisher.close() + ConnectionPool.put(conn) class RemoteError(exception.Error): @@ -393,12 +440,11 @@ def multicall(context, topic, msg): LOG.debug(_('MSG_ID is %s') % (msg_id)) _pack_context(msg, context) - conn = Connection.instance() + conn = ConnectionPool.get() consumer = DirectConsumer(connection=conn, msg_id=msg_id) wait_msg = MulticallWaiter(consumer) consumer.register_callback(wait_msg) - conn = Connection.instance() publisher = TopicPublisher(connection=conn, topic=topic) publisher.send(msg) publisher.close() @@ -462,10 +508,11 @@ def cast(context, topic, msg): """Sends a message on a topic without waiting for a response.""" LOG.debug(_('Making asynchronous cast on %s...'), topic) _pack_context(msg, context) - conn = Connection.instance() + conn = ConnectionPool.get() publisher = TopicPublisher(connection=conn, topic=topic) publisher.send(msg) publisher.close() + ConnectionPool.put(conn) def fanout_cast(context, topic, msg): @@ -511,6 +558,7 @@ def send_message(topic, message, wait=True): if wait: consumer.wait() + consumer.close() if __name__ == '__main__': From 58ac7dd16b6db9a33998bcfb0d379c248fa29fce Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: [PATCH 41/60] pep8 and comment fixes --- nova/rpc.py | 25 
++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index 62590ca9..db5aec82 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -92,12 +92,16 @@ class Connection(carrot_connection.BrokerConnection): pass return cls.instance() + class Pool(pools.Pool): + """Class that implements a Pool of Connections""" + def create(self): return Connection.instance(new=True) ConnectionPool = Pool(max_size=20) + class Consumer(messaging.Consumer): """Consumer base class. @@ -171,6 +175,16 @@ class AdapterConsumer(Consumer): self.register_callback(self.process_data) def process_data(self, message_data, message): + """Consumer callback that parses the message for validity and + fires off a thread to call the proxy object method. + + Message data should be a dictionary with two keys: + method: string representing the method to call + args: dictionary of arg: value + + Example: {'method': 'echo', 'args': {'value': 42}} + + """ LOG.debug(_('received %s') % message_data) msg_id = message_data.pop('_msg_id', None) @@ -191,14 +205,8 @@ class AdapterConsumer(Consumer): @exception.wrap_exception def _process_data(self, msg_id, ctxt, method, args): - """Magically looks for a method on the proxy object and calls it. - - Message data should be a dictionary with two keys: - method: string representing the method to call - args: dictionary of arg: value - - Example: {'method': 'echo', 'args': {'value': 42}} - + """Thread that maigcally looks for a method on the proxy + object and calls it. """ node_func = getattr(self.proxy, str(method)) @@ -285,7 +293,6 @@ class ConsumerSet(object): except Exception as e: LOG.error(_("Received exception %s " % str(e) + \ "while processing consumer")) - fuck self.reconnect() # Break to outer loop break From 1b75e5eba049951c093ae5a25a59f0225f54f473 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: [PATCH 42/60] convert fanout_cast to ConnectionPool --- nova/rpc.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/rpc.py b/nova/rpc.py index db5aec82..fdb22869 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -526,10 +526,11 @@ def fanout_cast(context, topic, msg): """Sends a message on a fanout exchange without waiting for a response.""" LOG.debug(_('Making asynchronous fanout cast...')) _pack_context(msg, context) - conn = Connection.instance() + conn = ConnectionPool.get() publisher = FanoutPublisher(topic, connection=conn) publisher.send(msg) publisher.close() + ConnectionPool.put(conn) def generic_response(message_data, message): From 2368bc4fe37ae2f708ebf2b9a69edb1777f7af3c Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: [PATCH 43/60] fakerabbit's declare_consumer should support more than 1 consumer. also: make fakerabbit Backend.consume be an iterator like it should be.. 
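The iterator-based consume() this patch describes lets the caller drive dispatch one message at a time instead of blocking inside the backend, which is what lets ConsumerSet.wait() interleave message handling with other green threads. A minimal, self-contained sketch of the pattern follows (not the fakerabbit code itself; the registry and queue structures are simplified stand-ins for illustration):

import collections

# Toy registry: consumer_tag -> (queue_name, callback), plus per-queue deques.
CONSUMERS = {}
QUEUES = collections.defaultdict(collections.deque)

def consume(limit=None):
    """Generator that delivers pending messages and yields control back
    to the caller after each one, so the caller controls pacing."""
    delivered = 0
    while True:
        progressed = False
        for queue, callback in list(CONSUMERS.values()):
            if QUEUES[queue]:
                callback(QUEUES[queue].popleft())
                delivered += 1
                progressed = True
                yield  # hand control back between messages
                if limit and delivered >= limit:
                    return
        if not progressed:
            # Nothing pending; a real backend would sleep or yield to the
            # hub here (fakerabbit uses greenthread.sleep) rather than spin.
            return

# Example: one registered consumer, two queued messages, driven by the caller.
received = []
CONSUMERS['tag1'] = ('q1', received.append)
QUEUES['q1'].extend(['a', 'b'])
for _ in consume(limit=2):
    pass
assert received == ['a', 'b']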
--- nova/fakerabbit.py | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index a7dee8ca..a29ba9d8 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -77,6 +77,10 @@ class Queue(object): class Backend(base.BaseBackend): + def __init__(self, connection, **kwargs): + super(Backend, self).__init__(connection, **kwargs) + self.consumers = [] + def queue_declare(self, queue, **kwargs): global QUEUES if queue not in QUEUES: @@ -97,16 +101,20 @@ class Backend(base.BaseBackend): EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key) def declare_consumer(self, queue, callback, *args, **kwargs): - self.current_queue = queue - self.current_callback = callback + self.consumers.append((queue, callback)) def consume(self, limit=None): + num = 0 while True: - item = self.get(self.current_queue) - if item: - self.current_callback(item) - raise StopIteration() - greenthread.sleep(0) + for (queue, callback) in self.consumers: + item = self.get(queue) + if item: + callback(item) + num += 1 + yield + if limit and num == limit: + raise StopIteration() + greenthread.sleep(0.1) def get(self, queue, no_ack=False): global QUEUES From 1da5df1d8e6fac63106ca5f0eb839add3407d4fd Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 25 May 2011 15:42:24 -0700 Subject: [PATCH 44/60] fix consumers to actually be deleted and clean up cloud test --- nova/fakerabbit.py | 13 +++++++++---- nova/rpc.py | 13 ++++++++++--- nova/tests/test_cloud.py | 26 ++++++++++---------------- 3 files changed, 29 insertions(+), 23 deletions(-) diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index a29ba9d8..5f3e75c4 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -79,7 +79,7 @@ class Queue(object): class Backend(base.BaseBackend): def __init__(self, connection, **kwargs): super(Backend, self).__init__(connection, **kwargs) - self.consumers = [] + self.consumers = {} def queue_declare(self, queue, **kwargs): global QUEUES @@ -100,13 +100,18 @@ class Backend(base.BaseBackend): ' key %(routing_key)s') % locals()) EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key) - def declare_consumer(self, queue, callback, *args, **kwargs): - self.consumers.append((queue, callback)) + def declare_consumer(self, queue, callback, consumer_tag, *args, **kwargs): + LOG.debug("Adding consumer %s", consumer_tag) + self.consumers[consumer_tag] = (queue, callback) + + def cancel(self, consumer_tag): + LOG.debug("Removing consumer %s", consumer_tag) + del self.consumers[consumer_tag] def consume(self, limit=None): num = 0 while True: - for (queue, callback) in self.consumers: + for (queue, callback) in self.consumers.itervalues(): item = self.get(queue) if item: callback(item) diff --git a/nova/rpc.py b/nova/rpc.py index fdb22869..e2e962fc 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -30,11 +30,11 @@ import time import traceback import uuid +import greenlet from carrot import connection as carrot_connection from carrot import messaging import eventlet from eventlet import greenpool -from eventlet import greenthread from eventlet import pools from eventlet import queue @@ -266,6 +266,7 @@ class ConsumerSet(object): def __init__(self, conn, consumer_list): self.consumer_list = set(consumer_list) self.consumer_set = None + self.enabled = True self.init(conn) def init(self, conn): @@ -283,15 +284,21 @@ class ConsumerSet(object): self.init(None) def wait(self, limit=None): - while True: + running = True + while running: it = 
self.consumer_set.iterconsume(limit=limit) + if not it: + break while True: try: it.next() except StopIteration: return + except greenlet.GreenletExit: + running = False + break except Exception as e: - LOG.error(_("Received exception %s " % str(e) + \ + LOG.error(_("Received exception %s " % type(e) + \ "while processing consumer")) self.reconnect() # Break to outer loop diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 54c0454d..1e14c327 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -17,13 +17,8 @@ # under the License. from base64 import b64decode -import json from M2Crypto import BIO from M2Crypto import RSA -import os -import shutil -import tempfile -import time from eventlet import greenthread @@ -33,12 +28,10 @@ from nova import db from nova import flags from nova import log as logging from nova import rpc -from nova import service from nova import test from nova import utils from nova import exception from nova.auth import manager -from nova.compute import power_state from nova.api.ec2 import cloud from nova.api.ec2 import ec2utils from nova.image import local @@ -79,6 +72,15 @@ class CloudTestCase(test.TestCase): self.stubs.Set(local.LocalImageService, 'show', fake_show) self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show) + # NOTE(vish): set up a manual wait so rpc.cast has a chance to finish + rpc_cast = rpc.cast + + def finish_cast(*args, **kwargs): + rpc_cast(*args, **kwargs) + greenthread.sleep(0.2) + + self.stubs.Set(rpc, 'cast', finish_cast) + def tearDown(self): network_ref = db.project_get_network(self.context, self.project.id) @@ -113,7 +115,6 @@ class CloudTestCase(test.TestCase): self.cloud.describe_addresses(self.context) self.cloud.release_address(self.context, public_ip=address) - greenthread.sleep(0.3) db.floating_ip_destroy(self.context, address) def test_associate_disassociate_address(self): @@ -129,12 +130,10 @@ class CloudTestCase(test.TestCase): self.cloud.associate_address(self.context, instance_id=ec2_id, public_ip=address) - greenthread.sleep(0.3) self.cloud.disassociate_address(self.context, public_ip=address) self.cloud.release_address(self.context, public_ip=address) - greenthread.sleep(0.3) self.network.deallocate_fixed_ip(self.context, fixed) db.instance_destroy(self.context, inst['id']) db.floating_ip_destroy(self.context, address) @@ -306,31 +305,26 @@ class CloudTestCase(test.TestCase): 'instance_type': instance_type, 'max_count': max_count} rv = self.cloud.run_instances(self.context, **kwargs) - greenthread.sleep(0.3) instance_id = rv['instancesSet'][0]['instanceId'] output = self.cloud.get_console_output(context=self.context, instance_id=[instance_id]) self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE?OUTPUT') # TODO(soren): We need this until we can stop polling in the rpc code # for unit tests. - greenthread.sleep(0.3) rv = self.cloud.terminate_instances(self.context, [instance_id]) - greenthread.sleep(0.3) def test_ajax_console(self): + kwargs = {'image_id': 'ami-1'} rv = self.cloud.run_instances(self.context, **kwargs) instance_id = rv['instancesSet'][0]['instanceId'] - greenthread.sleep(0.3) output = self.cloud.get_ajax_console(context=self.context, instance_id=[instance_id]) self.assertEquals(output['url'], '%s/?token=FAKETOKEN' % FLAGS.ajax_console_proxy_url) # TODO(soren): We need this until we can stop polling in the rpc code # for unit tests. 
- greenthread.sleep(0.3) rv = self.cloud.terminate_instances(self.context, [instance_id]) - greenthread.sleep(0.3) def test_key_generation(self): result = self._create_key('test') From 26daa48eb527c9f69b488a4eebfb6fafac1f2e08 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: [PATCH 45/60] catch greenlet.GreenletExit when shutting service down --- nova/rpc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/rpc.py b/nova/rpc.py index e2e962fc..02052ecf 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -24,13 +24,13 @@ No fan-out support yet. """ +import greenlet import json import sys import time import traceback import uuid -import greenlet from carrot import connection as carrot_connection from carrot import messaging import eventlet From 0e22560ea2de3ea6cef9494ed2af8fc58eca8148 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: [PATCH 46/60] Add rpc_conn_pool_size flag for the new connection pool --- nova/rpc.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index 02052ecf..82869fc4 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -50,7 +50,10 @@ LOG = logging.getLogger('nova.rpc') FLAGS = flags.FLAGS -flags.DEFINE_integer('rpc_thread_pool_size', 1024, 'Size of RPC thread pool') +flags.DEFINE_integer('rpc_thread_pool_size', 1024, + 'Size of RPC thread pool') +flags.DEFINE_integer('rpc_conn_pool_size', 30, + 'Size of RPC connection pool') class Connection(carrot_connection.BrokerConnection): @@ -99,7 +102,7 @@ class Pool(pools.Pool): def create(self): return Connection.instance(new=True) -ConnectionPool = Pool(max_size=20) +ConnectionPool = Pool(max_size=FLAGS.rpc_conn_pool_size) class Consumer(messaging.Consumer): From a3a605534df98c482ab5364ce6881ffec1e87705 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: [PATCH 47/60] connection pool tests and make the pool LIFO --- nova/rpc.py | 8 +++++++- nova/tests/test_rpc.py | 42 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+), 1 deletion(-) diff --git a/nova/rpc.py b/nova/rpc.py index 82869fc4..3cc0dadd 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -99,10 +99,16 @@ class Connection(carrot_connection.BrokerConnection): class Pool(pools.Pool): """Class that implements a Pool of Connections""" + # TODO(comstud): Timeout connections not used in a while def create(self): return Connection.instance(new=True) -ConnectionPool = Pool(max_size=FLAGS.rpc_conn_pool_size) +# Create a ConnectionPool to use for RPC calls. 
We'll order the +# pool as a stack (LIFO), so that we can potentially loop through and +# timeout old unused connections at some point +ConnectionPool = Pool( + max_size=FLAGS.rpc_conn_pool_size, + order_as_stack=True) class Consumer(messaging.Consumer): diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index acab3e75..f6420959 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -120,6 +120,48 @@ class RpcTestCase(test.TestCase): "value": value}}) self.assertEqual(value, result) + def test_connectionpool_single(self): + """Test that ConnectionPool recycles a single connection""" + + conn1 = rpc.ConnectionPool.get() + rpc.ConnectionPool.put(conn1) + conn2 = rpc.ConnectionPool.get() + rpc.ConnectionPool.put(conn2) + self.assertEqual(conn1, conn2) + + def test_connectionpool_double(self): + """Test that ConnectionPool returns 2 separate connections + when called consecutively and the pool returns connections LIFO + """ + + conn1 = rpc.ConnectionPool.get() + conn2 = rpc.ConnectionPool.get() + + self.assertNotEqual(conn1, conn2) + rpc.ConnectionPool.put(conn1) + rpc.ConnectionPool.put(conn2) + + conn3 = rpc.ConnectionPool.get() + conn4 = rpc.ConnectionPool.get() + self.assertEqual(conn2, conn3) + self.assertEqual(conn1, conn4) + + def test_connectionpool_limit(self): + """Test connection pool limit and verify all connections + are unique + """ + + max_size = FLAGS.rpc_conn_pool_size + conns = [] + + for i in xrange(max_size): + conns.append(rpc.ConnectionPool.get()) + + self.assertFalse(rpc.ConnectionPool.free_items) + self.assertEqual(rpc.ConnectionPool.current_size, + rpc.ConnectionPool.max_size) + self.assertEqual(len(set(conns)), max_size) + class TestReceiver(object): """Simple Proxy class so the consumer has methods to call From 86d53fa64dba97cba05c2e60c1b38f6d8625180e Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:42:24 -0700 Subject: [PATCH 48/60] bring back commits lost in merge --- nova/rpc.py | 107 ++++++++++++++++++++++++----------------- nova/tests/test_rpc.py | 19 ++++++++ 2 files changed, 82 insertions(+), 44 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index 3cc0dadd..d7d7bb01 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -35,6 +35,7 @@ from carrot import connection as carrot_connection from carrot import messaging import eventlet from eventlet import greenpool +from eventlet import greenthread from eventlet import pools from eventlet import queue @@ -140,30 +141,30 @@ class Consumer(messaging.Consumer): FLAGS.rabbit_max_retries) sys.exit(1) - def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): - """Wraps the parent fetch with some logic for failed connection.""" - # TODO(vish): the logic for failed connections and logging should be - # refactored into some sort of connection manager object - try: - if self.failed_connection: - # NOTE(vish): connection is defined in the parent class, we can - # recreate it as long as we create the backend too - # pylint: disable=W0201 - self.connection = Connection.recreate() - self.backend = self.connection.create_backend() - self.declare() - return super(Consumer, self).fetch( - no_ack, auto_ack, enable_callbacks) - if self.failed_connection: - LOG.error(_('Reconnected to queue')) - self.failed_connection = False - # NOTE(vish): This is catching all errors because we really don't - # want exceptions to be logged 10 times a second if some - # persistent failure occurs. 
- except Exception, e: # pylint: disable=W0703 - if not self.failed_connection: - LOG.exception(_('Failed to fetch message from queue: %s' % e)) - self.failed_connection = True + #def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): + # """Wraps the parent fetch with some logic for failed connection.""" + # # TODO(vish): the logic for failed connections and logging should be + # # refactored into some sort of connection manager object + # try: + # if self.failed_connection: + # # NOTE(vish): connection is defined in the parent class, we can + # # recreate it as long as we create the backend too + # # pylint: disable=W0201 + # self.connection = Connection.recreate() + # self.backend = self.connection.create_backend() + # self.declare() + # return super(Consumer, self).fetch( + # no_ack, auto_ack, enable_callbacks) + # if self.failed_connection: + # LOG.error(_('Reconnected to queue')) + # self.failed_connection = False + # # NOTE(vish): This is catching all errors because we really don't + # # want exceptions to be logged 10 times a second if some + # # persistent failure occurs. + # except Exception, e: # pylint: disable=W0703 + # if not self.failed_connection: + # LOG.exception(_('Failed to fetch message from queue: %s' % e)) + # self.failed_connection = True def attach_to_eventlet(self): """Only needed for unit tests!""" @@ -195,7 +196,7 @@ class AdapterConsumer(Consumer): """ LOG.debug(_('received %s') % message_data) - msg_id = message_data.pop('_msg_id', None) + msg_id = message_data.get('_msg_id', None) ctxt = _unpack_context(message_data) @@ -225,11 +226,14 @@ class AdapterConsumer(Consumer): rval = node_func(context=ctxt, **node_args) if msg_id: # TODO(termie): re-enable when fix the yielding issue - #if hasattr(rval, 'send'): - # logging.error('rval! %s', rval) - # for x in rval: - # msg_reply(msg_id, x, None) - msg_reply(msg_id, rval, None) + if hasattr(rval, 'send'): + logging.error('rval! %s', rval) + for x in rval: + msg_reply(msg_id, x, None) + msg_reply(msg_id, None, None) + else: + msg_reply(msg_id, rval, None) + #msg_reply(msg_id, rval, None) except Exception as e: logging.exception('Exception during message handling') if msg_id: @@ -355,7 +359,7 @@ class DirectConsumer(Consumer): self.routing_key = msg_id self.exchange = msg_id self.auto_delete = True - self.exclusive = True + self.exclusive = False super(DirectConsumer, self).__init__(connection=connection) @@ -387,7 +391,9 @@ def msg_reply(msg_id, reply=None, failure=None): publisher = DirectPublisher(connection=conn, msg_id=msg_id) try: publisher.send({'result': reply, 'failure': failure}) + LOG.error('MSG REPLY SUCCESS') except TypeError: + LOG.error('MSG REPLY FAILURE') publisher.send( {'result': dict((k, repr(v)) for k, v in reply.__dict__.iteritems()), @@ -440,9 +446,9 @@ def _pack_context(msg, context): for args at some point. 
""" - context = dict([('_context_%s' % key, value) - for (key, value) in context.to_dict().iteritems()]) - msg.update(context) + context_d = dict([('_context_%s' % key, value) + for (key, value) in context.to_dict().iteritems()]) + msg.update(context_d) class RpcContext(context.RequestContext): @@ -463,12 +469,13 @@ def multicall(context, topic, msg): LOG.debug(_('MSG_ID is %s') % (msg_id)) _pack_context(msg, context) - conn = ConnectionPool.get() - consumer = DirectConsumer(connection=conn, msg_id=msg_id) + con_conn = ConnectionPool.get() + consumer = DirectConsumer(connection=con_conn, msg_id=msg_id) wait_msg = MulticallWaiter(consumer) consumer.register_callback(wait_msg) - publisher = TopicPublisher(connection=conn, topic=topic) + pub_conn = ConnectionPool.get() + publisher = TopicPublisher(connection=pub_conn, topic=topic) publisher.send(msg) publisher.close() @@ -484,6 +491,7 @@ class MulticallWaiter(object): def close(self): self._closed = True self._consumer.close() + ConnectionPool.put(self._consumer.connection) def __call__(self, data, message): """Acks message and sets result.""" @@ -501,15 +509,26 @@ class MulticallWaiter(object): # trying to solve the problem quickly. This works but # I'd prefer to dig in and do it the best way later on. - def _waiter(): - while not self._closed: - try: - self._consumer.wait(limit=1) - except StopIteration: - pass - eventlet.spawn(_waiter) + #def _waiter(): + # i = 0 + # while not self._closed: + # LOG.error('Iteration #%s (%s)', i, self._consumer.consumer_tag) + # i += 1 + # try: + # self._consumer.wait(limit=1) + # except StopIteration: + # pass + # self._consumer.close() + # ConnectionPool.put(self._consumer.connection) + #eventlet.spawn(_waiter) while True: + rv = None + while rv is None and not self._closed: + rv = self._consumer.fetch(enable_callbacks=True) + time.sleep(0.01) + + LOG.error('RV %s', rv) result = self._results.get() if isinstance(result, Exception): raise result diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index f6420959..e5d99474 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -61,6 +61,18 @@ class RpcTestCase(test.TestCase): self.assertEqual(value + i, x) i += 1 + def test_multicall_succeed_three_times_yield(self): + """Get a value through rpc call""" + value = 42 + result = rpc.multicall(self.context, + 'test', + {"method": "echo_three_times_yield", + "args": {"value": value}}) + i = 0 + for x in result: + self.assertEqual(value + i, x) + i += 1 + def test_context_passed(self): """Makes sure a context is passed through rpc call""" value = 42 @@ -83,6 +95,7 @@ class RpcTestCase(test.TestCase): 'test', {"method": "fail", "args": {"value": value}}) + LOG.error('INNNNNNN BETTTWWWWWWWWWWEEEEEEEEEEN') try: rpc.call(self.context, 'test', @@ -186,6 +199,12 @@ class TestReceiver(object): context.reply(value + 1) context.reply(value + 2) + @staticmethod + def echo_three_times_yield(context, value): + yield value + yield value + 1 + yield value + 2 + @staticmethod def fail(context, value): """Raises an exception with the value sent in""" From ff9ba4e5d7f63ad4aab36e97f175926cdb31bed4 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:42:24 -0700 Subject: [PATCH 49/60] almost everything working with fake_rabbit --- nova/rpc.py | 16 +++++++++++++++- nova/tests/test_cloud.py | 4 ++-- run_tests.py | 1 + 3 files changed, 18 insertions(+), 3 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index d7d7bb01..e1f594a9 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -102,6 +102,7 @@ class 
Pool(pools.Pool): # TODO(comstud): Timeout connections not used in a while def create(self): + LOG.debug('Creating new connection') return Connection.instance(new=True) # Create a ConnectionPool to use for RPC calls. We'll order the @@ -166,6 +167,10 @@ class Consumer(messaging.Consumer): # LOG.exception(_('Failed to fetch message from queue: %s' % e)) # self.failed_connection = True + def close(self, *args, **kwargs): + LOG.debug('Closing consumer %s', self.consumer_tag) + return super(Consumer, self).close(*args, **kwargs) + def attach_to_eventlet(self): """Only needed for unit tests!""" timer = utils.LoopingCall(self.fetch, enable_callbacks=True) @@ -317,6 +322,8 @@ class ConsumerSet(object): # Break to outer loop break + def close(self): + self.consumer_set.close() class Publisher(messaging.Publisher): """Publisher base class.""" @@ -525,12 +532,19 @@ class MulticallWaiter(object): while True: rv = None while rv is None and not self._closed: - rv = self._consumer.fetch(enable_callbacks=True) + try: + rv = self._consumer.fetch(enable_callbacks=True) + except Exception: + self.close() + raise + #rv = self._consumer.fetch(enable_callbacks=True) time.sleep(0.01) LOG.error('RV %s', rv) result = self._results.get() + LOG.error('RESULT %s', result) if isinstance(result, Exception): + self.close() raise result if result == None: self.close() diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 1e14c327..a838dd53 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -87,8 +87,8 @@ class CloudTestCase(test.TestCase): db.network_disassociate(self.context, network_ref['id']) self.manager.delete_project(self.project) self.manager.delete_user(self.user) - self.compute.kill() - self.network.kill() + #self.compute.kill() + #self.network.kill() super(CloudTestCase, self).tearDown() def _create_key(self, name): diff --git a/run_tests.py b/run_tests.py index d5d8acd1..509a60ca 100644 --- a/run_tests.py +++ b/run_tests.py @@ -285,6 +285,7 @@ if __name__ == '__main__': # If any argument looks like a test name but doesn't have "nova.tests" in # front of it, automatically add that so we don't have to type as much argv = [] + logging.getLogger('amqplib').setLevel(logging.DEBUG) for x in sys.argv: if x.startswith('test_'): argv.append('nova.tests.%s' % x) From aba7847b8aae1a8433d10331d0b9e96e0591f711 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:42:24 -0700 Subject: [PATCH 50/60] don't need to use a separate connection --- nova/rpc.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index e1f594a9..a212383f 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -481,8 +481,7 @@ def multicall(context, topic, msg): wait_msg = MulticallWaiter(consumer) consumer.register_callback(wait_msg) - pub_conn = ConnectionPool.get() - publisher = TopicPublisher(connection=pub_conn, topic=topic) + publisher = TopicPublisher(connection=con_conn, topic=topic) publisher.send(msg) publisher.close() From b3338e3ca5548f66c6756b0fd065d76b11c4017b Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 25 May 2011 15:42:24 -0700 Subject: [PATCH 51/60] lots of fixes for rpc and extra imports --- nova/fakerabbit.py | 12 ++++++-- nova/rpc.py | 68 +++++++++++++++++----------------------------- 2 files changed, 34 insertions(+), 46 deletions(-) diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index 5f3e75c4..ff993e29 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -31,6 +31,7 @@ LOG = logging.getLogger("nova.fakerabbit") 
EXCHANGES = {} QUEUES = {} +CONSUMERS = {} class Message(base.BaseMessage): @@ -101,17 +102,20 @@ class Backend(base.BaseBackend): EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key) def declare_consumer(self, queue, callback, consumer_tag, *args, **kwargs): + global CONSUMERS LOG.debug("Adding consumer %s", consumer_tag) - self.consumers[consumer_tag] = (queue, callback) + CONSUMERS[consumer_tag] = (queue, callback) def cancel(self, consumer_tag): + global CONSUMERS LOG.debug("Removing consumer %s", consumer_tag) - del self.consumers[consumer_tag] + del CONSUMERS[consumer_tag] def consume(self, limit=None): + global CONSUMERS num = 0 while True: - for (queue, callback) in self.consumers.itervalues(): + for (queue, callback) in CONSUMERS.itervalues(): item = self.get(queue) if item: callback(item) @@ -147,5 +151,7 @@ class Backend(base.BaseBackend): def reset_all(): global EXCHANGES global QUEUES + global CONSUMERS EXCHANGES = {} QUEUES = {} + CONSUMERS = {} diff --git a/nova/rpc.py b/nova/rpc.py index a212383f..7faed4d3 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -33,9 +33,7 @@ import uuid from carrot import connection as carrot_connection from carrot import messaging -import eventlet from eventlet import greenpool -from eventlet import greenthread from eventlet import pools from eventlet import queue @@ -142,30 +140,30 @@ class Consumer(messaging.Consumer): FLAGS.rabbit_max_retries) sys.exit(1) - #def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): - # """Wraps the parent fetch with some logic for failed connection.""" - # # TODO(vish): the logic for failed connections and logging should be - # # refactored into some sort of connection manager object - # try: - # if self.failed_connection: - # # NOTE(vish): connection is defined in the parent class, we can - # # recreate it as long as we create the backend too - # # pylint: disable=W0201 - # self.connection = Connection.recreate() - # self.backend = self.connection.create_backend() - # self.declare() - # return super(Consumer, self).fetch( - # no_ack, auto_ack, enable_callbacks) - # if self.failed_connection: - # LOG.error(_('Reconnected to queue')) - # self.failed_connection = False - # # NOTE(vish): This is catching all errors because we really don't - # # want exceptions to be logged 10 times a second if some - # # persistent failure occurs. - # except Exception, e: # pylint: disable=W0703 - # if not self.failed_connection: - # LOG.exception(_('Failed to fetch message from queue: %s' % e)) - # self.failed_connection = True + def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): + """Wraps the parent fetch with some logic for failed connection.""" + # TODO(vish): the logic for failed connections and logging should be + # refactored into some sort of connection manager object + try: + if self.failed_connection: + # NOTE(vish): connection is defined in the parent class, we can + # recreate it as long as we create the backend too + # pylint: disable=W0201 + self.connection = Connection.recreate() + self.backend = self.connection.create_backend() + self.declare() + return super(Consumer, self).fetch( + no_ack, auto_ack, enable_callbacks) + if self.failed_connection: + LOG.error(_('Reconnected to queue')) + self.failed_connection = False + # NOTE(vish): This is catching all errors because we really don't + # want exceptions to be logged 10 times a second if some + # persistent failure occurs. 
+ except Exception, e: # pylint: disable=W0703 + if not self.failed_connection: + LOG.exception(_('Failed to fetch message from queue: %s' % e)) + self.failed_connection = True def close(self, *args, **kwargs): LOG.debug('Closing consumer %s', self.consumer_tag) @@ -325,6 +323,7 @@ class ConsumerSet(object): def close(self): self.consumer_set.close() + class Publisher(messaging.Publisher): """Publisher base class.""" pass @@ -511,23 +510,6 @@ class MulticallWaiter(object): return self.wait() def wait(self): - # TODO(termie): This is probably really a much simpler issue but am - # trying to solve the problem quickly. This works but - # I'd prefer to dig in and do it the best way later on. - - #def _waiter(): - # i = 0 - # while not self._closed: - # LOG.error('Iteration #%s (%s)', i, self._consumer.consumer_tag) - # i += 1 - # try: - # self._consumer.wait(limit=1) - # except StopIteration: - # pass - # self._consumer.close() - # ConnectionPool.put(self._consumer.connection) - #eventlet.spawn(_waiter) - while True: rv = None while rv is None and not self._closed: From fa547237aea3e27fc4eaab4fb2cc144ad2d0466e Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:42:25 -0700 Subject: [PATCH 52/60] make sure that using multicall on a call with a single result still functions --- nova/rpc.py | 4 ++-- nova/tests/test_rpc.py | 29 +++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+), 2 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index 7faed4d3..84493271 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -233,10 +233,10 @@ class AdapterConsumer(Consumer): logging.error('rval! %s', rval) for x in rval: msg_reply(msg_id, x, None) - msg_reply(msg_id, None, None) else: msg_reply(msg_id, rval, None) - #msg_reply(msg_id, rval, None) + # This final None tells multicall that it is done. 
+ msg_reply(msg_id, None, None) except Exception as e: logging.exception('Exception during message handling') if msg_id: diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index e5d99474..c1ef60ff 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -49,6 +49,35 @@ class RpcTestCase(test.TestCase): "args": {"value": value}}) self.assertEqual(value, result) + def test_call_succeed_despite_multiple_returns(self): + """Get a value through rpc call""" + value = 42 + result = rpc.call(self.context, 'test', {"method": "echo_three_times", + "args": {"value": value}}) + self.assertEqual(value, result) + + def test_call_succeed_despite_multiple_returns_yield(self): + """Get a value through rpc call""" + value = 42 + result = rpc.call(self.context, 'test', + {"method": "echo_three_times_yield", + "args": {"value": value}}) + self.assertEqual(value, result) + + def test_multicall_succeed_once(self): + """Get a value through rpc call""" + value = 42 + result = rpc.multicall(self.context, + 'test', + {"method": "echo", + "args": {"value": value}}) + i = 0 + for x in result: + if i > 0: + self.fail('should only receive one response') + self.assertEqual(value + i, x) + i += 1 + def test_multicall_succeed_three_times(self): """Get a value through rpc call""" value = 42 From e9f4f8b45cd9d8e1b247e2bb7c13458eb5cff829 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:42:49 -0700 Subject: [PATCH 53/60] cleanup the code for merging --- nova/fakerabbit.py | 4 --- nova/rpc.py | 78 ++++++++++++++++++---------------------- nova/tests/test_cloud.py | 3 -- nova/tests/test_rpc.py | 1 - run_tests.py | 1 - 5 files changed, 35 insertions(+), 52 deletions(-) diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index ff993e29..e7e9dab7 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -78,10 +78,6 @@ class Queue(object): class Backend(base.BaseBackend): - def __init__(self, connection, **kwargs): - super(Backend, self).__init__(connection, **kwargs) - self.consumers = {} - def queue_declare(self, queue, **kwargs): global QUEUES if queue not in QUEUES: diff --git a/nova/rpc.py b/nova/rpc.py index 84493271..8d14494f 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -24,7 +24,6 @@ No fan-out support yet. 
""" -import greenlet import json import sys import time @@ -36,6 +35,7 @@ from carrot import messaging from eventlet import greenpool from eventlet import pools from eventlet import queue +import greenlet from nova import context from nova import exception @@ -50,9 +50,9 @@ LOG = logging.getLogger('nova.rpc') FLAGS = flags.FLAGS flags.DEFINE_integer('rpc_thread_pool_size', 1024, - 'Size of RPC thread pool') + 'Size of RPC thread pool') flags.DEFINE_integer('rpc_conn_pool_size', 30, - 'Size of RPC connection pool') + 'Size of RPC connection pool') class Connection(carrot_connection.BrokerConnection): @@ -96,7 +96,7 @@ class Connection(carrot_connection.BrokerConnection): class Pool(pools.Pool): - """Class that implements a Pool of Connections""" + """Class that implements a Pool of Connections.""" # TODO(comstud): Timeout connections not used in a while def create(self): @@ -152,8 +152,9 @@ class Consumer(messaging.Consumer): self.connection = Connection.recreate() self.backend = self.connection.create_backend() self.declare() - return super(Consumer, self).fetch( - no_ack, auto_ack, enable_callbacks) + return super(Consumer, self).fetch(no_ack, + auto_ack, + enable_callbacks) if self.failed_connection: LOG.error(_('Reconnected to queue')) self.failed_connection = False @@ -165,10 +166,6 @@ class Consumer(messaging.Consumer): LOG.exception(_('Failed to fetch message from queue: %s' % e)) self.failed_connection = True - def close(self, *args, **kwargs): - LOG.debug('Closing consumer %s', self.consumer_tag) - return super(Consumer, self).close(*args, **kwargs) - def attach_to_eventlet(self): """Only needed for unit tests!""" timer = utils.LoopingCall(self.fetch, enable_callbacks=True) @@ -188,8 +185,10 @@ class AdapterConsumer(Consumer): self.register_callback(self.process_data) def process_data(self, message_data, message): - """Consumer callback that parses the message for validity and - fires off a thread to call the proxy object method. + """Consumer callback to call a method on a proxy object. + + Parses the message for validity and fires off a thread to call the + proxy object method. Message data should be a dictionary with two keys: method: string representing the method to call @@ -199,8 +198,8 @@ class AdapterConsumer(Consumer): """ LOG.debug(_('received %s') % message_data) + # This will be popped off in _unpack_context msg_id = message_data.get('_msg_id', None) - ctxt = _unpack_context(message_data) method = message_data.get('method') @@ -228,13 +227,13 @@ class AdapterConsumer(Consumer): try: rval = node_func(context=ctxt, **node_args) if msg_id: - # TODO(termie): re-enable when fix the yielding issue + # Check if the result was a generator if hasattr(rval, 'send'): - logging.error('rval! %s', rval) for x in rval: msg_reply(msg_id, x, None) else: msg_reply(msg_id, rval, None) + # This final None tells multicall that it is done. 
msg_reply(msg_id, None, None) except Exception as e: @@ -277,7 +276,7 @@ class FanoutAdapterConsumer(AdapterConsumer): class ConsumerSet(object): - """Groups consumers to listen on together on a single connection""" + """Groups consumers to listen on together on a single connection.""" def __init__(self, conn, consumer_list): self.consumer_list = set(consumer_list) @@ -365,7 +364,7 @@ class DirectConsumer(Consumer): self.routing_key = msg_id self.exchange = msg_id self.auto_delete = True - self.exclusive = False + self.exclusive = True super(DirectConsumer, self).__init__(connection=connection) @@ -393,20 +392,18 @@ def msg_reply(msg_id, reply=None, failure=None): LOG.error(_("Returning exception %s to caller"), message) LOG.error(tb) failure = (failure[0].__name__, str(failure[1]), tb) - conn = ConnectionPool.get() - publisher = DirectPublisher(connection=conn, msg_id=msg_id) - try: - publisher.send({'result': reply, 'failure': failure}) - LOG.error('MSG REPLY SUCCESS') - except TypeError: - LOG.error('MSG REPLY FAILURE') - publisher.send( - {'result': dict((k, repr(v)) - for k, v in reply.__dict__.iteritems()), - 'failure': failure}) - publisher.close() - ConnectionPool.put(conn) + with ConnectionPool.item() as conn: + publisher = DirectPublisher(connection=conn, msg_id=msg_id) + try: + publisher.send({'result': reply, 'failure': failure}) + except TypeError: + publisher.send( + {'result': dict((k, repr(v)) + for k, v in reply.__dict__.iteritems()), + 'failure': failure}) + + publisher.close() class RemoteError(exception.Error): @@ -518,12 +515,9 @@ class MulticallWaiter(object): except Exception: self.close() raise - #rv = self._consumer.fetch(enable_callbacks=True) time.sleep(0.01) - LOG.error('RV %s', rv) result = self._results.get() - LOG.error('RESULT %s', result) if isinstance(result, Exception): self.close() raise result @@ -545,22 +539,20 @@ def cast(context, topic, msg): """Sends a message on a topic without waiting for a response.""" LOG.debug(_('Making asynchronous cast on %s...'), topic) _pack_context(msg, context) - conn = ConnectionPool.get() - publisher = TopicPublisher(connection=conn, topic=topic) - publisher.send(msg) - publisher.close() - ConnectionPool.put(conn) + with ConnectionPool.item() as conn: + publisher = TopicPublisher(connection=conn, topic=topic) + publisher.send(msg) + publisher.close() def fanout_cast(context, topic, msg): """Sends a message on a fanout exchange without waiting for a response.""" LOG.debug(_('Making asynchronous fanout cast...')) _pack_context(msg, context) - conn = ConnectionPool.get() - publisher = FanoutPublisher(topic, connection=conn) - publisher.send(msg) - publisher.close() - ConnectionPool.put(conn) + with ConnectionPool.item() as conn: + publisher = FanoutPublisher(topic, connection=conn) + publisher.send(msg) + publisher.close() def generic_response(message_data, message): diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index a838dd53..ca3ef7ff 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -87,8 +87,6 @@ class CloudTestCase(test.TestCase): db.network_disassociate(self.context, network_ref['id']) self.manager.delete_project(self.project) self.manager.delete_user(self.user) - #self.compute.kill() - #self.network.kill() super(CloudTestCase, self).tearDown() def _create_key(self, name): @@ -314,7 +312,6 @@ class CloudTestCase(test.TestCase): rv = self.cloud.terminate_instances(self.context, [instance_id]) def test_ajax_console(self): - kwargs = {'image_id': 'ami-1'} rv = 
self.cloud.run_instances(self.context, **kwargs) instance_id = rv['instancesSet'][0]['instanceId'] diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index c1ef60ff..fcecfb35 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -124,7 +124,6 @@ class RpcTestCase(test.TestCase): 'test', {"method": "fail", "args": {"value": value}}) - LOG.error('INNNNNNN BETTTWWWWWWWWWWEEEEEEEEEEN') try: rpc.call(self.context, 'test', diff --git a/run_tests.py b/run_tests.py index 509a60ca..d5d8acd1 100644 --- a/run_tests.py +++ b/run_tests.py @@ -285,7 +285,6 @@ if __name__ == '__main__': # If any argument looks like a test name but doesn't have "nova.tests" in # front of it, automatically add that so we don't have to type as much argv = [] - logging.getLogger('amqplib').setLevel(logging.DEBUG) for x in sys.argv: if x.startswith('test_'): argv.append('nova.tests.%s' % x) From fdb0c5a8745c3d11693274d7764ffde69efede7a Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:43:04 -0700 Subject: [PATCH 54/60] cleanups --- nova/tests/test_rpc.py | 47 +++++++++++++++++++----------------------- 1 file changed, 21 insertions(+), 26 deletions(-) diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index fcecfb35..8523b409 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -31,7 +31,6 @@ LOG = logging.getLogger('nova.tests.rpc') class RpcTestCase(test.TestCase): - """Test cases for rpc""" def setUp(self): super(RpcTestCase, self).setUp() self.conn = rpc.Connection.instance(True) @@ -43,21 +42,18 @@ class RpcTestCase(test.TestCase): self.context = context.get_admin_context() def test_call_succeed(self): - """Get a value through rpc call""" value = 42 result = rpc.call(self.context, 'test', {"method": "echo", "args": {"value": value}}) self.assertEqual(value, result) def test_call_succeed_despite_multiple_returns(self): - """Get a value through rpc call""" value = 42 result = rpc.call(self.context, 'test', {"method": "echo_three_times", "args": {"value": value}}) self.assertEqual(value, result) def test_call_succeed_despite_multiple_returns_yield(self): - """Get a value through rpc call""" value = 42 result = rpc.call(self.context, 'test', {"method": "echo_three_times_yield", @@ -65,7 +61,6 @@ class RpcTestCase(test.TestCase): self.assertEqual(value, result) def test_multicall_succeed_once(self): - """Get a value through rpc call""" value = 42 result = rpc.multicall(self.context, 'test', @@ -79,7 +74,6 @@ class RpcTestCase(test.TestCase): i += 1 def test_multicall_succeed_three_times(self): - """Get a value through rpc call""" value = 42 result = rpc.multicall(self.context, 'test', @@ -91,7 +85,6 @@ class RpcTestCase(test.TestCase): i += 1 def test_multicall_succeed_three_times_yield(self): - """Get a value through rpc call""" value = 42 result = rpc.multicall(self.context, 'test', @@ -103,7 +96,7 @@ class RpcTestCase(test.TestCase): i += 1 def test_context_passed(self): - """Makes sure a context is passed through rpc call""" + """Makes sure a context is passed through rpc call.""" value = 42 result = rpc.call(self.context, 'test', {"method": "context", @@ -111,11 +104,12 @@ class RpcTestCase(test.TestCase): self.assertEqual(self.context.to_dict(), result) def test_call_exception(self): - """Test that exception gets passed back properly + """Test that exception gets passed back properly. rpc.call returns a RemoteError object. The value of the exception is converted to a string, so we convert it back to an int in the test. 
+ """ value = 42 self.assertRaises(rpc.RemoteError, @@ -134,7 +128,7 @@ class RpcTestCase(test.TestCase): self.assertEqual(int(exc.value), value) def test_nested_calls(self): - """Test that we can do an rpc.call inside another call""" + """Test that we can do an rpc.call inside another call.""" class Nested(object): @staticmethod def echo(context, queue, value): @@ -162,8 +156,7 @@ class RpcTestCase(test.TestCase): self.assertEqual(value, result) def test_connectionpool_single(self): - """Test that ConnectionPool recycles a single connection""" - + """Test that ConnectionPool recycles a single connection.""" conn1 = rpc.ConnectionPool.get() rpc.ConnectionPool.put(conn1) conn2 = rpc.ConnectionPool.get() @@ -171,10 +164,13 @@ class RpcTestCase(test.TestCase): self.assertEqual(conn1, conn2) def test_connectionpool_double(self): - """Test that ConnectionPool returns 2 separate connections - when called consecutively and the pool returns connections LIFO - """ + """Test that ConnectionPool returns and reuses separate connections. + When called consecutively we should get separate connections and upon + returning them those connections should be reused for future calls + before generating a new connection. + + """ conn1 = rpc.ConnectionPool.get() conn2 = rpc.ConnectionPool.get() @@ -184,14 +180,11 @@ class RpcTestCase(test.TestCase): conn3 = rpc.ConnectionPool.get() conn4 = rpc.ConnectionPool.get() - self.assertEqual(conn2, conn3) - self.assertEqual(conn1, conn4) + self.assertEqual(conn1, conn3) + self.assertEqual(conn2, conn4) def test_connectionpool_limit(self): - """Test connection pool limit and verify all connections - are unique - """ - + """Test connection pool limit and connection uniqueness.""" max_size = FLAGS.rpc_conn_pool_size conns = [] @@ -205,19 +198,21 @@ class RpcTestCase(test.TestCase): class TestReceiver(object): - """Simple Proxy class so the consumer has methods to call + """Simple Proxy class so the consumer has methods to call. - Uses static methods because we aren't actually storing any state""" + Uses static methods because we aren't actually storing any state. 
+ + """ @staticmethod def echo(context, value): - """Simply returns whatever value is sent in""" + """Simply returns whatever value is sent in.""" LOG.debug(_("Received %s"), value) return value @staticmethod def context(context, value): - """Returns dictionary version of context""" + """Returns dictionary version of context.""" LOG.debug(_("Received %s"), context) return context.to_dict() @@ -235,5 +230,5 @@ class TestReceiver(object): @staticmethod def fail(context, value): - """Raises an exception with the value sent in""" + """Raises an exception with the value sent in.""" raise Exception(value) From 22e1bdcda697f64775e8a10f3a0305feb1d55c30 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:43:04 -0700 Subject: [PATCH 55/60] replace removed import --- nova/tests/test_cloud.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index ca3ef7ff..b64be662 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -19,6 +19,7 @@ from base64 import b64decode from M2Crypto import BIO from M2Crypto import RSA +import os from eventlet import greenthread From bb5e80112bd4b930b46a585a2a3d470010aeb8f6 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:43:04 -0700 Subject: [PATCH 56/60] change the behavior of calling a multicall --- nova/rpc.py | 8 +++++--- nova/tests/test_rpc.py | 4 ++-- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index 8d14494f..493978e5 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -236,6 +236,9 @@ class AdapterConsumer(Consumer): # This final None tells multicall that it is done. msg_reply(msg_id, None, None) + elif hasattr(rval, 'send'): + # NOTE(vish): this iterates through the generator + list(rval) except Exception as e: logging.exception('Exception during message handling') if msg_id: @@ -530,9 +533,8 @@ class MulticallWaiter(object): def call(context, topic, msg): """Sends a message on a topic and wait for a response.""" rv = multicall(context, topic, msg) - for x in rv: - rv.close() - return x + # NOTE(vish): return the last result from the multicall + return list(rv)[-1] def cast(context, topic, msg): diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index 8523b409..35f4a64d 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -51,14 +51,14 @@ class RpcTestCase(test.TestCase): value = 42 result = rpc.call(self.context, 'test', {"method": "echo_three_times", "args": {"value": value}}) - self.assertEqual(value, result) + self.assertEqual(value + 2, result) def test_call_succeed_despite_multiple_returns_yield(self): value = 42 result = rpc.call(self.context, 'test', {"method": "echo_three_times_yield", "args": {"value": value}}) - self.assertEqual(value, result) + self.assertEqual(value + 2, result) def test_multicall_succeed_once(self): value = 42 From 94dc3041b3584ee158d3c725d6c0f33daaf864fa Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Thu, 26 May 2011 19:27:27 +0000 Subject: [PATCH 58/60] Change the return from glance to be a list of dictionaries describing VDIs Fix the rest of the code to account for this Add a test for swap --- nova/tests/test_xenapi.py | 23 +++++++++++++++++++++++ nova/tests/xenapi/stubs.py | 23 ++++++++++++++++++----- 2 files changed, 41 insertions(+), 5 deletions(-) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index be1e3569..18a26789 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -395,6 +395,29 @@ class XenAPIVMTestCase(test.TestCase): 
os_type="linux") self.check_vm_params_for_linux() + def test_spawn_vhd_glance_swapdisk(self): + # Change the default host_call_plugin to one that'll return + # a swap disk + orig_func = stubs.FakeSessionForVMTests.host_call_plugin + + stubs.FakeSessionForVMTests.host_call_plugin = \ + stubs.FakeSessionForVMTests.host_call_plugin_swap + + try: + # We'll steal the above glance linux test + self.test_spawn_vhd_glance_linux() + finally: + # Make sure to put this back + stubs.FakeSessionForVMTests.host_call_plugin = orig_func + + # We should have 2 VBDs. + self.assertEqual(len(self.vm['VBDs']), 2) + # Now test that we have 1. + self.tearDown() + self.setUp() + self.test_spawn_vhd_glance_linux() + self.assertEqual(len(self.vm['VBDs']), 1) + def test_spawn_vhd_glance_windows(self): FLAGS.xenapi_image_service = 'glance' self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None, diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 9f6f6431..35308d95 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -38,7 +38,7 @@ def stubout_instance_snapshot(stubs): sr_ref=sr_ref, sharable=False) vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) vdi_uuid = vdi_rec['uuid'] - return {'primary_vdi_uuid': vdi_uuid} + return [dict(vdi_type='os', vdi_uuid=vdi_uuid)] stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image) @@ -134,16 +134,29 @@ class FakeSessionForVMTests(fake.SessionBase): super(FakeSessionForVMTests, self).__init__(uri) def host_call_plugin(self, _1, _2, plugin, method, _5): + sr_ref = fake.get_all('SR')[0] + vdi_ref = fake.create_vdi('', False, sr_ref, False) + vdi_rec = fake.get_record('VDI', vdi_ref) + if plugin == "glance" and method == "download_vhd": + ret_str = json.dumps([dict(vdi_type='os', + vdi_uuid=vdi_rec['uuid'])]) + else: + ret_str = vdi_rec['uuid'] + return '%s' % ret_str + + def host_call_plugin_swap(self, _1, _2, plugin, method, _5): sr_ref = fake.get_all('SR')[0] vdi_ref = fake.create_vdi('', False, sr_ref, False) vdi_rec = fake.get_record('VDI', vdi_ref) if plugin == "glance" and method == "download_vhd": swap_vdi_ref = fake.create_vdi('', False, sr_ref, False) swap_vdi_rec = fake.get_record('VDI', swap_vdi_ref) - return '%s' % json.dumps( - {'primary_vdi_uuid': vdi_rec['uuid'], - 'swap_vdi_uuid': swap_vdi_rec['uuid']}) - return '%s' % vdi_rec['uuid'] + ret_str = json.dumps( + [dict(vdi_type='os', vdi_uuid=vdi_rec['uuid']), + dict(vdi_type='swap', vdi_uuid=swap_vdi_rec['uuid'])]) + else: + ret_str = vdi_rec['uuid'] + return '%s' % ret_str def VM_start(self, _1, ref, _2, _3): vm = fake.get_record('VM', ref) From 7be1954fa2ad5ff3d8c71986ad7c69d76e01d753 Mon Sep 17 00:00:00 2001 From: termie Date: Thu, 26 May 2011 15:08:53 -0700 Subject: [PATCH 59/60] changes per review --- nova/rpc.py | 17 ++++++++++------- nova/tests/test_rpc.py | 12 +++--------- 2 files changed, 13 insertions(+), 16 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index 493978e5..1ec495bc 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -28,6 +28,7 @@ import json import sys import time import traceback +import types import uuid from carrot import connection as carrot_connection @@ -228,7 +229,7 @@ class AdapterConsumer(Consumer): rval = node_func(context=ctxt, **node_args) if msg_id: # Check if the result was a generator - if hasattr(rval, 'send'): + if isinstance(rval, types.GeneratorType): for x in rval: msg_reply(msg_id, x, None) else: @@ -236,7 +237,7 @@ class AdapterConsumer(Consumer): # This final None tells multicall that it is done. 
msg_reply(msg_id, None, None) - elif hasattr(rval, 'send'): + elif isinstance(rval, types.GeneratorType): # NOTE(vish): this iterates through the generator list(rval) except Exception as e: @@ -281,11 +282,11 @@ class FanoutAdapterConsumer(AdapterConsumer): class ConsumerSet(object): """Groups consumers to listen on together on a single connection.""" - def __init__(self, conn, consumer_list): + def __init__(self, connection, consumer_list): self.consumer_list = set(consumer_list) self.consumer_set = None self.enabled = True - self.init(conn) + self.init(connection) def init(self, conn): if not conn: @@ -316,8 +317,7 @@ class ConsumerSet(object): running = False break except Exception as e: - LOG.error(_("Received exception %s " % type(e) + \ - "while processing consumer")) + LOG.exception(_("Exception while processing consumer")) self.reconnect() # Break to outer loop break @@ -534,7 +534,10 @@ def call(context, topic, msg): """Sends a message on a topic and wait for a response.""" rv = multicall(context, topic, msg) # NOTE(vish): return the last result from the multicall - return list(rv)[-1] + rv = list(rv) + if not rv: + return + return rv[-1] def cast(context, topic, msg): diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index 35f4a64d..ffd748ef 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -66,12 +66,10 @@ class RpcTestCase(test.TestCase): 'test', {"method": "echo", "args": {"value": value}}) - i = 0 - for x in result: + for i, x in enumerate(result): if i > 0: self.fail('should only receive one response') self.assertEqual(value + i, x) - i += 1 def test_multicall_succeed_three_times(self): value = 42 @@ -79,10 +77,8 @@ class RpcTestCase(test.TestCase): 'test', {"method": "echo_three_times", "args": {"value": value}}) - i = 0 - for x in result: + for i, x in enumerate(result): self.assertEqual(value + i, x) - i += 1 def test_multicall_succeed_three_times_yield(self): value = 42 @@ -90,10 +86,8 @@ class RpcTestCase(test.TestCase): 'test', {"method": "echo_three_times_yield", "args": {"value": value}}) - i = 0 - for x in result: + for i, x in enumerate(result): self.assertEqual(value + i, x) - i += 1 def test_context_passed(self): """Makes sure a context is passed through rpc call.""" From 5236b7311755025481d7bc9f7c0f62fa2642815c Mon Sep 17 00:00:00 2001 From: termie Date: Thu, 26 May 2011 17:06:52 -0700 Subject: [PATCH 60/60] fix a minor bug unrelated to this change --- nova/rpc.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/nova/rpc.py b/nova/rpc.py index 1ec495bc..c5277c6a 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -212,7 +212,9 @@ class AdapterConsumer(Consumer): # we just log the message and send an error string # back to the caller LOG.warn(_('no method for message: %s') % message_data) - msg_reply(msg_id, _('No method for message: %s') % message_data) + if msg_id: + msg_reply(msg_id, + _('No method for message: %s') % message_data) return self.pool.spawn_n(self._process_data, msg_id, ctxt, method, args)
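
Taken together, patches 56, 59 and 60 above change the calling semantics in nova/rpc.py: the consumer now detects generator results with isinstance(rval, types.GeneratorType) instead of hasattr(rval, 'send'), and rpc.call() drains the underlying multicall and returns the last reply (or None when nothing was yielded), which is why the tests now expect value + 2 back from echo_three_times. Below is a minimal, self-contained sketch of that contract; the fake_multicall/fake_call helpers are invented for illustration and stand in for the real Carrot-backed plumbing.

    import types


    def _flatten_replies(node_func, **kwargs):
        """Mimic the consumer side: expand a generator result into replies."""
        rval = node_func(**kwargs)
        if isinstance(rval, types.GeneratorType):
            for reply in rval:
                yield reply
        else:
            yield rval


    def fake_multicall(node_func, **kwargs):
        """Stand-in for rpc.multicall(): yields every reply in order."""
        return _flatten_replies(node_func, **kwargs)


    def fake_call(node_func, **kwargs):
        """Stand-in for the new rpc.call(): last reply wins, None if empty."""
        replies = list(fake_multicall(node_func, **kwargs))
        if not replies:
            return None
        return replies[-1]


    def echo_three_times_yield(value):
        for i in range(3):
            yield value + i


    if __name__ == '__main__':
        assert list(fake_multicall(echo_three_times_yield, value=42)) == [42, 43, 44]
        # Matches the updated test expectation: call() returns value + 2.
        assert fake_call(echo_three_times_yield, value=42) == 44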
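
Patch 58 above likewise changes the payload the glance plugin hands back: instead of a single dictionary keyed by role ('primary_vdi_uuid'/'swap_vdi_uuid'), download_vhd now returns a JSON list of dictionaries, each carrying a vdi_type ('os' or 'swap') and a vdi_uuid, with the swap entry present only when the image includes a swap disk. The sketch below shows one way a caller could consume either shape of reply; the parse_vdi_reply helper is invented for illustration and is not part of nova.

    import json


    def parse_vdi_reply(ret_str):
        """Map the plugin's JSON list, e.g.
        [{"vdi_type": "os", "vdi_uuid": "..."},
         {"vdi_type": "swap", "vdi_uuid": "..."}],
        to a dict keyed by vdi_type."""
        return dict((vdi['vdi_type'], vdi['vdi_uuid'])
                    for vdi in json.loads(ret_str))


    if __name__ == '__main__':
        reply = json.dumps([{'vdi_type': 'os', 'vdi_uuid': 'uuid-os'},
                            {'vdi_type': 'swap', 'vdi_uuid': 'uuid-swap'}])
        vdis = parse_vdi_reply(reply)
        assert vdis['os'] == 'uuid-os'
        # The swap entry is optional; images without swap return only 'os'.
        assert vdis.get('swap') == 'uuid-swap'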