From 314e4bfdc380c02763deb09d708f4ea96d8d07e7 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 18 Aug 2011 12:34:01 -0400 Subject: [PATCH 01/46] Updated a number of items to pave the way for new states. --- nova/scheduler/driver.py | 11 +++++----- nova/tests/scheduler/test_scheduler.py | 13 ++++++++---- nova/tests/test_compute.py | 29 +++++++++++++------------- 3 files changed, 29 insertions(+), 24 deletions(-) diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index f28353f05..b788b996f 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -30,6 +30,8 @@ from nova import log as logging from nova import rpc from nova import utils from nova.compute import power_state +from nova.compute import task_state +from nova.compute import vm_state from nova.api.ec2 import ec2utils @@ -104,10 +106,8 @@ class Scheduler(object): dest, block_migration) # Changing instance_state. - db.instance_set_state(context, - instance_id, - power_state.PAUSED, - 'migrating') + values = {"vm_state": vm_state.MIGRATE} + db.instance_update(context, instance_id, values) # Changing volume state for volume_ref in instance_ref['volumes']: @@ -129,8 +129,7 @@ class Scheduler(object): """ # Checking instance is running. - if (power_state.RUNNING != instance_ref['state'] or \ - 'running' != instance_ref['state_description']): + if instance_ref['power_state'] != power_state.RUNNING: instance_id = ec2utils.id_to_ec2_id(instance_ref['id']) raise exception.InstanceNotRunning(instance_id=instance_id) diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py index 158df2a27..1b5e131c9 100644 --- a/nova/tests/scheduler/test_scheduler.py +++ b/nova/tests/scheduler/test_scheduler.py @@ -40,6 +40,7 @@ from nova.scheduler import driver from nova.scheduler import manager from nova.scheduler import multi from nova.compute import power_state +from nova.compute import vm_state FLAGS = flags.FLAGS @@ -94,6 +95,9 @@ class SchedulerTestCase(test.TestCase): inst['vcpus'] = kwargs.get('vcpus', 1) inst['memory_mb'] = kwargs.get('memory_mb', 10) inst['local_gb'] = kwargs.get('local_gb', 20) + inst['vm_state'] = kwargs.get('vm_state', vm_state.ACTIVE) + inst['power_state'] = kwargs.get('power_state', power_state.RUNNING) + inst['task_state'] = kwargs.get('task_state', None) return db.instance_create(ctxt, inst) def test_fallback(self): @@ -271,8 +275,9 @@ class SimpleDriverTestCase(test.TestCase): inst['memory_mb'] = kwargs.get('memory_mb', 20) inst['local_gb'] = kwargs.get('local_gb', 30) inst['launched_on'] = kwargs.get('launghed_on', 'dummy') - inst['state_description'] = kwargs.get('state_description', 'running') - inst['state'] = kwargs.get('state', power_state.RUNNING) + inst['vm_state'] = kwargs.get('vm_state', vm_state.ACTIVE) + inst['task_state'] = kwargs.get('task_state', None) + inst['power_state'] = kwargs.get('power_state', power_state.RUNNING) return db.instance_create(self.context, inst)['id'] def _create_volume(self): @@ -664,14 +669,14 @@ class SimpleDriverTestCase(test.TestCase): block_migration=False) i_ref = db.instance_get(self.context, instance_id) - self.assertTrue(i_ref['state_description'] == 'migrating') + self.assertTrue(i_ref['vm_state'] == vm_state.MIGRATE) db.instance_destroy(self.context, instance_id) db.volume_destroy(self.context, v_ref['id']) def test_live_migration_src_check_instance_not_running(self): """The instance given by instance_id is not running.""" - instance_id = self._create_instance(state_description='migrating') + instance_id = 
self._create_instance(power_state=power_state.NOSTATE) i_ref = db.instance_get(self.context, instance_id) try: diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index e2fa3b140..f310eaff6 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -23,6 +23,7 @@ from nova import compute from nova.compute import instance_types from nova.compute import manager as compute_manager from nova.compute import power_state +from nova.compute import vm_state from nova import context from nova import db from nova.db.sqlalchemy import models @@ -747,8 +748,8 @@ class ComputeTestCase(test.TestCase): 'block_migration': False, 'disk': None}}).\ AndRaise(rpc.RemoteError('', '', '')) - dbmock.instance_update(c, i_ref['id'], {'state_description': 'running', - 'state': power_state.RUNNING, + dbmock.instance_update(c, i_ref['id'], {'vm_state': vm_state.ACTIVE, + 'task_state': None, 'host': i_ref['host']}) for v in i_ref['volumes']: dbmock.volume_update(c, v['id'], {'status': 'in-use'}) @@ -779,8 +780,8 @@ class ComputeTestCase(test.TestCase): 'block_migration': False, 'disk': None}}).\ AndRaise(rpc.RemoteError('', '', '')) - dbmock.instance_update(c, i_ref['id'], {'state_description': 'running', - 'state': power_state.RUNNING, + dbmock.instance_update(c, i_ref['id'], {'vm_state': vm_state.ACTIVE, + 'task_state': None, 'host': i_ref['host']}) self.compute.db = dbmock @@ -825,8 +826,8 @@ class ComputeTestCase(test.TestCase): c = context.get_admin_context() instance_id = self._create_instance() i_ref = db.instance_get(c, instance_id) - db.instance_update(c, i_ref['id'], {'state_description': 'migrating', - 'state': power_state.PAUSED}) + db.instance_update(c, i_ref['id'], {'vm_state': vm_state.MIGRATE, + 'power_state': power_state.PAUSED}) v_ref = db.volume_create(c, {'size': 1, 'instance_id': instance_id}) fix_addr = db.fixed_ip_create(c, {'address': '1.1.1.1', 'instance_id': instance_id}) @@ -887,7 +888,7 @@ class ComputeTestCase(test.TestCase): instances = db.instance_get_all(context.get_admin_context()) LOG.info(_("After force-killing instances: %s"), instances) self.assertEqual(len(instances), 1) - self.assertEqual(power_state.SHUTOFF, instances[0]['state']) + self.assertEqual(power_state.NOSTATE, instances[0]['power_state']) def test_get_all_by_name_regexp(self): """Test searching instances by name (display_name)""" @@ -1307,25 +1308,25 @@ class ComputeTestCase(test.TestCase): """Test searching instances by state""" c = context.get_admin_context() - instance_id1 = self._create_instance({'state': power_state.SHUTDOWN}) + instance_id1 = self._create_instance({'power_state': power_state.SHUTDOWN}) instance_id2 = self._create_instance({ 'id': 2, - 'state': power_state.RUNNING}) + 'power_state': power_state.RUNNING}) instance_id3 = self._create_instance({ 'id': 10, - 'state': power_state.RUNNING}) + 'power_state': power_state.RUNNING}) instances = self.compute_api.get_all(c, - search_opts={'state': power_state.SUSPENDED}) + search_opts={'power_state': power_state.SUSPENDED}) self.assertEqual(len(instances), 0) instances = self.compute_api.get_all(c, - search_opts={'state': power_state.SHUTDOWN}) + search_opts={'power_state': power_state.SHUTDOWN}) self.assertEqual(len(instances), 1) self.assertEqual(instances[0].id, instance_id1) instances = self.compute_api.get_all(c, - search_opts={'state': power_state.RUNNING}) + search_opts={'power_state': power_state.RUNNING}) self.assertEqual(len(instances), 2) instance_ids = [instance.id for instance in instances] 
self.assertTrue(instance_id2 in instance_ids) @@ -1333,7 +1334,7 @@ class ComputeTestCase(test.TestCase): # Test passing a list as search arg instances = self.compute_api.get_all(c, - search_opts={'state': [power_state.SHUTDOWN, + search_opts={'power_state': [power_state.SHUTDOWN, power_state.RUNNING]}) self.assertEqual(len(instances), 3) From 57a0e627005c482ffb673de9fb957a1b70924528 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Fri, 19 Aug 2011 15:13:40 -0400 Subject: [PATCH 02/46] vm_state --> vm_states --- nova/scheduler/driver.py | 4 ++-- nova/tests/scheduler/test_scheduler.py | 2 +- nova/tests/test_compute.py | 8 ++++---- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index b788b996f..8f9be879b 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -31,7 +31,7 @@ from nova import rpc from nova import utils from nova.compute import power_state from nova.compute import task_state -from nova.compute import vm_state +from nova.compute import vm_states from nova.api.ec2 import ec2utils @@ -106,7 +106,7 @@ class Scheduler(object): dest, block_migration) # Changing instance_state. - values = {"vm_state": vm_state.MIGRATE} + values = {"vm_state": vm_states.MIGRATE} db.instance_update(context, instance_id, values) # Changing volume state diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py index 1b5e131c9..629019eaf 100644 --- a/nova/tests/scheduler/test_scheduler.py +++ b/nova/tests/scheduler/test_scheduler.py @@ -40,7 +40,7 @@ from nova.scheduler import driver from nova.scheduler import manager from nova.scheduler import multi from nova.compute import power_state -from nova.compute import vm_state +from nova.compute import vm_states FLAGS = flags.FLAGS diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 188398924..ca1bbc69f 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -23,7 +23,7 @@ from nova import compute from nova.compute import instance_types from nova.compute import manager as compute_manager from nova.compute import power_state -from nova.compute import vm_state +from nova.compute import vm_states from nova import context from nova import db from nova.db.sqlalchemy import models @@ -748,7 +748,7 @@ class ComputeTestCase(test.TestCase): 'block_migration': False, 'disk': None}}).\ AndRaise(rpc.RemoteError('', '', '')) - dbmock.instance_update(c, i_ref['id'], {'vm_state': vm_state.ACTIVE, + dbmock.instance_update(c, i_ref['id'], {'vm_state': vm_states.ACTIVE, 'task_state': None, 'host': i_ref['host']}) for v in i_ref['volumes']: @@ -780,7 +780,7 @@ class ComputeTestCase(test.TestCase): 'block_migration': False, 'disk': None}}).\ AndRaise(rpc.RemoteError('', '', '')) - dbmock.instance_update(c, i_ref['id'], {'vm_state': vm_state.ACTIVE, + dbmock.instance_update(c, i_ref['id'], {'vm_state': vm_states.ACTIVE, 'task_state': None, 'host': i_ref['host']}) @@ -826,7 +826,7 @@ class ComputeTestCase(test.TestCase): c = context.get_admin_context() instance_id = self._create_instance() i_ref = db.instance_get(c, instance_id) - db.instance_update(c, i_ref['id'], {'vm_state': vm_state.MIGRATE, + db.instance_update(c, i_ref['id'], {'vm_state': vm_states.MIGRATE, 'power_state': power_state.PAUSED}) v_ref = db.volume_create(c, {'size': 1, 'instance_id': instance_id}) fix_addr = db.fixed_ip_create(c, {'address': '1.1.1.1', From a2bbdf7845416d59b601155535ccbb82917c797d Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Fri, 19 Aug 
2011 18:34:34 -0400 Subject: [PATCH 03/46] Lots of modifications surrounding the OSAPI to remove any mention of dealing with power states and exclusively using vm_states and task_state modules. Currently there are still a number of tests failing, but this is a stopping place for today. --- nova/tests/vmwareapi/db_fakes.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/nova/tests/vmwareapi/db_fakes.py b/nova/tests/vmwareapi/db_fakes.py index afd672c7a..dd38420ce 100644 --- a/nova/tests/vmwareapi/db_fakes.py +++ b/nova/tests/vmwareapi/db_fakes.py @@ -23,6 +23,8 @@ import time from nova import db from nova import utils +from nova.compute import task_state +from nova.compute import vm_states def stub_out_db_instance_api(stubs): @@ -64,7 +66,8 @@ def stub_out_db_instance_api(stubs): 'image_ref': values['image_ref'], 'kernel_id': values['kernel_id'], 'ramdisk_id': values['ramdisk_id'], - 'state_description': 'scheduling', + 'vm_state': vm_states.BUILD, + 'task_state': task_state.SCHEDULE, 'user_id': values['user_id'], 'project_id': values['project_id'], 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), From 6091ed7238e7f253daa2144db5e2e3b415c470b0 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Mon, 22 Aug 2011 09:54:33 -0400 Subject: [PATCH 04/46] Ec2 API updates. --- nova/tests/test_cloud.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 0793784f8..cce9514ec 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -1163,7 +1163,7 @@ class CloudTestCase(test.TestCase): self.compute = self.start_service('compute') def _wait_for_state(self, ctxt, instance_id, predicate): - """Wait for an stopping instance to be a given state""" + """Wait for a stopped instance to be a given state""" id = ec2utils.ec2_id_to_id(instance_id) while True: info = self.cloud.compute_api.get(context=ctxt, instance_id=id) @@ -1174,12 +1174,12 @@ class CloudTestCase(test.TestCase): def _wait_for_running(self, instance_id): def is_running(info): - return info['state_description'] == 'running' + return info['vm_state'] == 'running' self._wait_for_state(self.context, instance_id, is_running) def _wait_for_stopped(self, instance_id): def is_stopped(info): - return info['state_description'] == 'stopped' + return info['vm_state'] == 'stopped' self._wait_for_state(self.context, instance_id, is_stopped) def _wait_for_terminate(self, instance_id): @@ -1562,7 +1562,7 @@ class CloudTestCase(test.TestCase): 'id': 0, 'root_device_name': '/dev/sdh', 'security_groups': [{'name': 'fake0'}, {'name': 'fake1'}], - 'state_description': 'stopping', + 'vm_state': 'stopped', 'instance_type': {'name': 'fake_type'}, 'kernel_id': 1, 'ramdisk_id': 2, From 153ff39fb9ef0acd6475a05c92c7f12dfebf3787 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Mon, 22 Aug 2011 10:01:13 -0400 Subject: [PATCH 05/46] Renamed task_state to task_states... 
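For reference, a minimal sketch of what the renamed state modules are assumed to provide. Only constants actually referenced elsewhere in this series are shown, and the string values are illustrative, not taken from the real nova/compute modules:

    # nova/compute/vm_states.py -- assumed shape; constant names come from
    # the diffs in this series, values are illustrative.
    ACTIVE = 'active'
    BUILD = 'building'
    MIGRATE = 'migrating'
    STOP = 'stopped'

    # nova/compute/task_states.py -- assumed shape (this patch renames the
    # module from task_state to task_states).
    SCHEDULING = 'scheduling'   # referenced as SCHEDULE here, renamed to
                                # SCHEDULING later in this series
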
--- nova/tests/vmwareapi/db_fakes.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/tests/vmwareapi/db_fakes.py b/nova/tests/vmwareapi/db_fakes.py index dd38420ce..b046071c7 100644 --- a/nova/tests/vmwareapi/db_fakes.py +++ b/nova/tests/vmwareapi/db_fakes.py @@ -23,7 +23,7 @@ import time from nova import db from nova import utils -from nova.compute import task_state +from nova.compute import task_states from nova.compute import vm_states @@ -67,7 +67,7 @@ def stub_out_db_instance_api(stubs): 'kernel_id': values['kernel_id'], 'ramdisk_id': values['ramdisk_id'], 'vm_state': vm_states.BUILD, - 'task_state': task_state.SCHEDULE, + 'task_state': task_states.SCHEDULE, 'user_id': values['user_id'], 'project_id': values['project_id'], 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), From f064a9f10aeff185e4f2c6116d68d2f000f7ff4f Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Mon, 22 Aug 2011 11:57:42 -0400 Subject: [PATCH 06/46] Fix scheduler and integrated tests. --- nova/tests/scheduler/test_scheduler.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py index 629019eaf..a1281ae73 100644 --- a/nova/tests/scheduler/test_scheduler.py +++ b/nova/tests/scheduler/test_scheduler.py @@ -95,7 +95,7 @@ class SchedulerTestCase(test.TestCase): inst['vcpus'] = kwargs.get('vcpus', 1) inst['memory_mb'] = kwargs.get('memory_mb', 10) inst['local_gb'] = kwargs.get('local_gb', 20) - inst['vm_state'] = kwargs.get('vm_state', vm_state.ACTIVE) + inst['vm_state'] = kwargs.get('vm_state', vm_states.ACTIVE) inst['power_state'] = kwargs.get('power_state', power_state.RUNNING) inst['task_state'] = kwargs.get('task_state', None) return db.instance_create(ctxt, inst) @@ -275,7 +275,7 @@ class SimpleDriverTestCase(test.TestCase): inst['memory_mb'] = kwargs.get('memory_mb', 20) inst['local_gb'] = kwargs.get('local_gb', 30) inst['launched_on'] = kwargs.get('launghed_on', 'dummy') - inst['vm_state'] = kwargs.get('vm_state', vm_state.ACTIVE) + inst['vm_state'] = kwargs.get('vm_state', vm_states.ACTIVE) inst['task_state'] = kwargs.get('task_state', None) inst['power_state'] = kwargs.get('power_state', power_state.RUNNING) return db.instance_create(self.context, inst)['id'] @@ -669,7 +669,7 @@ class SimpleDriverTestCase(test.TestCase): block_migration=False) i_ref = db.instance_get(self.context, instance_id) - self.assertTrue(i_ref['vm_state'] == vm_state.MIGRATE) + self.assertTrue(i_ref['vm_state'] == vm_states.MIGRATE) db.instance_destroy(self.context, instance_id) db.volume_destroy(self.context, v_ref['id']) From d0d62489c821b75cca39e7051e52f4f9ba2194be Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Mon, 22 Aug 2011 11:59:08 -0400 Subject: [PATCH 07/46] Fixes/updates to make test_cloud pass. 
--- nova/tests/test_cloud.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index cce9514ec..4d148f39e 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -38,6 +38,7 @@ from nova import test from nova import utils from nova.api.ec2 import cloud from nova.api.ec2 import ec2utils +from nova.compute import vm_states from nova.image import fake @@ -1174,12 +1175,12 @@ class CloudTestCase(test.TestCase): def _wait_for_running(self, instance_id): def is_running(info): - return info['vm_state'] == 'running' + return info['vm_state'] == vm_states.ACTIVE self._wait_for_state(self.context, instance_id, is_running) def _wait_for_stopped(self, instance_id): def is_stopped(info): - return info['vm_state'] == 'stopped' + return info['vm_state'] == vm_states.STOP self._wait_for_state(self.context, instance_id, is_stopped) def _wait_for_terminate(self, instance_id): @@ -1562,7 +1563,7 @@ class CloudTestCase(test.TestCase): 'id': 0, 'root_device_name': '/dev/sdh', 'security_groups': [{'name': 'fake0'}, {'name': 'fake1'}], - 'vm_state': 'stopped', + 'vm_state': vm_states.STOP, 'instance_type': {'name': 'fake_type'}, 'kernel_id': 1, 'ramdisk_id': 2, From 919f163d0c218ac571d353b7a622060a60908b28 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Mon, 22 Aug 2011 13:16:48 -0400 Subject: [PATCH 08/46] Fixes for a number of tests. --- nova/tests/vmwareapi/db_fakes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/vmwareapi/db_fakes.py b/nova/tests/vmwareapi/db_fakes.py index b046071c7..b56956f96 100644 --- a/nova/tests/vmwareapi/db_fakes.py +++ b/nova/tests/vmwareapi/db_fakes.py @@ -67,7 +67,7 @@ def stub_out_db_instance_api(stubs): 'kernel_id': values['kernel_id'], 'ramdisk_id': values['ramdisk_id'], 'vm_state': vm_states.BUILD, - 'task_state': task_states.SCHEDULE, + 'task_state': task_states.SCHEDULING, 'user_id': values['user_id'], 'project_id': values['project_id'], 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), From f017420dcb6dc4f6e9d47e980ba090711eba1a61 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Tue, 23 Aug 2011 10:57:47 -0400 Subject: [PATCH 09/46] PEP8 fixes --- nova/tests/test_compute.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 2cf694d2c..11e1fd540 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -1308,14 +1308,17 @@ class ComputeTestCase(test.TestCase): """Test searching instances by state""" c = context.get_admin_context() - instance_id1 = self._create_instance({'power_state': power_state.SHUTDOWN}) + instance_id1 = self._create_instance({ + 'power_state': power_state.SHUTDOWN, + }) instance_id2 = self._create_instance({ - 'id': 2, - 'power_state': power_state.RUNNING}) + 'id': 2, + 'power_state': power_state.RUNNING, + }) instance_id3 = self._create_instance({ - 'id': 10, - 'power_state': power_state.RUNNING}) - + 'id': 10, + 'power_state': power_state.RUNNING, + }) instances = self.compute_api.get_all(c, search_opts={'power_state': power_state.SUSPENDED}) self.assertEqual(len(instances), 0) From 779ba29077b4bd4a9225e630fabd9518c339eed9 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 24 Aug 2011 10:59:12 -0400 Subject: [PATCH 10/46] Commit with test data in migration. 
--- nova/exception.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/nova/exception.py b/nova/exception.py index 44af8177e..66740019b 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -318,6 +318,9 @@ class InvalidEc2Id(Invalid): class NotFound(NovaException): message = _("Resource could not be found.") + def __init__(self, *args, **kwargs): + super(NotFound, self).__init__(**kwargs) + class FlagNotSet(NotFound): message = _("Required flag %(flag)s not set.") From e73630199c95ca9fd607a5768c3cdd890c337511 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 24 Aug 2011 13:01:20 -0400 Subject: [PATCH 11/46] Fix for trying rebuilds when instance is not active. --- nova/exception.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/exception.py b/nova/exception.py index 66740019b..889d36c96 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -61,7 +61,7 @@ class ApiError(Error): super(ApiError, self).__init__(outstr) -class BuildInProgress(Error): +class RebuildRequiresActiveInstance(Error): pass From 4c51c5597877dcaafcde72aaa3cc0375dee4cdc5 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 24 Aug 2011 18:14:13 -0400 Subject: [PATCH 12/46] Added fix for parallel build test. --- run_tests.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/run_tests.py b/run_tests.py index fd836967e..b9a74769e 100644 --- a/run_tests.py +++ b/run_tests.py @@ -55,6 +55,7 @@ To run a single test module: """ +import eventlet import gettext import heapq import os @@ -62,6 +63,7 @@ import unittest import sys import time +eventlet.monkey_patch() gettext.install('nova', unicode=1) from nose import config From b0df03f3d25a66b6ece0dc71650e58b38d257559 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 25 Aug 2011 09:04:04 -0400 Subject: [PATCH 13/46] Another attempt at fixing hanging test. --- nova/tests/test_xenapi.py | 2 ++ run_tests.py | 2 -- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 2f0559366..061e9ffea 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -24,6 +24,8 @@ import re import stubout import ast +eventlet.monkey_patch() + from nova import db from nova import context from nova import flags diff --git a/run_tests.py b/run_tests.py index b9a74769e..fd836967e 100644 --- a/run_tests.py +++ b/run_tests.py @@ -55,7 +55,6 @@ To run a single test module: """ -import eventlet import gettext import heapq import os @@ -63,7 +62,6 @@ import unittest import sys import time -eventlet.monkey_patch() gettext.install('nova', unicode=1) from nose import config From df097d41d869cf861fc7ac905fcb13dceb6f0e03 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 25 Aug 2011 18:00:32 -0400 Subject: [PATCH 14/46] Test fixup after last review feedback commit. 
--- nova/tests/test_cloud.py | 2 +- nova/tests/test_xenapi.py | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 9d58b7341..9deb5c011 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -1611,7 +1611,7 @@ class CloudTestCase(test.TestCase): self.assertEqual(groupSet, expected_groupSet) self.assertEqual(get_attribute('instanceInitiatedShutdownBehavior'), {'instance_id': 'i-12345678', - 'instanceInitiatedShutdownBehavior': 'stop'}) + 'instanceInitiatedShutdownBehavior': 'stopped'}) self.assertEqual(get_attribute('instanceType'), {'instance_id': 'i-12345678', 'instanceType': 'fake_type'}) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 061e9ffea..2f0559366 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -24,8 +24,6 @@ import re import stubout import ast -eventlet.monkey_patch() - from nova import db from nova import context from nova import flags From b04067d80f6da1d5b65449e83c0b32de5416ebdd Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Fri, 26 Aug 2011 15:40:04 -0700 Subject: [PATCH 15/46] start of kombu implementation, keeping the same RPC interfaces --- nova/rpc/__init__.py | 25 +- nova/rpc/{amqp.py => impl_carrot.py} | 14 + nova/rpc/impl_kombu.py | 426 +++++++++++++++++++++++++++ 3 files changed, 452 insertions(+), 13 deletions(-) rename nova/rpc/{amqp.py => impl_carrot.py} (98%) create mode 100644 nova/rpc/impl_kombu.py diff --git a/nova/rpc/__init__.py b/nova/rpc/__init__.py index bdf7f705b..f102cf0fa 100644 --- a/nova/rpc/__init__.py +++ b/nova/rpc/__init__.py @@ -23,10 +23,18 @@ from nova import flags FLAGS = flags.FLAGS flags.DEFINE_string('rpc_backend', - 'nova.rpc.amqp', - "The messaging module to use, defaults to AMQP.") + 'carrot', + "The messaging module to use, defaults to carrot.") -RPCIMPL = import_object(FLAGS.rpc_backend) +impl_table = {'kombu': 'nova.rpc.impl_kombu', + 'amqp': 'nova.rpc.impl_kombu'} + 'carrot': 'nova.rpc.impl_carrot'} + + +# rpc_backend can be a short name like 'kombu', or it can be the full +# module name +RPCIMPL = import_object(impl_table.get(FLAGS.rpc_backend, + FLAGS.rpc_backend)) def create_connection(new=True): @@ -34,16 +42,7 @@ def create_connection(new=True): def create_consumer(conn, topic, proxy, fanout=False): - if fanout: - return RPCIMPL.FanoutAdapterConsumer( - connection=conn, - topic=topic, - proxy=proxy) - else: - return RPCIMPL.TopicAdapterConsumer( - connection=conn, - topic=topic, - proxy=proxy) + return RPCIMPL.create_consumer(conn, topic, proxy, fanout) def create_consumer_set(conn, consumers): diff --git a/nova/rpc/amqp.py b/nova/rpc/impl_carrot.py similarity index 98% rename from nova/rpc/amqp.py rename to nova/rpc/impl_carrot.py index fe429b266..529f98722 100644 --- a/nova/rpc/amqp.py +++ b/nova/rpc/impl_carrot.py @@ -520,6 +520,20 @@ class MulticallWaiter(object): yield result +def create_consumer(conn, topic, proxy, fanout=False): + """Create a consumer that calls methods in the proxy""" + if fanout: + return FanoutAdapterConsumer( + connection=conn, + topic=topic, + proxy=proxy) + else: + return TopicAdapterConsumer( + connection=conn, + topic=topic, + proxy=proxy) + + def call(context, topic, msg): """Sends a message on a topic and wait for a response.""" rv = multicall(context, topic, msg) diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py new file mode 100644 index 000000000..e609227c9 --- /dev/null +++ b/nova/rpc/impl_kombu.py @@ -0,0 +1,426 @@ +# vim: tabstop=4 
shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova import flags +from nova import log as logging + +import kombu +import kombu.entity +import kombu.messaging +import kombu.connection +import itertools +import sys +import time +import uuid + + +FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.rpc') + + +class QueueBase(object): + """Queue base class.""" + + def __init__(self, channel, callback, tag, **kwargs): + """Init the queue. + + 'channel' is the amqp channel to use + 'callback' is the callback to call when messages are received + 'tag' is a unique ID for the consumer on the channel + + queue name, exchange name, and other kombu options are + passed in here as a dictionary. + """ + self.callback = callback + self.tag = str(tag) + self.kwargs = kwargs + self.queue = None + self.reconnect(channel) + + def reconnect(self, channel): + """Re-create the queue after a rabbit reconnect""" + self.channel = channel + self.kwargs['channel'] = channel + self.queue = kombu.entity.Queue(**self.kwargs) + self.queue.declare() + + def consume(self, *args, **kwargs): + """Consume from this queue. + If a callback is specified in kwargs, use that. Otherwise, + use the callback passed during __init__() + + The callback will be called if a message was read off of the + queue. + + If kwargs['nowait'] is True, then this call will block until + a message is read. + + Messages will automatically be acked if the callback doesn't + raise an exception + """ + + options = {'consumer_tag': self.tag} + options['nowait'] = kwargs.get('nowait', False) + callback = kwargs.get('callback', self.callback) + if not callback: + raise ValueError("No callback defined") + + def _callback(raw_message): + message = self.channel.message_to_python(raw_message) + callback(message.payload) + message.ack() + + self.queue.consume(*args, callback=_callback, **options) + + def cancel(self): + """Cancel the consuming from the queue, if it has started""" + try: + self.queue.cancel(self.tag) + except KeyError, e: + # NOTE(comstud): Kludge to get around a amqplib bug + if str(e) != "u'%s'" % self.tag: + raise + self.queue = None + + +class DirectQueue(QueueBase): + """Queue/consumer class for 'direct'""" + + def __init__(self, channel, msg_id, callback, tag, **kwargs): + """Init a 'direct' queue. 
+ + 'channel' is the amqp channel to use + 'msg_id' is the msg_id to listen on + 'callback' is the callback to call when messages are received + 'tag' is a unique ID for the consumer on the channel + + Other kombu options may be passed + """ + # Default options + options = {'durable': False, + 'auto_delete': True, + 'exclusive': True} + options.update(kwargs) + exchange = kombu.entity.Exchange( + name=msg_id, + type='direct', + durable=options['durable'], + auto_delete=options['auto_delete']) + super(DirectQueue, self).__init__( + channel, + callback, + tag, + name=msg_id, + exchange=exchange, + routing_key=msg_id, + **options) + + +class TopicQueue(QueueBase): + """Queue/consumer class for 'topic'""" + + def __init__(self, channel, topic, callback, tag, **kwargs): + """Init a 'topic' queue. + + 'channel' is the amqp channel to use + 'topic' is the topic to listen on + 'callback' is the callback to call when messages are received + 'tag' is a unique ID for the consumer on the channel + + Other kombu options may be passed + """ + # Default options + options = {'durable': FLAGS.rabbit_durable_queues, + 'auto_delete': False, + 'exclusive': False} + options.update(kwargs) + exchange = kombu.entity.Exchange( + name=FLAGS.control_exchange, + type='topic', + durable=options['durable'], + auto_delete=options['auto_delete']) + super(TopicQueue, self).__init__( + channel, + callback, + tag, + name=topic, + exchange=exchange, + routing_key=topic, + **options) + + +class FanoutQueue(QueueBase): + """Queue/consumer class for 'fanout'""" + + def __init__(self, channel, topic, callback, tag, **kwargs): + """Init a 'fanout' queue. + + 'channel' is the amqp channel to use + 'topic' is the topic to listen on + 'callback' is the callback to call when messages are received + 'tag' is a unique ID for the consumer on the channel + + Other kombu options may be passed + """ + unique = uuid.uuid4().hex + exchange_name = '%s_fanout' % topic + queue_name = '%s_fanout_%s' % (topic, unique) + + # Default options + options = {'durable': False, + 'auto_delete': True, + 'exclusive': True} + options.update(kwargs) + exchange = kombu.entity.Exchange( + name=exchange_name, + type='fanout', + durable=options['durable'], + auto_delete=options['auto_delete']) + super(FanoutQueue, self).__init__( + channel, + callback, + tag, + name=queue_name, + exchange=exchange, + routing_key=topic, + **options) + + +class Publisher(object): + """Base Publisher class""" + + def __init__(self, channel, exchange_name, routing_key, **kwargs): + """Init the Publisher class with the exchange_name, routing_key, + and other options + """ + self.exchange_name = exchange_name + self.routing_key = routing_key + self.kwargs = kwargs + self.reconnect(channel) + + def reconnect(self, channel): + """Re-establish the Producer after a rabbit reconnection""" + self.exchange = kombu.entity.Exchange(name=self.exchange_name, + **self.kwargs) + self.producer = kombu.messaging.Producer(exchange=self.exchange, + channel=channel, routing_key=self.routing_key) + + def send(self, msg): + """Send a message""" + self.producer.publish(msg) + + +class DirectPublisher(Publisher): + """Publisher class for 'direct'""" + def __init__(self, channel, msg_id, **kwargs): + """init a 'direct' publisher. 
+ + Kombu options may be passed as keyword args to override defaults + """ + + options = {'durable': False, + 'auto_delete': True, + 'exclusive': True} + options.update(kwargs) + super(DirectPublisher, self).__init__(channel, + msg_id, + msg_id, + type='direct', + **options) + + +class TopicPublisher(Publisher): + """Publisher class for 'topic'""" + def __init__(self, channel, topic, **kwargs): + """init a 'topic' publisher. + + Kombu options may be passed as keyword args to override defaults + """ + options = {'durable': FLAGS.rabbit_durable_queues, + 'auto_delete': False, + 'exclusive': False} + options.update(kwargs) + super(TopicPublisher, self).__init__(channel, + FLAGS.control_exchange, + topic, + type='topic', + **options) + + +class FanoutPublisher(Publisher): + """Publisher class for 'fanout'""" + def __init__(self, channel, topic, **kwargs): + """init a 'fanout' publisher. + + Kombu options may be passed as keyword args to override defaults + """ + options = {'durable': False, + 'auto_delete': True, + 'exclusive': True} + options.update(kwargs) + super(FanoutPublisher, self).__init__(channel, + '%s_fanout' % topic, + None, + type='fanout', + **options) + + +class Connection(object): + """Connection instance object.""" + + def __init__(self): + self.queues = [] + self.max_retries = FLAGS.rabbit_max_retries + self.interval_start = FLAGS.rabbit_retry_interval + self.interval_stepping = 0 + self.interval_max = FLAGS.rabbit_retry_interval + + self.params = dict(hostname=FLAGS.rabbit_host, + port=FLAGS.rabbit_port, + userid=FLAGS.rabbit_userid, + password=FLAGS.rabbit_password, + virtual_host=FLAGS.rabbit_virtual_host) + if FLAGS.fake_rabbit: + self.params['transport'] = 'memory' + self.connection = None + self.reconnect() + + def reconnect(self): + """Handles reconnecting and re-estblishing queues""" + if self.connection: + try: + self.connection.close() + except self.connection.connection_errors: + pass + time.sleep(1) + self.connection = kombu.connection.Connection(**self.params) + self.queue_num = itertools.count(1) + + try: + self.connection.ensure_connection(errback=self.connect_error, + max_retries=self.max_retries, + interval_start=self.interval_start, + interval_step=self.interval_stepping, + interval_max=self.interval_max) + except self.connection.connection_errors, e: + err_str = str(e) + max_retries = FLAGS.rabbit_max_retries + LOG.error(_('Unable to connect to AMQP server ' + 'after %(max_retries)d tries: %(err_str)s') % locals()) + # NOTE(comstud): Original carrot code exits after so many + # attempts, but I wonder if we should re-try indefinitely + sys.exit(1) + LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d' % + self.params)) + self.channel = self.connection.channel() + for consumer in self.queues: + consumer.reconnect(self.channel) + if self.queues: + LOG.debug(_("Re-established AMQP queues")) + + def get_channel(self): + """Convenience call for bin/clear_rabbit_queues""" + return self.channel + + def connect_error(self, exc, interval): + """Callback when there are connection re-tries by kombu""" + info = self.params.copy() + info['intv'] = interval + info['e'] = exc + LOG.error(_('AMQP server on %(hostname)s:%(port)d is' + ' unreachable: %(e)s. 
Trying again in %(intv)d' + ' seconds.') % info) + + def close(self): + """Close/release this connection""" + self.connection.release() + self.connection = None + + def reset(self): + """Reset a connection so it can be used again""" + self.channel.close() + self.channel = self.connection.channel() + self.queues = [] + + def create_queue(self, queue_cls, topic, callback): + """Create a queue using the class that was passed in and + add it to our list of queues used for consuming + """ + self.queues.append(queue_cls(self.channel, topic, callback, + self.queue_num.next())) + + def consume(self, limit=None): + """Consume from all queues""" + while True: + try: + queues_head = self.queues[:-1] + queues_tail = self.queues[-1] + for queue in queues_head: + queue.consume(nowait=True) + queues_tail.consume(nowait=False) + + for iteration in itertools.count(0): + if limit and iteration >= limit: + raise StopIteration + yield self.connection.drain_events() + except self.connection.connection_errors, e: + LOG.exception(_('Failed to consume message from queue: ' + '%s' % str(e))) + self.reconnect() + + def publisher_send(self, cls, topic, msg): + """Send to a publisher based on the publisher class""" + while True: + publisher = None + try: + publisher = cls(self.channel, topic) + publisher.send(msg) + return + except self.connection.connection_errors, e: + LOG.exception(_('Failed to publish message %s' % str(e))) + try: + self.reconnect() + if publisher: + publisher.reconnect(self.channel) + except self.connection.connection_errors, e: + pass + + def direct_consumer(self, topic, callback): + """Create a 'direct' queue. + In nova's use, this is generally a msg_id queue used for + responses for call/multicall + """ + self.create_queue(DirectQueue, topic, callback) + + def topic_consumer(self, topic, callback=None): + """Create a 'topic' queue.""" + self.create_queue(TopicQueue, topic, callback) + + def fanout_consumer(self, topic, callback): + """Create a 'fanout' queue""" + self.create_queue(FanoutQueue, topic, callback) + + def direct_send(self, msg_id, msg): + """Send a 'direct' message""" + self.publisher_send(DirectPublisher, msg_id, msg) + + def topic_send(self, topic, msg): + """Send a 'topic' message""" + self.publisher_send(TopicPublisher, topic, msg) + + def fanout_send(self, topic, msg): + """Send a 'fanout' message""" + self.publisher_send(FanoutPublisher, topic, msg) From 149703d4354c693ec31488ad05c10e4f09dac9a0 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Fri, 26 Aug 2011 15:59:15 -0700 Subject: [PATCH 16/46] more work done to restore original rpc interfaces. 
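As a rough usage sketch of the restored module-level interface (assuming the entry points shown in these patches -- create_connection, create_consumer, call and cast -- with the topic name and proxy object purely illustrative; a real service drives message consumption from its own loop, and later patches in this series add consume_in_thread() for that):

    from nova import context
    from nova import rpc


    class EchoProxy(object):
        """Placeholder proxy: methods take a context plus keyword args."""
        def echo(self, context, value):
            return value


    # Server side: open a connection and attach a consumer that dispatches
    # incoming messages to the proxy object.
    conn = rpc.create_connection(new=True)
    rpc.create_consumer(conn, 'testtopic', proxy=EchoProxy(), fanout=False)

    # Client side: a blocking call that returns a result, and a
    # fire-and-forget cast.  The message format is the one ProxyCallback
    # expects: {'method': ..., 'args': {...}}.
    ctxt = context.get_admin_context()
    result = rpc.call(ctxt, 'testtopic',
                      {'method': 'echo', 'args': {'value': 42}})
    rpc.cast(ctxt, 'testtopic', {'method': 'echo', 'args': {'value': 43}})

    # Shut the connection down when done (errors ignored, as service.py does).
    conn.close()
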
--- nova/rpc/FIXME | 2 + nova/rpc/__init__.py | 3 +- nova/rpc/impl_kombu.py | 307 ++++++++++++++++++++++++++++++++++++++++- 3 files changed, 306 insertions(+), 6 deletions(-) create mode 100644 nova/rpc/FIXME diff --git a/nova/rpc/FIXME b/nova/rpc/FIXME new file mode 100644 index 000000000..704081802 --- /dev/null +++ b/nova/rpc/FIXME @@ -0,0 +1,2 @@ +Move some code duplication between carrot/kombu into common.py +The other FIXMEs in __init__.py and impl_kombu.py diff --git a/nova/rpc/__init__.py b/nova/rpc/__init__.py index f102cf0fa..9371c2ab3 100644 --- a/nova/rpc/__init__.py +++ b/nova/rpc/__init__.py @@ -27,7 +27,7 @@ flags.DEFINE_string('rpc_backend', "The messaging module to use, defaults to carrot.") impl_table = {'kombu': 'nova.rpc.impl_kombu', - 'amqp': 'nova.rpc.impl_kombu'} + 'amqp': 'nova.rpc.impl_kombu', 'carrot': 'nova.rpc.impl_carrot'} @@ -46,6 +46,7 @@ def create_consumer(conn, topic, proxy, fanout=False): def create_consumer_set(conn, consumers): + # FIXME(comstud): replace however necessary return RPCIMPL.ConsumerSet(connection=conn, consumer_list=consumers) diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py index e609227c9..a222bb885 100644 --- a/nova/rpc/impl_kombu.py +++ b/nova/rpc/impl_kombu.py @@ -30,6 +30,11 @@ import uuid FLAGS = flags.FLAGS LOG = logging.getLogger('nova.rpc') +flags.DEFINE_integer('rpc_conn_pool_size', 30, + 'Size of RPC connection pool') +flags.DEFINE_integer('rpc_thread_pool_size', 1024, + 'Size of RPC thread pool') + class QueueBase(object): """Queue base class.""" @@ -298,6 +303,16 @@ class Connection(object): self.connection = None self.reconnect() + @classmethod + def instance(cls, new=True): + """Returns the instance.""" + if new or not hasattr(cls, '_instance'): + if new: + return cls() + else: + cls._instance = cls() + return cls._instance + def reconnect(self): """Handles reconnecting and re-estblishing queues""" if self.connection: @@ -359,8 +374,10 @@ class Connection(object): """Create a queue using the class that was passed in and add it to our list of queues used for consuming """ - self.queues.append(queue_cls(self.channel, topic, callback, - self.queue_num.next())) + queue = queue_cls(self.channel, topic, callback, + self.queue_num.next()) + self.queues.append(queue) + return queue def consume(self, limit=None): """Consume from all queues""" @@ -403,15 +420,15 @@ class Connection(object): In nova's use, this is generally a msg_id queue used for responses for call/multicall """ - self.create_queue(DirectQueue, topic, callback) + return self.create_queue(DirectQueue, topic, callback) def topic_consumer(self, topic, callback=None): """Create a 'topic' queue.""" - self.create_queue(TopicQueue, topic, callback) + return self.create_queue(TopicQueue, topic, callback) def fanout_consumer(self, topic, callback): """Create a 'fanout' queue""" - self.create_queue(FanoutQueue, topic, callback) + return self.create_queue(FanoutQueue, topic, callback) def direct_send(self, msg_id, msg): """Send a 'direct' message""" @@ -424,3 +441,283 @@ class Connection(object): def fanout_send(self, topic, msg): """Send a 'fanout' message""" self.publisher_send(FanoutPublisher, topic, msg) + + +class Pool(pools.Pool): + """Class that implements a Pool of Connections.""" + + # TODO(comstud): Timeout connections not used in a while + def create(self): + LOG.debug('Creating new connection') + return RPCIMPL.Connection() + +# Create a ConnectionPool to use for RPC calls. 
We'll order the +# pool as a stack (LIFO), so that we can potentially loop through and +# timeout old unused connections at some point +ConnectionPool = Pool( + max_size=FLAGS.rpc_conn_pool_size, + order_as_stack=True) + + +class ConnectionContext(object): + def __init__(self, pooled=True): + self.connection = None + if pooled: + self.connection = ConnectionPool.get() + else: + self.connection = RPCIMPL.Connection() + self.pooled = pooled + + def __enter__(self): + return self + + def _done(self): + if self.connection: + if self.pooled: + # Reset the connection so it's ready for the next caller + # to grab from the pool + self.connection.reset() + ConnectionPool.put(self.connection) + else: + try: + self.connection.close() + except Exception: + # There's apparently a bug in kombu 'memory' transport + # which causes an assert failure. + # But, we probably want to ignore all exceptions when + # trying to close a connection, anyway... + pass + self.connection = None + + def __exit__(self, t, v, tb): + """end if 'with' statement. We're done here.""" + self._done() + + def __del__(self): + """Put Connection back into the pool if this ConnectionContext + is being deleted + """ + self._done() + + def close(self): + self._done() + + def __getattr__(self, key): + if self.connection: + return getattr(self.connection, key) + else: + raise exception.InvalidRPCConnectionReuse() + + +class ProxyCallback(object): + """Calls methods on a proxy object based on method and args.""" + + def __init__(self, proxy): + self.proxy = proxy + self.pool = greenpool.GreenPool(FLAGS.rpc_thread_pool_size) + + def __call__(self, message_data): + """Consumer callback to call a method on a proxy object. + + Parses the message for validity and fires off a thread to call the + proxy object method. + + Message data should be a dictionary with two keys: + method: string representing the method to call + args: dictionary of arg: value + + Example: {'method': 'echo', 'args': {'value': 42}} + + """ + LOG.debug(_('received %s') % message_data) + ctxt = _unpack_context(message_data) + method = message_data.get('method') + args = message_data.get('args', {}) + if not method: + LOG.warn(_('no method for message: %s') % message_data) + ctxt.reply(_('No method for message: %s') % message_data) + return + self.pool.spawn_n(self._process_data, ctxt, method, args) + + @exception.wrap_exception() + def _process_data(self, ctxt, method, args): + """Thread that maigcally looks for a method on the proxy + object and calls it. + """ + + node_func = getattr(self.proxy, str(method)) + node_args = dict((str(k), v) for k, v in args.iteritems()) + # NOTE(vish): magic is fun! + try: + rval = node_func(context=ctxt, **node_args) + # Check if the result was a generator + if isinstance(rval, types.GeneratorType): + for x in rval: + ctxt.reply(x, None) + else: + ctxt.reply(rval, None) + # This final None tells multicall that it is done. + ctxt.reply(None, None) + except Exception as e: + logging.exception('Exception during message handling') + ctxt.reply(None, sys.exc_info()) + return + + +def _unpack_context(msg): + """Unpack context from msg.""" + context_dict = {} + for key in list(msg.keys()): + # NOTE(vish): Some versions of python don't like unicode keys + # in kwargs. 
+ key = str(key) + if key.startswith('_context_'): + value = msg.pop(key) + context_dict[key[9:]] = value + context_dict['msg_id'] = msg.pop('_msg_id', None) + LOG.debug(_('unpacked context: %s'), context_dict) + return RpcContext.from_dict(context_dict) + + +def _pack_context(msg, context): + """Pack context into msg. + + Values for message keys need to be less than 255 chars, so we pull + context out into a bunch of separate keys. If we want to support + more arguments in rabbit messages, we may want to do the same + for args at some point. + + """ + context_d = dict([('_context_%s' % key, value) + for (key, value) in context.to_dict().iteritems()]) + msg.update(context_d) + + +class RpcContext(context.RequestContext): + def __init__(self, *args, **kwargs): + msg_id = kwargs.pop('msg_id', None) + self.msg_id = msg_id + super(RpcContext, self).__init__(*args, **kwargs) + + def reply(self, *args, **kwargs): + if self.msg_id: + msg_reply(self.msg_id, *args, **kwargs) + + +class MulticallWaiter(object): + def __init__(self, connection): + self._connection = connection + self._iterator = connection.consume() + self._result = None + self._done = False + + def done(self): + self._done = True + self._connection = None + + def __call__(self, data): + """The consume() callback will call this. Store the result.""" + if data['failure']: + self._result = RemoteError(*data['failure']) + else: + self._result = data['result'] + + def __iter__(self): + if self._done: + raise StopIteration + while True: + self._iterator.next() + result = self._result + if isinstance(result, Exception): + self.done() + raise result + if result == None: + self.done() + raise StopIteration + yield result + + +def create_consumer(conn, topic, proxy, fanout=False): + """Create a consumer that calls a method in a proxy object""" + if fanout: + return conn.fanout_consumer(topic, ProxyCallback(proxy)) + else: + return conn.topic_consumer(topic, ProxyCallback(proxy)) + + +def create_consumer_set(conn, consumers): + # FIXME(comstud): Replace this however necessary + # Returns an object that you can call .wait() on to consume + # all queues? + # Needs to have a .close() which will stop consuming? + # Needs to also have an attach_to_eventlet method for tests? + raise NotImplemented + + +def multicall(context, topic, msg): + """Make a call that returns multiple times.""" + # Can't use 'with' for multicall, as it returns an iterator + # that will continue to use the connection. 
When it's done, + # connection.close() will get called which will put it back into + # the pool + LOG.debug(_('Making asynchronous call on %s ...'), topic) + msg_id = uuid.uuid4().hex + msg.update({'_msg_id': msg_id}) + LOG.debug(_('MSG_ID is %s') % (msg_id)) + _pack_context(msg, context) + + conn = ConnectionContext() + wait_msg = MulticallWaiter(conn) + conn.direct_consumer(msg_id, wait_msg) + conn.topic_send(topic, msg) + + return wait_msg + + +def call(context, topic, msg): + """Sends a message on a topic and wait for a response.""" + rv = multicall(context, topic, msg) + # NOTE(vish): return the last result from the multicall + rv = list(rv) + if not rv: + return + return rv[-1] + + +def cast(context, topic, msg): + """Sends a message on a topic without waiting for a response.""" + LOG.debug(_('Making asynchronous cast on %s...'), topic) + _pack_context(msg, context) + with ConnectionContext() as conn: + conn.topic_send(topic, msg) + + +def fanout_cast(context, topic, msg): + """Sends a message on a fanout exchange without waiting for a response.""" + LOG.debug(_('Making asynchronous fanout cast...')) + _pack_context(msg, context) + with ConnectionContext() as conn: + conn.fanout_send(topic, msg) + + +def msg_reply(msg_id, reply=None, failure=None): + """Sends a reply or an error on the channel signified by msg_id. + + Failure should be a sys.exc_info() tuple. + + """ + with ConnectionContext() as conn: + if failure: + message = str(failure[1]) + tb = traceback.format_exception(*failure) + LOG.error(_("Returning exception %s to caller"), message) + LOG.error(tb) + failure = (failure[0].__name__, str(failure[1]), tb) + + try: + msg = {'result': reply, 'failure': failure} + except TypeError: + msg = {'result': dict((k, repr(v)) + for k, v in reply.__dict__.iteritems()), + 'failure': failure} + conn.direct_send(msg_id, msg) From da631b68be6eebb79926de361e2109861487ce14 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Fri, 26 Aug 2011 16:00:50 -0700 Subject: [PATCH 17/46] more fixes --- nova/rpc/impl_kombu.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py index a222bb885..cfef421c6 100644 --- a/nova/rpc/impl_kombu.py +++ b/nova/rpc/impl_kombu.py @@ -15,7 +15,7 @@ # under the License. from nova import flags -from nova import log as logging +from nova.rpc.common import RemoteError, LOG import kombu import kombu.entity @@ -28,7 +28,6 @@ import uuid FLAGS = flags.FLAGS -LOG = logging.getLogger('nova.rpc') flags.DEFINE_integer('rpc_conn_pool_size', 30, 'Size of RPC connection pool') @@ -559,7 +558,7 @@ class ProxyCallback(object): # This final None tells multicall that it is done. 
ctxt.reply(None, None) except Exception as e: - logging.exception('Exception during message handling') + LOG.exception('Exception during message handling') ctxt.reply(None, sys.exc_info()) return From e68be229a22f783d8644d82cdfd16f6f6d504407 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Fri, 26 Aug 2011 16:04:34 -0700 Subject: [PATCH 18/46] flag for kombu connection backoff on retries --- nova/flags.py | 1 + nova/rpc/impl_kombu.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/flags.py b/nova/flags.py index 95000df1b..ac70386e7 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -303,6 +303,7 @@ DEFINE_string('rabbit_userid', 'guest', 'rabbit userid') DEFINE_string('rabbit_password', 'guest', 'rabbit password') DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host') DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval') +DEFINE_integer('rabbit_interval_stepping', 2, 'rabbit connection retry backoff in seconds') DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts') DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to') DEFINE_boolean('rabbit_durable_queues', False, 'use durable queues') diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py index cfef421c6..65199808e 100644 --- a/nova/rpc/impl_kombu.py +++ b/nova/rpc/impl_kombu.py @@ -289,7 +289,7 @@ class Connection(object): self.queues = [] self.max_retries = FLAGS.rabbit_max_retries self.interval_start = FLAGS.rabbit_retry_interval - self.interval_stepping = 0 + self.interval_stepping = FLAGS.rabbit_interval_stepping self.interval_max = FLAGS.rabbit_retry_interval self.params = dict(hostname=FLAGS.rabbit_host, From e95c5f9ee51f8bd137abc1f41c5c05dfddb17a80 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Sat, 27 Aug 2011 14:07:55 -0700 Subject: [PATCH 19/46] Default rabbit max_retries to forever Modify carrot code to handle retry backoffs and obey max_retries = forever Fix some kombu issues from cut-n-paste Service should make sure to close the RPC connection --- nova/flags.py | 6 +++--- nova/rpc/impl_carrot.py | 28 +++++++++++++++++++++------- nova/rpc/impl_kombu.py | 35 +++++++++++++++++------------------ nova/service.py | 6 ++++++ 4 files changed, 47 insertions(+), 28 deletions(-) diff --git a/nova/flags.py b/nova/flags.py index ac70386e7..e09b4721a 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -302,9 +302,9 @@ DEFINE_bool('rabbit_use_ssl', False, 'connect over SSL') DEFINE_string('rabbit_userid', 'guest', 'rabbit userid') DEFINE_string('rabbit_password', 'guest', 'rabbit password') DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host') -DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval') -DEFINE_integer('rabbit_interval_stepping', 2, 'rabbit connection retry backoff in seconds') -DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts') +DEFINE_integer('rabbit_retry_interval', 1, 'rabbit connection retry interval to start') +DEFINE_integer('rabbit_retry_backoff', 2, 'rabbit connection retry backoff in seconds') +DEFINE_integer('rabbit_max_retries', 0, 'maximum rabbit connection attempts (0=try forever)') DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to') DEFINE_boolean('rabbit_durable_queues', False, 'use durable queues') DEFINE_list('enabled_apis', ['ec2', 'osapi'], diff --git a/nova/rpc/impl_carrot.py b/nova/rpc/impl_carrot.py index 529f98722..117489bc6 100644 --- a/nova/rpc/impl_carrot.py +++ b/nova/rpc/impl_carrot.py @@ -119,25 +119,34 @@ 
class Consumer(messaging.Consumer): """ def __init__(self, *args, **kwargs): - for i in xrange(FLAGS.rabbit_max_retries): - if i > 0: - time.sleep(FLAGS.rabbit_retry_interval) + max_retries = FALGS.rabbit_max_retries + sleep_time = FLAGS.rabbit_retry_interval + tries = 0 + while True: + tries += 1 + if tries > 1: + time.sleep(sleep_time) + # backoff for next retry attempt.. if there is one + sleep_time += FLAGS.rabbit_retry_backoff + if sleep_time > 30: + sleep_time = 30 try: super(Consumer, self).__init__(*args, **kwargs) self.failed_connection = False break except Exception as e: # Catching all because carrot sucks + self.failed_connection = True + if max_retries > 0 and tries == max_retries: + break fl_host = FLAGS.rabbit_host fl_port = FLAGS.rabbit_port - fl_intv = FLAGS.rabbit_retry_interval + fl_intv = sleep_time LOG.error(_('AMQP server on %(fl_host)s:%(fl_port)d is' ' unreachable: %(e)s. Trying again in %(fl_intv)d' ' seconds.') % locals()) - self.failed_connection = True if self.failed_connection: LOG.error(_('Unable to connect to AMQP server ' - 'after %d tries. Shutting down.'), - FLAGS.rabbit_max_retries) + 'after %(tries)d tries. Shutting down.') % locals()) sys.exit(1) def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): @@ -520,6 +529,11 @@ class MulticallWaiter(object): yield result +def create_connection(new=True): + """Create a connection""" + return Connection.instance(new=new) + + def create_consumer(conn, topic, proxy, fanout=False): """Create a consumer that calls methods in the proxy""" if fanout: diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py index 65199808e..db839dd2a 100644 --- a/nova/rpc/impl_kombu.py +++ b/nova/rpc/impl_kombu.py @@ -288,9 +288,13 @@ class Connection(object): def __init__(self): self.queues = [] self.max_retries = FLAGS.rabbit_max_retries + # Try forever? + if self.max_retries <= 0: + self.max_retries = None self.interval_start = FLAGS.rabbit_retry_interval - self.interval_stepping = FLAGS.rabbit_interval_stepping - self.interval_max = FLAGS.rabbit_retry_interval + self.interval_stepping = FLAGS.rabbit_retry_backoff + # max retry-interval = 30 seconds + self.interval_max = 30 self.params = dict(hostname=FLAGS.rabbit_host, port=FLAGS.rabbit_port, @@ -302,16 +306,6 @@ class Connection(object): self.connection = None self.reconnect() - @classmethod - def instance(cls, new=True): - """Returns the instance.""" - if new or not hasattr(cls, '_instance'): - if new: - return cls() - else: - cls._instance = cls() - return cls._instance - def reconnect(self): """Handles reconnecting and re-estblishing queues""" if self.connection: @@ -330,12 +324,12 @@ class Connection(object): interval_step=self.interval_stepping, interval_max=self.interval_max) except self.connection.connection_errors, e: + # We should only get here if max_retries is set. We'll go + # ahead and exit in this case. 
err_str = str(e) - max_retries = FLAGS.rabbit_max_retries + max_retries = self.max_retries LOG.error(_('Unable to connect to AMQP server ' 'after %(max_retries)d tries: %(err_str)s') % locals()) - # NOTE(comstud): Original carrot code exits after so many - # attempts, but I wonder if we should re-try indefinitely sys.exit(1) LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d' % self.params)) @@ -448,7 +442,7 @@ class Pool(pools.Pool): # TODO(comstud): Timeout connections not used in a while def create(self): LOG.debug('Creating new connection') - return RPCIMPL.Connection() + return Connection() # Create a ConnectionPool to use for RPC calls. We'll order the # pool as a stack (LIFO), so that we can potentially loop through and @@ -464,7 +458,7 @@ class ConnectionContext(object): if pooled: self.connection = ConnectionPool.get() else: - self.connection = RPCIMPL.Connection() + self.connection = Connection() self.pooled = pooled def __enter__(self): @@ -636,6 +630,11 @@ class MulticallWaiter(object): yield result +def create_connection(new=True): + """Create a connection""" + return ConnectionContext(pooled=not new) + + def create_consumer(conn, topic, proxy, fanout=False): """Create a consumer that calls a method in a proxy object""" if fanout: @@ -649,7 +648,7 @@ def create_consumer_set(conn, consumers): # Returns an object that you can call .wait() on to consume # all queues? # Needs to have a .close() which will stop consuming? - # Needs to also have an attach_to_eventlet method for tests? + # Needs to also have an method for tests? raise NotImplemented diff --git a/nova/service.py b/nova/service.py index 959e79052..a872a36ee 100644 --- a/nova/service.py +++ b/nova/service.py @@ -242,6 +242,12 @@ class Service(object): self.consumer_set_thread.wait() except greenlet.GreenletExit: pass + # Try to shut the connection down, but if we get any sort of + # errors, go ahead and ignore them.. as we're shutting down anyway + try: + self.conn.close() + except Exception: + pass for x in self.timers: try: x.stop() From 75e79c96fd0594e0102b1034d6e43f74885fbd2f Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Sat, 27 Aug 2011 21:33:14 -0700 Subject: [PATCH 20/46] fix FALGS typo --- nova/rpc/impl_carrot.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/rpc/impl_carrot.py b/nova/rpc/impl_carrot.py index 117489bc6..40097e10e 100644 --- a/nova/rpc/impl_carrot.py +++ b/nova/rpc/impl_carrot.py @@ -119,7 +119,7 @@ class Consumer(messaging.Consumer): """ def __init__(self, *args, **kwargs): - max_retries = FALGS.rabbit_max_retries + max_retries = FLAGS.rabbit_max_retries sleep_time = FLAGS.rabbit_retry_interval tries = 0 while True: From 7d60224a4c9df2c217f39fde984fdaa20a4d6709 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Sun, 28 Aug 2011 17:33:11 -0700 Subject: [PATCH 21/46] start to rework some consumer stuff --- nova/rpc/impl_kombu.py | 127 ++++++++++++++++++++++++----------------- nova/service.py | 24 ++------ 2 files changed, 81 insertions(+), 70 deletions(-) diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py index db839dd2a..01871606c 100644 --- a/nova/rpc/impl_kombu.py +++ b/nova/rpc/impl_kombu.py @@ -35,11 +35,11 @@ flags.DEFINE_integer('rpc_thread_pool_size', 1024, 'Size of RPC thread pool') -class QueueBase(object): - """Queue base class.""" +class ConsumerBase(object): + """Consumer base class.""" def __init__(self, channel, callback, tag, **kwargs): - """Init the queue. + """Declare a queue on an amqp channel. 
'channel' is the amqp channel to use 'callback' is the callback to call when messages are received @@ -55,20 +55,21 @@ class QueueBase(object): self.reconnect(channel) def reconnect(self, channel): - """Re-create the queue after a rabbit reconnect""" + """Re-declare the queue after a rabbit reconnect""" self.channel = channel self.kwargs['channel'] = channel self.queue = kombu.entity.Queue(**self.kwargs) self.queue.declare() def consume(self, *args, **kwargs): - """Consume from this queue. + """Actually declare the consumer on the amqp channel. This will + start the flow of messages from the queue. Using the + Connection.iterconsume() iterator will process the messages, + calling the appropriate callback. + If a callback is specified in kwargs, use that. Otherwise, use the callback passed during __init__() - The callback will be called if a message was read off of the - queue. - If kwargs['nowait'] is True, then this call will block until a message is read. @@ -100,7 +101,7 @@ class QueueBase(object): self.queue = None -class DirectQueue(QueueBase): +class DirectConsumer(ConsumerBase): """Queue/consumer class for 'direct'""" def __init__(self, channel, msg_id, callback, tag, **kwargs): @@ -123,7 +124,7 @@ class DirectQueue(QueueBase): type='direct', durable=options['durable'], auto_delete=options['auto_delete']) - super(DirectQueue, self).__init__( + super(DirectConsumer, self).__init__( channel, callback, tag, @@ -133,8 +134,8 @@ class DirectQueue(QueueBase): **options) -class TopicQueue(QueueBase): - """Queue/consumer class for 'topic'""" +class TopicConsumer(ConsumerBase): + """Consumer class for 'topic'""" def __init__(self, channel, topic, callback, tag, **kwargs): """Init a 'topic' queue. @@ -156,7 +157,7 @@ class TopicQueue(QueueBase): type='topic', durable=options['durable'], auto_delete=options['auto_delete']) - super(TopicQueue, self).__init__( + super(TopicConsumer, self).__init__( channel, callback, tag, @@ -166,8 +167,8 @@ class TopicQueue(QueueBase): **options) -class FanoutQueue(QueueBase): - """Queue/consumer class for 'fanout'""" +class FanoutConsumer(ConsumerBase): + """Consumer class for 'fanout'""" def __init__(self, channel, topic, callback, tag, **kwargs): """Init a 'fanout' queue. @@ -193,7 +194,7 @@ class FanoutQueue(QueueBase): type='fanout', durable=options['durable'], auto_delete=options['auto_delete']) - super(FanoutQueue, self).__init__( + super(FanoutConsumer, self).__init__( channel, callback, tag, @@ -286,7 +287,8 @@ class Connection(object): """Connection instance object.""" def __init__(self): - self.queues = [] + self.consumers = [] + self.consumer_thread = None self.max_retries = FLAGS.rabbit_max_retries # Try forever? 
if self.max_retries <= 0: @@ -334,9 +336,9 @@ class Connection(object): LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d' % self.params)) self.channel = self.connection.channel() - for consumer in self.queues: + for consumer in self.consumers: consumer.reconnect(self.channel) - if self.queues: + if self.consumers: LOG.debug(_("Re-established AMQP queues")) def get_channel(self): @@ -354,30 +356,32 @@ class Connection(object): def close(self): """Close/release this connection""" + self.cancel_consumer_thread() self.connection.release() self.connection = None def reset(self): """Reset a connection so it can be used again""" + self.cancel_consumer_thread() self.channel.close() self.channel = self.connection.channel() - self.queues = [] + self.consumers = [] - def create_queue(self, queue_cls, topic, callback): - """Create a queue using the class that was passed in and - add it to our list of queues used for consuming + def declare_consumer(self, consumer_cls, topic, callback): + """Create a Consumer using the class that was passed in and + add it to our list of consumers """ - queue = queue_cls(self.channel, topic, callback, - self.queue_num.next()) - self.queues.append(queue) - return queue + consumer = consumer_cls(self.channel, topic, callback, + self.consumer_num.next()) + self.consumers.append(consumer) + return consumer - def consume(self, limit=None): - """Consume from all queues""" + def iterconsume(self, limit=None): + """Return an iterator that will consume from all queues/consumers""" while True: try: - queues_head = self.queues[:-1] - queues_tail = self.queues[-1] + queues_head = self.consumers[:-1] + queues_tail = self.consumers[-1] for queue in queues_head: queue.consume(nowait=True) queues_tail.consume(nowait=False) @@ -391,6 +395,36 @@ class Connection(object): '%s' % str(e))) self.reconnect() + def consume(self, limit=None): + """Consume from all queues/consumers""" + it = self.iterconsume(limit=limit) + while True: + try: + it.next() + except StopIteration: + return + + def consume_in_thread(self): + """Consumer from all queues/consumers in a greenthread""" + def _consumer_thread(): + try: + self.consume() + except greenlet.GreenletExit: + return + if not self.consumer_thread: + self.consumer_thread = eventlet.spawn(_consumer_thread) + return self.consumer_thread + + def cancel_consumer_thread(self): + """Cancel a consumer thread""" + if self.consumer_thread: + self.consumer_thread.kill() + try: + self.consumer_thread.wait() + except greenlet.GreenletExit: + pass + self.consumer_thread = None + def publisher_send(self, cls, topic, msg): """Send to a publisher based on the publisher class""" while True: @@ -408,20 +442,20 @@ class Connection(object): except self.connection.connection_errors, e: pass - def direct_consumer(self, topic, callback): + def declare_direct_consumer(self, topic, callback): """Create a 'direct' queue. 
In nova's use, this is generally a msg_id queue used for responses for call/multicall """ - return self.create_queue(DirectQueue, topic, callback) + self.declare_consumer(DirectConsumer, topic, callback) - def topic_consumer(self, topic, callback=None): - """Create a 'topic' queue.""" - return self.create_queue(TopicQueue, topic, callback) + def declare_topic_consumer(self, topic, callback=None): + """Create a 'topic' consumer.""" + self.declare_consumer(TopicConsumer, topic, callback) - def fanout_consumer(self, topic, callback): - """Create a 'fanout' queue""" - return self.create_queue(FanoutQueue, topic, callback) + def declare_fanout_consumer(self, topic, callback): + """Create a 'fanout' consumer""" + self.declare_consumer(FanoutConsumer, topic, callback) def direct_send(self, msg_id, msg): """Send a 'direct' message""" @@ -638,18 +672,9 @@ def create_connection(new=True): def create_consumer(conn, topic, proxy, fanout=False): """Create a consumer that calls a method in a proxy object""" if fanout: - return conn.fanout_consumer(topic, ProxyCallback(proxy)) + conn.declare_fanout_consumer(topic, ProxyCallback(proxy)) else: - return conn.topic_consumer(topic, ProxyCallback(proxy)) - - -def create_consumer_set(conn, consumers): - # FIXME(comstud): Replace this however necessary - # Returns an object that you can call .wait() on to consume - # all queues? - # Needs to have a .close() which will stop consuming? - # Needs to also have an method for tests? - raise NotImplemented + conn.declare_topic_consumer(topic, ProxyCallback(proxy)) def multicall(context, topic, msg): @@ -666,7 +691,7 @@ def multicall(context, topic, msg): conn = ConnectionContext() wait_msg = MulticallWaiter(conn) - conn.direct_consumer(msg_id, wait_msg) + conn.declare_direct_consumer(msg_id, wait_msg) conn.topic_send(topic, msg) return wait_msg diff --git a/nova/service.py b/nova/service.py index a872a36ee..ab7925eb3 100644 --- a/nova/service.py +++ b/nova/service.py @@ -153,26 +153,17 @@ class Service(object): self.topic) # Share this same connection for these Consumers - consumer_all = rpc.create_consumer(self.conn, self.topic, self, + rpc.create_consumer(self.conn, self.topic, self, fanout=False) node_topic = '%s.%s' % (self.topic, self.host) - consumer_node = rpc.create_consumer(self.conn, node_topic, self, + rpc.create_consumer(self.conn, node_topic, self, fanout=False) - fanout = rpc.create_consumer(self.conn, self.topic, self, fanout=True) + rpc.create_consumer(self.conn, self.topic, self, fanout=True) - consumers = [consumer_all, consumer_node, fanout] - consumer_set = rpc.create_consumer_set(self.conn, consumers) - - # Wait forever, processing these consumers - def _wait(): - try: - consumer_set.wait() - finally: - consumer_set.close() - - self.consumer_set_thread = eventlet.spawn(_wait) + # Consume from all consumers in a thread + self.conn.consume_in_thread() if self.report_interval: pulse = utils.LoopingCall(self.report_state) @@ -237,11 +228,6 @@ class Service(object): logging.warn(_('Service killed that has no database entry')) def stop(self): - self.consumer_set_thread.kill() - try: - self.consumer_set_thread.wait() - except greenlet.GreenletExit: - pass # Try to shut the connection down, but if we get any sort of # errors, go ahead and ignore them.. 
as we're shutting down anyway try: From 7bfe3aabc2bf82bf297d24f27d56968daaee2f1e Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Sun, 28 Aug 2011 18:17:21 -0700 Subject: [PATCH 22/46] fix test_rpc and kombu stuff --- nova/rpc/FIXME | 2 -- nova/rpc/__init__.py | 7 +----- nova/rpc/impl_carrot.py | 56 ++++++++++++++++++++++++++++++++++------- nova/rpc/impl_kombu.py | 21 ++++++++++++---- nova/tests/test_rpc.py | 25 ++++++++++-------- 5 files changed, 79 insertions(+), 32 deletions(-) delete mode 100644 nova/rpc/FIXME diff --git a/nova/rpc/FIXME b/nova/rpc/FIXME deleted file mode 100644 index 704081802..000000000 --- a/nova/rpc/FIXME +++ /dev/null @@ -1,2 +0,0 @@ -Move some code duplication between carrot/kombu into common.py -The other FIXMEs in __init__.py and impl_kombu.py diff --git a/nova/rpc/__init__.py b/nova/rpc/__init__.py index 9371c2ab3..10b69c8b5 100644 --- a/nova/rpc/__init__.py +++ b/nova/rpc/__init__.py @@ -38,18 +38,13 @@ RPCIMPL = import_object(impl_table.get(FLAGS.rpc_backend, def create_connection(new=True): - return RPCIMPL.Connection.instance(new=True) + return RPCIMPL.create_connection(new=new) def create_consumer(conn, topic, proxy, fanout=False): return RPCIMPL.create_consumer(conn, topic, proxy, fanout) -def create_consumer_set(conn, consumers): - # FIXME(comstud): replace however necessary - return RPCIMPL.ConsumerSet(connection=conn, consumer_list=consumers) - - def call(context, topic, msg): return RPCIMPL.call(context, topic, msg) diff --git a/nova/rpc/impl_carrot.py b/nova/rpc/impl_carrot.py index 40097e10e..efff788a0 100644 --- a/nova/rpc/impl_carrot.py +++ b/nova/rpc/impl_carrot.py @@ -33,6 +33,7 @@ import uuid from carrot import connection as carrot_connection from carrot import messaging +import eventlet from eventlet import greenpool from eventlet import pools from eventlet import queue @@ -42,10 +43,10 @@ from nova import context from nova import exception from nova import fakerabbit from nova import flags -from nova import log as logging -from nova import utils from nova.rpc.common import RemoteError, LOG +# Needed for tests +eventlet.monkey_patch() FLAGS = flags.FLAGS flags.DEFINE_integer('rpc_thread_pool_size', 1024, @@ -57,6 +58,11 @@ flags.DEFINE_integer('rpc_conn_pool_size', 30, class Connection(carrot_connection.BrokerConnection): """Connection instance object.""" + def __init__(self, *args, **kwargs): + super(Connection, self).__init__(*args, **kwargs) + self._rpc_consumers = [] + self._rpc_consumer_thread = None + @classmethod def instance(cls, new=True): """Returns the instance.""" @@ -94,6 +100,42 @@ class Connection(carrot_connection.BrokerConnection): pass return cls.instance() + def close(self): + self.cancel_consumer_thread() + for consumer in self._rpc_consumers: + try: + consumer.close() + except Exception: + # ignore all errors + pass + self._rpc_consumers = [] + super(Connection, self).close() + + def consume_in_thread(self): + """Consumer from all queues/consumers in a greenthread""" + + consumer_set = ConsumerSet(connection=self, + consumer_list=self._rpc_consumers) + + def _consumer_thread(): + try: + consumer_set.wait() + except greenlet.GreenletExit: + return + if not self._rpc_consumer_thread: + self._rpc_consumer_thread = eventlet.spawn(_consumer_thread) + return self._rpc_consumer_thread + + def cancel_consumer_thread(self): + """Cancel a consumer thread""" + if self._rpc_consumer_thread: + self._rpc_consumer_thread.kill() + try: + self._rpc_consumer_thread.wait() + except greenlet.GreenletExit: + pass + self._rpc_consumer_thread 
= None + class Pool(pools.Pool): """Class that implements a Pool of Connections.""" @@ -119,6 +161,7 @@ class Consumer(messaging.Consumer): """ def __init__(self, *args, **kwargs): + connection = kwargs.get('connection') max_retries = FLAGS.rabbit_max_retries sleep_time = FLAGS.rabbit_retry_interval tries = 0 @@ -148,6 +191,7 @@ class Consumer(messaging.Consumer): LOG.error(_('Unable to connect to AMQP server ' 'after %(tries)d tries. Shutting down.') % locals()) sys.exit(1) + connection._rpc_consumers.append(self) def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): """Wraps the parent fetch with some logic for failed connection.""" @@ -175,12 +219,6 @@ class Consumer(messaging.Consumer): LOG.exception(_('Failed to fetch message from queue: %s' % e)) self.failed_connection = True - def attach_to_eventlet(self): - """Only needed for unit tests!""" - timer = utils.LoopingCall(self.fetch, enable_callbacks=True) - timer.start(0.1) - return timer - class AdapterConsumer(Consumer): """Calls methods on a proxy object based on method and args.""" @@ -251,7 +289,7 @@ class AdapterConsumer(Consumer): # NOTE(vish): this iterates through the generator list(rval) except Exception as e: - logging.exception('Exception during message handling') + LOG.exception('Exception during message handling') if msg_id: msg_reply(msg_id, None, sys.exc_info()) return diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py index 01871606c..bd83bc520 100644 --- a/nova/rpc/impl_kombu.py +++ b/nova/rpc/impl_kombu.py @@ -14,9 +14,6 @@ # License for the specific language governing permissions and limitations # under the License. -from nova import flags -from nova.rpc.common import RemoteError, LOG - import kombu import kombu.entity import kombu.messaging @@ -24,8 +21,22 @@ import kombu.connection import itertools import sys import time +import traceback +import types import uuid +import eventlet +from eventlet import greenpool +from eventlet import pools +import greenlet + +from nova import context +from nova import exception +from nova import flags +from nova.rpc.common import RemoteError, LOG + +# Needed for tests +eventlet.monkey_patch() FLAGS = flags.FLAGS @@ -317,7 +328,7 @@ class Connection(object): pass time.sleep(1) self.connection = kombu.connection.Connection(**self.params) - self.queue_num = itertools.count(1) + self.consumer_num = itertools.count(1) try: self.connection.ensure_connection(errback=self.connect_error, @@ -634,7 +645,7 @@ class RpcContext(context.RequestContext): class MulticallWaiter(object): def __init__(self, connection): self._connection = connection - self._iterator = connection.consume() + self._iterator = connection.iterconsume() self._result = None self._done = False diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index ba9c0a859..2b9922491 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -33,13 +33,17 @@ class RpcTestCase(test.TestCase): super(RpcTestCase, self).setUp() self.conn = rpc.create_connection(True) self.receiver = TestReceiver() - self.consumer = rpc.create_consumer(self.conn, - 'test', - self.receiver, - False) - self.consumer.attach_to_eventlet() + rpc.create_consumer(self.conn, + 'test', + self.receiver, + False) + self.conn.consume_in_thread() self.context = context.get_admin_context() + def tearDown(self): + self.conn.close() + super(RpcTestCase, self).tearDown() + def test_call_succeed(self): value = 42 result = rpc.call(self.context, 'test', {"method": "echo", @@ -139,16 +143,17 @@ class RpcTestCase(test.TestCase): 
nested = Nested() conn = rpc.create_connection(True) - consumer = rpc.create_consumer(conn, - 'nested', - nested, - False) - consumer.attach_to_eventlet() + rpc.create_consumer(conn, + 'nested', + nested, + False) + conn.consume_in_thread() value = 42 result = rpc.call(self.context, 'nested', {"method": "echo", "args": {"queue": "test", "value": value}}) + conn.close() self.assertEqual(value, result) From eaed4bd9f7f432e1e0397b29b6c3c70c36865605 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Sun, 28 Aug 2011 18:18:40 -0700 Subject: [PATCH 23/46] fix nova-ajax-console-proxy --- bin/nova-ajax-console-proxy | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/bin/nova-ajax-console-proxy b/bin/nova-ajax-console-proxy index 0a789b4b9..b3205ec56 100755 --- a/bin/nova-ajax-console-proxy +++ b/bin/nova-ajax-console-proxy @@ -113,11 +113,11 @@ class AjaxConsoleProxy(object): AjaxConsoleProxy.tokens[kwargs['token']] = \ {'args': kwargs, 'last_activity': time.time()} - conn = rpc.create_connection(new=True) - consumer = rpc.create_consumer( - conn, - FLAGS.ajax_console_proxy_topic, - TopicProxy) + self.conn = rpc.create_connection(new=True) + rpc.create_consumer( + self.conn, + FLAGS.ajax_console_proxy_topic, + TopicProxy) def delete_expired_tokens(): now = time.time() @@ -129,7 +129,7 @@ class AjaxConsoleProxy(object): for k in to_delete: del AjaxConsoleProxy.tokens[k] - utils.LoopingCall(consumer.fetch, enable_callbacks=True).start(0.1) + self.conn.consume_in_thread() utils.LoopingCall(delete_expired_tokens).start(1) if __name__ == '__main__': @@ -142,3 +142,4 @@ if __name__ == '__main__': server = wsgi.Server("AJAX Console Proxy", acp, port=acp_port) service.serve(server) service.wait() + self.conn.close() From 4f375f0e385e6eb3c36605813fe60b50d045379d Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Sun, 28 Aug 2011 19:22:53 -0700 Subject: [PATCH 24/46] add carrot/kombu tests... 
small thread fix for kombu --- nova/rpc/__init__.py | 4 +- nova/rpc/common.py | 6 + nova/rpc/impl_carrot.py | 4 - nova/rpc/impl_kombu.py | 12 +- nova/tests/test_rpc_amqp.py | 88 ----------- nova/tests/test_rpc_carrot.py | 202 ++++++++++++++++++++++++++ nova/tests/test_rpc_kombu.py | 266 ++++++++++++++++++++++++++++++++++ 7 files changed, 481 insertions(+), 101 deletions(-) delete mode 100644 nova/tests/test_rpc_amqp.py create mode 100644 nova/tests/test_rpc_carrot.py create mode 100644 nova/tests/test_rpc_kombu.py diff --git a/nova/rpc/__init__.py b/nova/rpc/__init__.py index 10b69c8b5..2a47ba87b 100644 --- a/nova/rpc/__init__.py +++ b/nova/rpc/__init__.py @@ -23,7 +23,7 @@ from nova import flags FLAGS = flags.FLAGS flags.DEFINE_string('rpc_backend', - 'carrot', + 'kombu', "The messaging module to use, defaults to carrot.") impl_table = {'kombu': 'nova.rpc.impl_kombu', @@ -42,7 +42,7 @@ def create_connection(new=True): def create_consumer(conn, topic, proxy, fanout=False): - return RPCIMPL.create_consumer(conn, topic, proxy, fanout) + RPCIMPL.create_consumer(conn, topic, proxy, fanout) def call(context, topic, msg): diff --git a/nova/rpc/common.py b/nova/rpc/common.py index 1d3065a83..b8c280630 100644 --- a/nova/rpc/common.py +++ b/nova/rpc/common.py @@ -1,8 +1,14 @@ from nova import exception +from nova import flags from nova import log as logging LOG = logging.getLogger('nova.rpc') +flags.DEFINE_integer('rpc_thread_pool_size', 1024, + 'Size of RPC thread pool') +flags.DEFINE_integer('rpc_conn_pool_size', 30, + 'Size of RPC connection pool') + class RemoteError(exception.Error): """Signifies that a remote class has raised an exception. diff --git a/nova/rpc/impl_carrot.py b/nova/rpc/impl_carrot.py index efff788a0..07af0a116 100644 --- a/nova/rpc/impl_carrot.py +++ b/nova/rpc/impl_carrot.py @@ -49,10 +49,6 @@ from nova.rpc.common import RemoteError, LOG eventlet.monkey_patch() FLAGS = flags.FLAGS -flags.DEFINE_integer('rpc_thread_pool_size', 1024, - 'Size of RPC thread pool') -flags.DEFINE_integer('rpc_conn_pool_size', 30, - 'Size of RPC connection pool') class Connection(carrot_connection.BrokerConnection): diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py index bd83bc520..49bca1d81 100644 --- a/nova/rpc/impl_kombu.py +++ b/nova/rpc/impl_kombu.py @@ -40,11 +40,6 @@ eventlet.monkey_patch() FLAGS = flags.FLAGS -flags.DEFINE_integer('rpc_conn_pool_size', 30, - 'Size of RPC connection pool') -flags.DEFINE_integer('rpc_thread_pool_size', 1024, - 'Size of RPC thread pool') - class ConsumerBase(object): """Consumer base class.""" @@ -328,6 +323,9 @@ class Connection(object): pass time.sleep(1) self.connection = kombu.connection.Connection(**self.params) + if FLAGS.fake_rabbit: + # Kludge to speed up tests. 
+ self.connection.transport.polling_interval = 0.0 self.consumer_num = itertools.count(1) try: @@ -422,13 +420,13 @@ class Connection(object): self.consume() except greenlet.GreenletExit: return - if not self.consumer_thread: + if self.consumer_thread is None: self.consumer_thread = eventlet.spawn(_consumer_thread) return self.consumer_thread def cancel_consumer_thread(self): """Cancel a consumer thread""" - if self.consumer_thread: + if self.consumer_thread is not None: self.consumer_thread.kill() try: self.consumer_thread.wait() diff --git a/nova/tests/test_rpc_amqp.py b/nova/tests/test_rpc_amqp.py deleted file mode 100644 index 2215a908b..000000000 --- a/nova/tests/test_rpc_amqp.py +++ /dev/null @@ -1,88 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2010 Openstack, LLC. -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Tests For RPC AMQP. -""" - -from nova import context -from nova import log as logging -from nova import rpc -from nova.rpc import amqp -from nova import test - - -LOG = logging.getLogger('nova.tests.rpc') - - -class RpcAMQPTestCase(test.TestCase): - def setUp(self): - super(RpcAMQPTestCase, self).setUp() - self.conn = rpc.create_connection(True) - self.receiver = TestReceiver() - self.consumer = rpc.create_consumer(self.conn, - 'test', - self.receiver, - False) - self.consumer.attach_to_eventlet() - self.context = context.get_admin_context() - - def test_connectionpool_single(self): - """Test that ConnectionPool recycles a single connection.""" - conn1 = amqp.ConnectionPool.get() - amqp.ConnectionPool.put(conn1) - conn2 = amqp.ConnectionPool.get() - amqp.ConnectionPool.put(conn2) - self.assertEqual(conn1, conn2) - - -class TestReceiver(object): - """Simple Proxy class so the consumer has methods to call. - - Uses static methods because we aren't actually storing any state. - - """ - - @staticmethod - def echo(context, value): - """Simply returns whatever value is sent in.""" - LOG.debug(_("Received %s"), value) - return value - - @staticmethod - def context(context, value): - """Returns dictionary version of context.""" - LOG.debug(_("Received %s"), context) - return context.to_dict() - - @staticmethod - def echo_three_times(context, value): - context.reply(value) - context.reply(value + 1) - context.reply(value + 2) - - @staticmethod - def echo_three_times_yield(context, value): - yield value - yield value + 1 - yield value + 2 - - @staticmethod - def fail(context, value): - """Raises an exception with the value sent in.""" - raise Exception(value) diff --git a/nova/tests/test_rpc_carrot.py b/nova/tests/test_rpc_carrot.py new file mode 100644 index 000000000..cf84980ab --- /dev/null +++ b/nova/tests/test_rpc_carrot.py @@ -0,0 +1,202 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Unit Tests for remote procedure calls using queue +""" + +from nova import context +from nova import log as logging +from nova.rpc import impl_carrot as rpc +from nova import test + + +LOG = logging.getLogger('nova.tests.rpc') + + +class RpcCarrotTestCase(test.TestCase): + def setUp(self): + super(RpcCarrotTestCase, self).setUp() + self.conn = rpc.create_connection(True) + self.receiver = TestReceiver() + rpc.create_consumer(self.conn, + 'test', + self.receiver, + False) + self.conn.consume_in_thread() + self.context = context.get_admin_context() + + def tearDown(self): + self.conn.close() + super(RpcCarrotTestCase, self).tearDown() + + def test_connectionpool_single(self): + """Test that ConnectionPool recycles a single connection.""" + conn1 = rpc.ConnectionPool.get() + rpc.ConnectionPool.put(conn1) + conn2 = rpc.ConnectionPool.get() + rpc.ConnectionPool.put(conn2) + self.assertEqual(conn1, conn2) + + def test_call_succeed(self): + value = 42 + result = rpc.call(self.context, 'test', {"method": "echo", + "args": {"value": value}}) + self.assertEqual(value, result) + + def test_call_succeed_despite_multiple_returns(self): + value = 42 + result = rpc.call(self.context, 'test', {"method": "echo_three_times", + "args": {"value": value}}) + self.assertEqual(value + 2, result) + + def test_call_succeed_despite_multiple_returns_yield(self): + value = 42 + result = rpc.call(self.context, 'test', + {"method": "echo_three_times_yield", + "args": {"value": value}}) + self.assertEqual(value + 2, result) + + def test_multicall_succeed_once(self): + value = 42 + result = rpc.multicall(self.context, + 'test', + {"method": "echo", + "args": {"value": value}}) + for i, x in enumerate(result): + if i > 0: + self.fail('should only receive one response') + self.assertEqual(value + i, x) + + def test_multicall_succeed_three_times(self): + value = 42 + result = rpc.multicall(self.context, + 'test', + {"method": "echo_three_times", + "args": {"value": value}}) + for i, x in enumerate(result): + self.assertEqual(value + i, x) + + def test_multicall_succeed_three_times_yield(self): + value = 42 + result = rpc.multicall(self.context, + 'test', + {"method": "echo_three_times_yield", + "args": {"value": value}}) + for i, x in enumerate(result): + self.assertEqual(value + i, x) + + def test_context_passed(self): + """Makes sure a context is passed through rpc call.""" + value = 42 + result = rpc.call(self.context, + 'test', {"method": "context", + "args": {"value": value}}) + self.assertEqual(self.context.to_dict(), result) + + def test_call_exception(self): + """Test that exception gets passed back properly. + + rpc.call returns a RemoteError object. The value of the + exception is converted to a string, so we convert it back + to an int in the test. 
+ + """ + value = 42 + self.assertRaises(rpc.RemoteError, + rpc.call, + self.context, + 'test', + {"method": "fail", + "args": {"value": value}}) + try: + rpc.call(self.context, + 'test', + {"method": "fail", + "args": {"value": value}}) + self.fail("should have thrown rpc.RemoteError") + except rpc.RemoteError as exc: + self.assertEqual(int(exc.value), value) + + def test_nested_calls(self): + """Test that we can do an rpc.call inside another call.""" + class Nested(object): + @staticmethod + def echo(context, queue, value): + """Calls echo in the passed queue""" + LOG.debug(_("Nested received %(queue)s, %(value)s") + % locals()) + # TODO: so, it will replay the context and use the same REQID? + # that's bizarre. + ret = rpc.call(context, + queue, + {"method": "echo", + "args": {"value": value}}) + LOG.debug(_("Nested return %s"), ret) + return value + + nested = Nested() + conn = rpc.create_connection(True) + rpc.create_consumer(conn, + 'nested', + nested, + False) + conn.consume_in_thread() + value = 42 + result = rpc.call(self.context, + 'nested', {"method": "echo", + "args": {"queue": "test", + "value": value}}) + conn.close() + self.assertEqual(value, result) + + +class TestReceiver(object): + """Simple Proxy class so the consumer has methods to call. + + Uses static methods because we aren't actually storing any state. + + """ + + @staticmethod + def echo(context, value): + """Simply returns whatever value is sent in.""" + LOG.debug(_("Received %s"), value) + return value + + @staticmethod + def context(context, value): + """Returns dictionary version of context.""" + LOG.debug(_("Received %s"), context) + return context.to_dict() + + @staticmethod + def echo_three_times(context, value): + context.reply(value) + context.reply(value + 1) + context.reply(value + 2) + + @staticmethod + def echo_three_times_yield(context, value): + yield value + yield value + 1 + yield value + 2 + + @staticmethod + def fail(context, value): + """Raises an exception with the value sent in.""" + raise Exception(value) diff --git a/nova/tests/test_rpc_kombu.py b/nova/tests/test_rpc_kombu.py new file mode 100644 index 000000000..457dfdeca --- /dev/null +++ b/nova/tests/test_rpc_kombu.py @@ -0,0 +1,266 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+""" +Unit Tests for remote procedure calls using queue +""" + +from nova import context +from nova import log as logging +from nova.rpc import impl_kombu as rpc +from nova import test + + +LOG = logging.getLogger('nova.tests.rpc') + + +class RpcKombuTestCase(test.TestCase): + def setUp(self): + super(RpcKombuTestCase, self).setUp() + self.conn = rpc.create_connection() + self.receiver = TestReceiver() + rpc.create_consumer(self.conn, + 'test', + self.receiver, + False) + self.conn.consume_in_thread() + self.context = context.get_admin_context() + + def tearDown(self): + self.conn.close() + super(RpcKombuTestCase, self).tearDown() + + def test_reusing_connection(self): + """Test that reusing a connection returns same one.""" + conn_context = rpc.create_connection(new=False) + conn1 = conn_context.connection + conn_context.close() + conn_context = rpc.create_connection(new=False) + conn2 = conn_context.connection + conn_context.close() + self.assertEqual(conn1, conn2) + + def test_topic_send_receive(self): + """Test sending to a topic exchange/queue""" + + conn = rpc.create_connection() + message = 'topic test message' + + self.received_message = None + + def _callback(message): + self.received_message = message + + conn.declare_topic_consumer('a_topic', _callback) + conn.topic_send('a_topic', message) + conn.consume(limit=1) + conn.close() + + self.assertEqual(self.received_message, message) + + def test_direct_send_receive(self): + """Test sending to a direct exchange/queue""" + conn = rpc.create_connection() + message = 'direct test message' + + self.received_message = None + + def _callback(message): + self.received_message = message + + conn.declare_direct_consumer('a_direct', _callback) + conn.direct_send('a_direct', message) + conn.consume(limit=1) + conn.close() + + self.assertEqual(self.received_message, message) + + @test.skip_test("kombu memory transport seems buggy with fanout queues " + "as this test passes when you use rabbit (fake_rabbit=False)") + def test_fanout_send_receive(self): + """Test sending to a fanout exchange and consuming from 2 queues""" + + conn = rpc.create_connection() + conn2 = rpc.create_connection() + message = 'fanout test message' + + self.received_message = None + + def _callback(message): + self.received_message = message + + conn.declare_fanout_consumer('a_fanout', _callback) + conn2.declare_fanout_consumer('a_fanout', _callback) + conn.fanout_send('a_fanout', message) + + conn.consume(limit=1) + conn.close() + self.assertEqual(self.received_message, message) + + self.received_message = None + conn2.consume(limit=1) + conn2.close() + self.assertEqual(self.received_message, message) + + def test_call_succeed(self): + value = 42 + result = rpc.call(self.context, 'test', {"method": "echo", + "args": {"value": value}}) + self.assertEqual(value, result) + + def test_call_succeed_despite_multiple_returns(self): + value = 42 + result = rpc.call(self.context, 'test', {"method": "echo_three_times", + "args": {"value": value}}) + self.assertEqual(value + 2, result) + + def test_call_succeed_despite_multiple_returns_yield(self): + value = 42 + result = rpc.call(self.context, 'test', + {"method": "echo_three_times_yield", + "args": {"value": value}}) + self.assertEqual(value + 2, result) + + def test_multicall_succeed_once(self): + value = 42 + result = rpc.multicall(self.context, + 'test', + {"method": "echo", + "args": {"value": value}}) + for i, x in enumerate(result): + if i > 0: + self.fail('should only receive one response') + self.assertEqual(value + i, 
x) + + def test_multicall_succeed_three_times(self): + value = 42 + result = rpc.multicall(self.context, + 'test', + {"method": "echo_three_times", + "args": {"value": value}}) + for i, x in enumerate(result): + self.assertEqual(value + i, x) + + def test_multicall_succeed_three_times_yield(self): + value = 42 + result = rpc.multicall(self.context, + 'test', + {"method": "echo_three_times_yield", + "args": {"value": value}}) + for i, x in enumerate(result): + self.assertEqual(value + i, x) + + def test_context_passed(self): + """Makes sure a context is passed through rpc call.""" + value = 42 + result = rpc.call(self.context, + 'test', {"method": "context", + "args": {"value": value}}) + self.assertEqual(self.context.to_dict(), result) + + def test_call_exception(self): + """Test that exception gets passed back properly. + + rpc.call returns a RemoteError object. The value of the + exception is converted to a string, so we convert it back + to an int in the test. + + """ + value = 42 + self.assertRaises(rpc.RemoteError, + rpc.call, + self.context, + 'test', + {"method": "fail", + "args": {"value": value}}) + try: + rpc.call(self.context, + 'test', + {"method": "fail", + "args": {"value": value}}) + self.fail("should have thrown rpc.RemoteError") + except rpc.RemoteError as exc: + self.assertEqual(int(exc.value), value) + + def test_nested_calls(self): + """Test that we can do an rpc.call inside another call.""" + class Nested(object): + @staticmethod + def echo(context, queue, value): + """Calls echo in the passed queue""" + LOG.debug(_("Nested received %(queue)s, %(value)s") + % locals()) + # TODO: so, it will replay the context and use the same REQID? + # that's bizarre. + ret = rpc.call(context, + queue, + {"method": "echo", + "args": {"value": value}}) + LOG.debug(_("Nested return %s"), ret) + return value + + nested = Nested() + conn = rpc.create_connection(True) + rpc.create_consumer(conn, + 'nested', + nested, + False) + conn.consume_in_thread() + value = 42 + result = rpc.call(self.context, + 'nested', {"method": "echo", + "args": {"queue": "test", + "value": value}}) + conn.close() + self.assertEqual(value, result) + + +class TestReceiver(object): + """Simple Proxy class so the consumer has methods to call. + + Uses static methods because we aren't actually storing any state. 
+ + """ + + @staticmethod + def echo(context, value): + """Simply returns whatever value is sent in.""" + LOG.debug(_("Received %s"), value) + return value + + @staticmethod + def context(context, value): + """Returns dictionary version of context.""" + LOG.debug(_("Received %s"), context) + return context.to_dict() + + @staticmethod + def echo_three_times(context, value): + context.reply(value) + context.reply(value + 1) + context.reply(value + 2) + + @staticmethod + def echo_three_times_yield(context, value): + yield value + yield value + 1 + yield value + 2 + + @staticmethod + def fail(context, value): + """Raises an exception with the value sent in.""" + raise Exception(value) From 24649f925a45ef062d01c01acce137b91174546c Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Sun, 28 Aug 2011 19:23:31 -0700 Subject: [PATCH 25/46] carrot consumer thread fix --- nova/rpc/impl_carrot.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/rpc/impl_carrot.py b/nova/rpc/impl_carrot.py index 07af0a116..d0e6f8269 100644 --- a/nova/rpc/impl_carrot.py +++ b/nova/rpc/impl_carrot.py @@ -118,13 +118,13 @@ class Connection(carrot_connection.BrokerConnection): consumer_set.wait() except greenlet.GreenletExit: return - if not self._rpc_consumer_thread: + if self._rpc_consumer_thread is None: self._rpc_consumer_thread = eventlet.spawn(_consumer_thread) return self._rpc_consumer_thread def cancel_consumer_thread(self): """Cancel a consumer thread""" - if self._rpc_consumer_thread: + if self._rpc_consumer_thread is not None: self._rpc_consumer_thread.kill() try: self._rpc_consumer_thread.wait() From 9d4aa97e4c51c6e52a21f9bbf15eec77ea9a6d82 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Sun, 28 Aug 2011 19:27:49 -0700 Subject: [PATCH 26/46] remove unused rpc connections in test_cloud and test_adminapi --- nova/tests/test_adminapi.py | 2 -- nova/tests/test_cloud.py | 2 -- 2 files changed, 4 deletions(-) diff --git a/nova/tests/test_adminapi.py b/nova/tests/test_adminapi.py index 06cc498ac..aaa633adc 100644 --- a/nova/tests/test_adminapi.py +++ b/nova/tests/test_adminapi.py @@ -38,8 +38,6 @@ class AdminApiTestCase(test.TestCase): super(AdminApiTestCase, self).setUp() self.flags(connection_type='fake') - self.conn = rpc.create_connection() - # set up our cloud self.api = admin.AdminController() diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 0793784f8..14ab64f33 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -51,8 +51,6 @@ class CloudTestCase(test.TestCase): self.flags(connection_type='fake', stub_network=True) - self.conn = rpc.create_connection() - # set up our cloud self.cloud = cloud.CloudController() From e3de2ed411ddfa2f5824932789582edc3ad60c5c Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Mon, 29 Aug 2011 00:12:30 -0700 Subject: [PATCH 27/46] pep8 fixes --- nova/flags.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/nova/flags.py b/nova/flags.py index e09b4721a..774da4ab4 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -302,9 +302,12 @@ DEFINE_bool('rabbit_use_ssl', False, 'connect over SSL') DEFINE_string('rabbit_userid', 'guest', 'rabbit userid') DEFINE_string('rabbit_password', 'guest', 'rabbit password') DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host') -DEFINE_integer('rabbit_retry_interval', 1, 'rabbit connection retry interval to start') -DEFINE_integer('rabbit_retry_backoff', 2, 'rabbit connection retry backoff in seconds') -DEFINE_integer('rabbit_max_retries', 0, 'maximum 
rabbit connection attempts (0=try forever)') +DEFINE_integer('rabbit_retry_interval', 1, + 'rabbit connection retry interval to start') +DEFINE_integer('rabbit_retry_backoff', 2, + 'rabbit connection retry backoff in seconds') +DEFINE_integer('rabbit_max_retries', 0, + 'maximum rabbit connection attempts (0=try forever)') DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to') DEFINE_boolean('rabbit_durable_queues', False, 'use durable queues') DEFINE_list('enabled_apis', ['ec2', 'osapi'], From d6261db69b1e122b8ae31f2c3a4add184cfd7524 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Mon, 29 Aug 2011 10:13:39 -0400 Subject: [PATCH 28/46] Removed test_parallel_builds --- nova/tests/test_xenapi.py | 37 ------------------------------------- 1 file changed, 37 deletions(-) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 2f0559366..45dad3516 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -16,7 +16,6 @@ """Test suite for XenAPI.""" -import eventlet import functools import json import os @@ -203,42 +202,6 @@ class XenAPIVMTestCase(test.TestCase): self.context = context.RequestContext(self.user_id, self.project_id) self.conn = xenapi_conn.get_connection(False) - def test_parallel_builds(self): - stubs.stubout_loopingcall_delay(self.stubs) - - def _do_build(id, proj, user, *args): - values = { - 'id': id, - 'project_id': proj, - 'user_id': user, - 'image_ref': 1, - 'kernel_id': 2, - 'ramdisk_id': 3, - 'instance_type_id': '3', # m1.large - 'os_type': 'linux', - 'architecture': 'x86-64'} - network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False}, - {'broadcast': '192.168.0.255', - 'dns': ['192.168.0.1'], - 'gateway': '192.168.0.1', - 'gateway6': 'dead:beef::1', - 'ip6s': [{'enabled': '1', - 'ip': 'dead:beef::dcad:beff:feef:0', - 'netmask': '64'}], - 'ips': [{'enabled': '1', - 'ip': '192.168.0.100', - 'netmask': '255.255.255.0'}], - 'label': 'fake', - 'mac': 'DE:AD:BE:EF:00:00', - 'rxtx_cap': 3})] - instance = db.instance_create(self.context, values) - self.conn.spawn(self.context, instance, network_info) - - gt1 = eventlet.spawn(_do_build, 1, self.project_id, self.user_id) - gt2 = eventlet.spawn(_do_build, 2, self.project_id, self.user_id) - gt1.wait() - gt2.wait() - def test_list_instances_0(self): instances = self.conn.list_instances() self.assertEquals(instances, []) From 6b0fa990c3a339f03aa66a3bee1ed918cac68496 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Mon, 29 Aug 2011 10:27:25 -0700 Subject: [PATCH 29/46] more logging info to help identify bad payloads --- nova/notifier/api.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/notifier/api.py b/nova/notifier/api.py index 6ef4a050e..043838536 100644 --- a/nova/notifier/api.py +++ b/nova/notifier/api.py @@ -122,4 +122,5 @@ def notify(publisher_id, event_type, priority, payload): driver.notify(msg) except Exception, e: LOG.exception(_("Problem '%(e)s' attempting to " - "send to notification system." % locals())) + "send to notification system. Payload=%(payload)s" % + locals())) From 2ecfb47b46bc6350a72a8c257b56714a03b329b9 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Mon, 29 Aug 2011 14:36:12 -0700 Subject: [PATCH 30/46] ditched rpc.create_consumer(conn) interface... instead you now do conn.create_consumer(.. 
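
A rough sketch of how a service uses the new connection-owned interface
(illustrative only -- the connection methods come from the diff below; the
topic names and the trivial proxy object are made up for the example):

    from nova import rpc

    class Proxy(object):
        """Any object whose methods the consumers should dispatch to."""
        def echo(self, context, value):
            return value

    conn = rpc.create_connection(new=True)
    proxy = Proxy()
    # topic, per-host and fanout consumers now hang off the connection
    conn.create_consumer('compute', proxy, fanout=False)
    conn.create_consumer('compute.myhost', proxy, fanout=False)
    conn.create_consumer('compute', proxy, fanout=True)
    # one greenthread consumes from all of the above
    conn.consume_in_thread()
    # ... later, on shutdown, closing also cancels the consumer thread
    conn.close()
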
--- nova/rpc/__init__.py | 6 +--- nova/rpc/impl_carrot.py | 29 +++++++++--------- nova/rpc/impl_kombu.py | 55 +++++++++++++++++------------------ nova/service.py | 8 ++--- nova/tests/test_rpc.py | 10 ++----- nova/tests/test_rpc_carrot.py | 10 ++----- nova/tests/test_rpc_kombu.py | 10 ++----- 7 files changed, 51 insertions(+), 77 deletions(-) diff --git a/nova/rpc/__init__.py b/nova/rpc/__init__.py index 2a47ba87b..fe50fb476 100644 --- a/nova/rpc/__init__.py +++ b/nova/rpc/__init__.py @@ -24,7 +24,7 @@ from nova import flags FLAGS = flags.FLAGS flags.DEFINE_string('rpc_backend', 'kombu', - "The messaging module to use, defaults to carrot.") + "The messaging module to use, defaults to kombu.") impl_table = {'kombu': 'nova.rpc.impl_kombu', 'amqp': 'nova.rpc.impl_kombu', @@ -41,10 +41,6 @@ def create_connection(new=True): return RPCIMPL.create_connection(new=new) -def create_consumer(conn, topic, proxy, fanout=False): - RPCIMPL.create_consumer(conn, topic, proxy, fanout) - - def call(context, topic, msg): return RPCIMPL.call(context, topic, msg) diff --git a/nova/rpc/impl_carrot.py b/nova/rpc/impl_carrot.py index d0e6f8269..6d504aaec 100644 --- a/nova/rpc/impl_carrot.py +++ b/nova/rpc/impl_carrot.py @@ -132,6 +132,20 @@ class Connection(carrot_connection.BrokerConnection): pass self._rpc_consumer_thread = None + def create_consumer(self, topic, proxy, fanout=False): + """Create a consumer that calls methods in the proxy""" + if fanout: + consumer = FanoutAdapterConsumer( + connection=self, + topic=topic, + proxy=proxy) + else: + consumer = TopicAdapterConsumer( + connection=self, + topic=topic, + proxy=proxy) + self._rpc_consumers.append(consumer) + class Pool(pools.Pool): """Class that implements a Pool of Connections.""" @@ -187,7 +201,6 @@ class Consumer(messaging.Consumer): LOG.error(_('Unable to connect to AMQP server ' 'after %(tries)d tries. 
Shutting down.') % locals()) sys.exit(1) - connection._rpc_consumers.append(self) def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): """Wraps the parent fetch with some logic for failed connection.""" @@ -568,20 +581,6 @@ def create_connection(new=True): return Connection.instance(new=new) -def create_consumer(conn, topic, proxy, fanout=False): - """Create a consumer that calls methods in the proxy""" - if fanout: - return FanoutAdapterConsumer( - connection=conn, - topic=topic, - proxy=proxy) - else: - return TopicAdapterConsumer( - connection=conn, - topic=topic, - proxy=proxy) - - def call(context, topic, msg): """Sends a message on a topic and wait for a response.""" rv = multicall(context, topic, msg) diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py index 49bca1d81..83ee1b122 100644 --- a/nova/rpc/impl_kombu.py +++ b/nova/rpc/impl_kombu.py @@ -404,26 +404,6 @@ class Connection(object): '%s' % str(e))) self.reconnect() - def consume(self, limit=None): - """Consume from all queues/consumers""" - it = self.iterconsume(limit=limit) - while True: - try: - it.next() - except StopIteration: - return - - def consume_in_thread(self): - """Consumer from all queues/consumers in a greenthread""" - def _consumer_thread(): - try: - self.consume() - except greenlet.GreenletExit: - return - if self.consumer_thread is None: - self.consumer_thread = eventlet.spawn(_consumer_thread) - return self.consumer_thread - def cancel_consumer_thread(self): """Cancel a consumer thread""" if self.consumer_thread is not None: @@ -478,6 +458,33 @@ class Connection(object): """Send a 'fanout' message""" self.publisher_send(FanoutPublisher, topic, msg) + def consume(self, limit=None): + """Consume from all queues/consumers""" + it = self.iterconsume(limit=limit) + while True: + try: + it.next() + except StopIteration: + return + + def consume_in_thread(self): + """Consumer from all queues/consumers in a greenthread""" + def _consumer_thread(): + try: + self.consume() + except greenlet.GreenletExit: + return + if self.consumer_thread is None: + self.consumer_thread = eventlet.spawn(_consumer_thread) + return self.consumer_thread + + def create_consumer(self, topic, proxy, fanout=False): + """Create a consumer that calls a method in a proxy object""" + if fanout: + self.declare_fanout_consumer(topic, ProxyCallback(proxy)) + else: + self.declare_topic_consumer(topic, ProxyCallback(proxy)) + class Pool(pools.Pool): """Class that implements a Pool of Connections.""" @@ -678,14 +685,6 @@ def create_connection(new=True): return ConnectionContext(pooled=not new) -def create_consumer(conn, topic, proxy, fanout=False): - """Create a consumer that calls a method in a proxy object""" - if fanout: - conn.declare_fanout_consumer(topic, ProxyCallback(proxy)) - else: - conn.declare_topic_consumer(topic, ProxyCallback(proxy)) - - def multicall(context, topic, msg): """Make a call that returns multiple times.""" # Can't use 'with' for multicall, as it returns an iterator diff --git a/nova/service.py b/nova/service.py index ab7925eb3..247eb4fb1 100644 --- a/nova/service.py +++ b/nova/service.py @@ -153,14 +153,12 @@ class Service(object): self.topic) # Share this same connection for these Consumers - rpc.create_consumer(self.conn, self.topic, self, - fanout=False) + self.conn.create_consumer(self.topic, self, fanout=False) node_topic = '%s.%s' % (self.topic, self.host) - rpc.create_consumer(self.conn, node_topic, self, - fanout=False) + self.conn.create_consumer(node_topic, self, fanout=False) - 
rpc.create_consumer(self.conn, self.topic, self, fanout=True) + self.conn.create_consumer(self.topic, self, fanout=True) # Consume from all consumers in a thread self.conn.consume_in_thread() diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index 2b9922491..ba91ea3b2 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -33,10 +33,7 @@ class RpcTestCase(test.TestCase): super(RpcTestCase, self).setUp() self.conn = rpc.create_connection(True) self.receiver = TestReceiver() - rpc.create_consumer(self.conn, - 'test', - self.receiver, - False) + self.conn.create_consumer('test', self.receiver, False) self.conn.consume_in_thread() self.context = context.get_admin_context() @@ -143,10 +140,7 @@ class RpcTestCase(test.TestCase): nested = Nested() conn = rpc.create_connection(True) - rpc.create_consumer(conn, - 'nested', - nested, - False) + conn.create_consumer('nested', nested, False) conn.consume_in_thread() value = 42 result = rpc.call(self.context, diff --git a/nova/tests/test_rpc_carrot.py b/nova/tests/test_rpc_carrot.py index cf84980ab..ff704ecf8 100644 --- a/nova/tests/test_rpc_carrot.py +++ b/nova/tests/test_rpc_carrot.py @@ -33,10 +33,7 @@ class RpcCarrotTestCase(test.TestCase): super(RpcCarrotTestCase, self).setUp() self.conn = rpc.create_connection(True) self.receiver = TestReceiver() - rpc.create_consumer(self.conn, - 'test', - self.receiver, - False) + self.conn.create_consumer('test', self.receiver, False) self.conn.consume_in_thread() self.context = context.get_admin_context() @@ -151,10 +148,7 @@ class RpcCarrotTestCase(test.TestCase): nested = Nested() conn = rpc.create_connection(True) - rpc.create_consumer(conn, - 'nested', - nested, - False) + conn.create_consumer('nested', nested, False) conn.consume_in_thread() value = 42 result = rpc.call(self.context, diff --git a/nova/tests/test_rpc_kombu.py b/nova/tests/test_rpc_kombu.py index 457dfdeca..7db88ecd0 100644 --- a/nova/tests/test_rpc_kombu.py +++ b/nova/tests/test_rpc_kombu.py @@ -33,10 +33,7 @@ class RpcKombuTestCase(test.TestCase): super(RpcKombuTestCase, self).setUp() self.conn = rpc.create_connection() self.receiver = TestReceiver() - rpc.create_consumer(self.conn, - 'test', - self.receiver, - False) + self.conn.create_consumer('test', self.receiver, False) self.conn.consume_in_thread() self.context = context.get_admin_context() @@ -215,10 +212,7 @@ class RpcKombuTestCase(test.TestCase): nested = Nested() conn = rpc.create_connection(True) - rpc.create_consumer(conn, - 'nested', - nested, - False) + conn.create_consumer('nested', nested, False) conn.consume_in_thread() value = 42 result = rpc.call(self.context, From 9fcf916985bbb21b567e613d2825f7e29bc34a5f Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Mon, 29 Aug 2011 14:54:20 -0700 Subject: [PATCH 31/46] created nova/tests/test_rpc_common.py which contains a rpc test base class so we can share tests between the rpc implementations --- nova/tests/test_rpc.py | 157 +--------------------------- nova/tests/test_rpc_carrot.py | 169 ++---------------------------- nova/tests/test_rpc_common.py | 188 ++++++++++++++++++++++++++++++++++ nova/tests/test_rpc_kombu.py | 172 ++----------------------------- 4 files changed, 211 insertions(+), 475 deletions(-) create mode 100644 nova/tests/test_rpc_common.py diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index ba91ea3b2..6b4454747 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -22,167 +22,16 @@ Unit Tests for remote procedure calls using queue from nova import context 
from nova import log as logging from nova import rpc -from nova import test +from nova.tests import test_rpc_common LOG = logging.getLogger('nova.tests.rpc') -class RpcTestCase(test.TestCase): +class RpcTestCase(test_rpc_common._BaseRpcTestCase): def setUp(self): + self.rpc = rpc super(RpcTestCase, self).setUp() - self.conn = rpc.create_connection(True) - self.receiver = TestReceiver() - self.conn.create_consumer('test', self.receiver, False) - self.conn.consume_in_thread() - self.context = context.get_admin_context() def tearDown(self): - self.conn.close() super(RpcTestCase, self).tearDown() - - def test_call_succeed(self): - value = 42 - result = rpc.call(self.context, 'test', {"method": "echo", - "args": {"value": value}}) - self.assertEqual(value, result) - - def test_call_succeed_despite_multiple_returns(self): - value = 42 - result = rpc.call(self.context, 'test', {"method": "echo_three_times", - "args": {"value": value}}) - self.assertEqual(value + 2, result) - - def test_call_succeed_despite_multiple_returns_yield(self): - value = 42 - result = rpc.call(self.context, 'test', - {"method": "echo_three_times_yield", - "args": {"value": value}}) - self.assertEqual(value + 2, result) - - def test_multicall_succeed_once(self): - value = 42 - result = rpc.multicall(self.context, - 'test', - {"method": "echo", - "args": {"value": value}}) - for i, x in enumerate(result): - if i > 0: - self.fail('should only receive one response') - self.assertEqual(value + i, x) - - def test_multicall_succeed_three_times(self): - value = 42 - result = rpc.multicall(self.context, - 'test', - {"method": "echo_three_times", - "args": {"value": value}}) - for i, x in enumerate(result): - self.assertEqual(value + i, x) - - def test_multicall_succeed_three_times_yield(self): - value = 42 - result = rpc.multicall(self.context, - 'test', - {"method": "echo_three_times_yield", - "args": {"value": value}}) - for i, x in enumerate(result): - self.assertEqual(value + i, x) - - def test_context_passed(self): - """Makes sure a context is passed through rpc call.""" - value = 42 - result = rpc.call(self.context, - 'test', {"method": "context", - "args": {"value": value}}) - self.assertEqual(self.context.to_dict(), result) - - def test_call_exception(self): - """Test that exception gets passed back properly. - - rpc.call returns a RemoteError object. The value of the - exception is converted to a string, so we convert it back - to an int in the test. - - """ - value = 42 - self.assertRaises(rpc.RemoteError, - rpc.call, - self.context, - 'test', - {"method": "fail", - "args": {"value": value}}) - try: - rpc.call(self.context, - 'test', - {"method": "fail", - "args": {"value": value}}) - self.fail("should have thrown rpc.RemoteError") - except rpc.RemoteError as exc: - self.assertEqual(int(exc.value), value) - - def test_nested_calls(self): - """Test that we can do an rpc.call inside another call.""" - class Nested(object): - @staticmethod - def echo(context, queue, value): - """Calls echo in the passed queue""" - LOG.debug(_("Nested received %(queue)s, %(value)s") - % locals()) - # TODO: so, it will replay the context and use the same REQID? - # that's bizarre. 
- ret = rpc.call(context, - queue, - {"method": "echo", - "args": {"value": value}}) - LOG.debug(_("Nested return %s"), ret) - return value - - nested = Nested() - conn = rpc.create_connection(True) - conn.create_consumer('nested', nested, False) - conn.consume_in_thread() - value = 42 - result = rpc.call(self.context, - 'nested', {"method": "echo", - "args": {"queue": "test", - "value": value}}) - conn.close() - self.assertEqual(value, result) - - -class TestReceiver(object): - """Simple Proxy class so the consumer has methods to call. - - Uses static methods because we aren't actually storing any state. - - """ - - @staticmethod - def echo(context, value): - """Simply returns whatever value is sent in.""" - LOG.debug(_("Received %s"), value) - return value - - @staticmethod - def context(context, value): - """Returns dictionary version of context.""" - LOG.debug(_("Received %s"), context) - return context.to_dict() - - @staticmethod - def echo_three_times(context, value): - context.reply(value) - context.reply(value + 1) - context.reply(value + 2) - - @staticmethod - def echo_three_times_yield(context, value): - yield value - yield value + 1 - yield value + 2 - - @staticmethod - def fail(context, value): - """Raises an exception with the value sent in.""" - raise Exception(value) diff --git a/nova/tests/test_rpc_carrot.py b/nova/tests/test_rpc_carrot.py index ff704ecf8..57cdebf4f 100644 --- a/nova/tests/test_rpc_carrot.py +++ b/nova/tests/test_rpc_carrot.py @@ -16,181 +16,30 @@ # License for the specific language governing permissions and limitations # under the License. """ -Unit Tests for remote procedure calls using queue +Unit Tests for remote procedure calls using carrot """ from nova import context from nova import log as logging -from nova.rpc import impl_carrot as rpc -from nova import test +from nova.rpc import impl_carrot +from nova.tests import test_rpc_common LOG = logging.getLogger('nova.tests.rpc') -class RpcCarrotTestCase(test.TestCase): +class RpcCarrotTestCase(test_rpc_common._BaseRpcTestCase): def setUp(self): + self.rpc = impl_carrot super(RpcCarrotTestCase, self).setUp() - self.conn = rpc.create_connection(True) - self.receiver = TestReceiver() - self.conn.create_consumer('test', self.receiver, False) - self.conn.consume_in_thread() - self.context = context.get_admin_context() def tearDown(self): - self.conn.close() super(RpcCarrotTestCase, self).tearDown() def test_connectionpool_single(self): """Test that ConnectionPool recycles a single connection.""" - conn1 = rpc.ConnectionPool.get() - rpc.ConnectionPool.put(conn1) - conn2 = rpc.ConnectionPool.get() - rpc.ConnectionPool.put(conn2) + conn1 = self.rpc.ConnectionPool.get() + self.rpc.ConnectionPool.put(conn1) + conn2 = self.rpc.ConnectionPool.get() + self.rpc.ConnectionPool.put(conn2) self.assertEqual(conn1, conn2) - - def test_call_succeed(self): - value = 42 - result = rpc.call(self.context, 'test', {"method": "echo", - "args": {"value": value}}) - self.assertEqual(value, result) - - def test_call_succeed_despite_multiple_returns(self): - value = 42 - result = rpc.call(self.context, 'test', {"method": "echo_three_times", - "args": {"value": value}}) - self.assertEqual(value + 2, result) - - def test_call_succeed_despite_multiple_returns_yield(self): - value = 42 - result = rpc.call(self.context, 'test', - {"method": "echo_three_times_yield", - "args": {"value": value}}) - self.assertEqual(value + 2, result) - - def test_multicall_succeed_once(self): - value = 42 - result = rpc.multicall(self.context, - 'test', - 
{"method": "echo", - "args": {"value": value}}) - for i, x in enumerate(result): - if i > 0: - self.fail('should only receive one response') - self.assertEqual(value + i, x) - - def test_multicall_succeed_three_times(self): - value = 42 - result = rpc.multicall(self.context, - 'test', - {"method": "echo_three_times", - "args": {"value": value}}) - for i, x in enumerate(result): - self.assertEqual(value + i, x) - - def test_multicall_succeed_three_times_yield(self): - value = 42 - result = rpc.multicall(self.context, - 'test', - {"method": "echo_three_times_yield", - "args": {"value": value}}) - for i, x in enumerate(result): - self.assertEqual(value + i, x) - - def test_context_passed(self): - """Makes sure a context is passed through rpc call.""" - value = 42 - result = rpc.call(self.context, - 'test', {"method": "context", - "args": {"value": value}}) - self.assertEqual(self.context.to_dict(), result) - - def test_call_exception(self): - """Test that exception gets passed back properly. - - rpc.call returns a RemoteError object. The value of the - exception is converted to a string, so we convert it back - to an int in the test. - - """ - value = 42 - self.assertRaises(rpc.RemoteError, - rpc.call, - self.context, - 'test', - {"method": "fail", - "args": {"value": value}}) - try: - rpc.call(self.context, - 'test', - {"method": "fail", - "args": {"value": value}}) - self.fail("should have thrown rpc.RemoteError") - except rpc.RemoteError as exc: - self.assertEqual(int(exc.value), value) - - def test_nested_calls(self): - """Test that we can do an rpc.call inside another call.""" - class Nested(object): - @staticmethod - def echo(context, queue, value): - """Calls echo in the passed queue""" - LOG.debug(_("Nested received %(queue)s, %(value)s") - % locals()) - # TODO: so, it will replay the context and use the same REQID? - # that's bizarre. - ret = rpc.call(context, - queue, - {"method": "echo", - "args": {"value": value}}) - LOG.debug(_("Nested return %s"), ret) - return value - - nested = Nested() - conn = rpc.create_connection(True) - conn.create_consumer('nested', nested, False) - conn.consume_in_thread() - value = 42 - result = rpc.call(self.context, - 'nested', {"method": "echo", - "args": {"queue": "test", - "value": value}}) - conn.close() - self.assertEqual(value, result) - - -class TestReceiver(object): - """Simple Proxy class so the consumer has methods to call. - - Uses static methods because we aren't actually storing any state. - - """ - - @staticmethod - def echo(context, value): - """Simply returns whatever value is sent in.""" - LOG.debug(_("Received %s"), value) - return value - - @staticmethod - def context(context, value): - """Returns dictionary version of context.""" - LOG.debug(_("Received %s"), context) - return context.to_dict() - - @staticmethod - def echo_three_times(context, value): - context.reply(value) - context.reply(value + 1) - context.reply(value + 2) - - @staticmethod - def echo_three_times_yield(context, value): - yield value - yield value + 1 - yield value + 2 - - @staticmethod - def fail(context, value): - """Raises an exception with the value sent in.""" - raise Exception(value) diff --git a/nova/tests/test_rpc_common.py b/nova/tests/test_rpc_common.py new file mode 100644 index 000000000..b922be1df --- /dev/null +++ b/nova/tests/test_rpc_common.py @@ -0,0 +1,188 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. 
+# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Unit Tests for remote procedure calls shared between all implementations +""" + +from nova import context +from nova import log as logging +from nova.rpc.common import RemoteError +from nova import test + + +LOG = logging.getLogger('nova.tests.rpc') + + +class _BaseRpcTestCase(test.TestCase): + def setUp(self): + super(_BaseRpcTestCase, self).setUp() + self.conn = self.rpc.create_connection(True) + self.receiver = TestReceiver() + self.conn.create_consumer('test', self.receiver, False) + self.conn.consume_in_thread() + self.context = context.get_admin_context() + + def tearDown(self): + self.conn.close() + super(_BaseRpcTestCase, self).tearDown() + + def test_call_succeed(self): + value = 42 + result = self.rpc.call(self.context, 'test', {"method": "echo", + "args": {"value": value}}) + self.assertEqual(value, result) + + def test_call_succeed_despite_multiple_returns(self): + value = 42 + result = self.rpc.call(self.context, 'test', {"method": "echo_three_times", + "args": {"value": value}}) + self.assertEqual(value + 2, result) + + def test_call_succeed_despite_multiple_returns_yield(self): + value = 42 + result = self.rpc.call(self.context, 'test', + {"method": "echo_three_times_yield", + "args": {"value": value}}) + self.assertEqual(value + 2, result) + + def test_multicall_succeed_once(self): + value = 42 + result = self.rpc.multicall(self.context, + 'test', + {"method": "echo", + "args": {"value": value}}) + for i, x in enumerate(result): + if i > 0: + self.fail('should only receive one response') + self.assertEqual(value + i, x) + + def test_multicall_succeed_three_times(self): + value = 42 + result = self.rpc.multicall(self.context, + 'test', + {"method": "echo_three_times", + "args": {"value": value}}) + for i, x in enumerate(result): + self.assertEqual(value + i, x) + + def test_multicall_succeed_three_times_yield(self): + value = 42 + result = self.rpc.multicall(self.context, + 'test', + {"method": "echo_three_times_yield", + "args": {"value": value}}) + for i, x in enumerate(result): + self.assertEqual(value + i, x) + + def test_context_passed(self): + """Makes sure a context is passed through rpc call.""" + value = 42 + result = self.rpc.call(self.context, + 'test', {"method": "context", + "args": {"value": value}}) + self.assertEqual(self.context.to_dict(), result) + + def test_call_exception(self): + """Test that exception gets passed back properly. + + rpc.call returns a RemoteError object. The value of the + exception is converted to a string, so we convert it back + to an int in the test. 
+ + """ + value = 42 + self.assertRaises(RemoteError, + self.rpc.call, + self.context, + 'test', + {"method": "fail", + "args": {"value": value}}) + try: + self.rpc.call(self.context, + 'test', + {"method": "fail", + "args": {"value": value}}) + self.fail("should have thrown RemoteError") + except RemoteError as exc: + self.assertEqual(int(exc.value), value) + + def test_nested_calls(self): + """Test that we can do an rpc.call inside another call.""" + class Nested(object): + @staticmethod + def echo(context, queue, value): + """Calls echo in the passed queue""" + LOG.debug(_("Nested received %(queue)s, %(value)s") + % locals()) + # TODO: so, it will replay the context and use the same REQID? + # that's bizarre. + ret = self.rpc.call(context, + queue, + {"method": "echo", + "args": {"value": value}}) + LOG.debug(_("Nested return %s"), ret) + return value + + nested = Nested() + conn = self.rpc.create_connection(True) + conn.create_consumer('nested', nested, False) + conn.consume_in_thread() + value = 42 + result = self.rpc.call(self.context, + 'nested', {"method": "echo", + "args": {"queue": "test", + "value": value}}) + conn.close() + self.assertEqual(value, result) + + +class TestReceiver(object): + """Simple Proxy class so the consumer has methods to call. + + Uses static methods because we aren't actually storing any state. + + """ + + @staticmethod + def echo(context, value): + """Simply returns whatever value is sent in.""" + LOG.debug(_("Received %s"), value) + return value + + @staticmethod + def context(context, value): + """Returns dictionary version of context.""" + LOG.debug(_("Received %s"), context) + return context.to_dict() + + @staticmethod + def echo_three_times(context, value): + context.reply(value) + context.reply(value + 1) + context.reply(value + 2) + + @staticmethod + def echo_three_times_yield(context, value): + yield value + yield value + 1 + yield value + 2 + + @staticmethod + def fail(context, value): + """Raises an exception with the value sent in.""" + raise Exception(value) diff --git a/nova/tests/test_rpc_kombu.py b/nova/tests/test_rpc_kombu.py index 7db88ecd0..101ed14af 100644 --- a/nova/tests/test_rpc_kombu.py +++ b/nova/tests/test_rpc_kombu.py @@ -16,37 +16,33 @@ # License for the specific language governing permissions and limitations # under the License. 
""" -Unit Tests for remote procedure calls using queue +Unit Tests for remote procedure calls using kombu """ from nova import context from nova import log as logging -from nova.rpc import impl_kombu as rpc from nova import test +from nova.rpc import impl_kombu +from nova.tests import test_rpc_common LOG = logging.getLogger('nova.tests.rpc') -class RpcKombuTestCase(test.TestCase): +class RpcKombuTestCase(test_rpc_common._BaseRpcTestCase): def setUp(self): + self.rpc = impl_kombu super(RpcKombuTestCase, self).setUp() - self.conn = rpc.create_connection() - self.receiver = TestReceiver() - self.conn.create_consumer('test', self.receiver, False) - self.conn.consume_in_thread() - self.context = context.get_admin_context() def tearDown(self): - self.conn.close() super(RpcKombuTestCase, self).tearDown() def test_reusing_connection(self): """Test that reusing a connection returns same one.""" - conn_context = rpc.create_connection(new=False) + conn_context = self.rpc.create_connection(new=False) conn1 = conn_context.connection conn_context.close() - conn_context = rpc.create_connection(new=False) + conn_context = self.rpc.create_connection(new=False) conn2 = conn_context.connection conn_context.close() self.assertEqual(conn1, conn2) @@ -54,7 +50,7 @@ class RpcKombuTestCase(test.TestCase): def test_topic_send_receive(self): """Test sending to a topic exchange/queue""" - conn = rpc.create_connection() + conn = self.rpc.create_connection() message = 'topic test message' self.received_message = None @@ -71,7 +67,7 @@ class RpcKombuTestCase(test.TestCase): def test_direct_send_receive(self): """Test sending to a direct exchange/queue""" - conn = rpc.create_connection() + conn = self.rpc.create_connection() message = 'direct test message' self.received_message = None @@ -91,8 +87,8 @@ class RpcKombuTestCase(test.TestCase): def test_fanout_send_receive(self): """Test sending to a fanout exchange and consuming from 2 queues""" - conn = rpc.create_connection() - conn2 = rpc.create_connection() + conn = self.rpc.create_connection() + conn2 = self.rpc.create_connection() message = 'fanout test message' self.received_message = None @@ -112,149 +108,3 @@ class RpcKombuTestCase(test.TestCase): conn2.consume(limit=1) conn2.close() self.assertEqual(self.received_message, message) - - def test_call_succeed(self): - value = 42 - result = rpc.call(self.context, 'test', {"method": "echo", - "args": {"value": value}}) - self.assertEqual(value, result) - - def test_call_succeed_despite_multiple_returns(self): - value = 42 - result = rpc.call(self.context, 'test', {"method": "echo_three_times", - "args": {"value": value}}) - self.assertEqual(value + 2, result) - - def test_call_succeed_despite_multiple_returns_yield(self): - value = 42 - result = rpc.call(self.context, 'test', - {"method": "echo_three_times_yield", - "args": {"value": value}}) - self.assertEqual(value + 2, result) - - def test_multicall_succeed_once(self): - value = 42 - result = rpc.multicall(self.context, - 'test', - {"method": "echo", - "args": {"value": value}}) - for i, x in enumerate(result): - if i > 0: - self.fail('should only receive one response') - self.assertEqual(value + i, x) - - def test_multicall_succeed_three_times(self): - value = 42 - result = rpc.multicall(self.context, - 'test', - {"method": "echo_three_times", - "args": {"value": value}}) - for i, x in enumerate(result): - self.assertEqual(value + i, x) - - def test_multicall_succeed_three_times_yield(self): - value = 42 - result = rpc.multicall(self.context, - 'test', - 
{"method": "echo_three_times_yield", - "args": {"value": value}}) - for i, x in enumerate(result): - self.assertEqual(value + i, x) - - def test_context_passed(self): - """Makes sure a context is passed through rpc call.""" - value = 42 - result = rpc.call(self.context, - 'test', {"method": "context", - "args": {"value": value}}) - self.assertEqual(self.context.to_dict(), result) - - def test_call_exception(self): - """Test that exception gets passed back properly. - - rpc.call returns a RemoteError object. The value of the - exception is converted to a string, so we convert it back - to an int in the test. - - """ - value = 42 - self.assertRaises(rpc.RemoteError, - rpc.call, - self.context, - 'test', - {"method": "fail", - "args": {"value": value}}) - try: - rpc.call(self.context, - 'test', - {"method": "fail", - "args": {"value": value}}) - self.fail("should have thrown rpc.RemoteError") - except rpc.RemoteError as exc: - self.assertEqual(int(exc.value), value) - - def test_nested_calls(self): - """Test that we can do an rpc.call inside another call.""" - class Nested(object): - @staticmethod - def echo(context, queue, value): - """Calls echo in the passed queue""" - LOG.debug(_("Nested received %(queue)s, %(value)s") - % locals()) - # TODO: so, it will replay the context and use the same REQID? - # that's bizarre. - ret = rpc.call(context, - queue, - {"method": "echo", - "args": {"value": value}}) - LOG.debug(_("Nested return %s"), ret) - return value - - nested = Nested() - conn = rpc.create_connection(True) - conn.create_consumer('nested', nested, False) - conn.consume_in_thread() - value = 42 - result = rpc.call(self.context, - 'nested', {"method": "echo", - "args": {"queue": "test", - "value": value}}) - conn.close() - self.assertEqual(value, result) - - -class TestReceiver(object): - """Simple Proxy class so the consumer has methods to call. - - Uses static methods because we aren't actually storing any state. - - """ - - @staticmethod - def echo(context, value): - """Simply returns whatever value is sent in.""" - LOG.debug(_("Received %s"), value) - return value - - @staticmethod - def context(context, value): - """Returns dictionary version of context.""" - LOG.debug(_("Received %s"), context) - return context.to_dict() - - @staticmethod - def echo_three_times(context, value): - context.reply(value) - context.reply(value + 1) - context.reply(value + 2) - - @staticmethod - def echo_three_times_yield(context, value): - yield value - yield value + 1 - yield value + 2 - - @staticmethod - def fail(context, value): - """Raises an exception with the value sent in.""" - raise Exception(value) From f4d58a968b4f3c504d73d68e245f77043ff37dc7 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Mon, 29 Aug 2011 15:08:32 -0700 Subject: [PATCH 32/46] doc string cleanup --- nova/rpc/impl_kombu.py | 29 +++++++++++++++++++++++------ 1 file changed, 23 insertions(+), 6 deletions(-) diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py index 83ee1b122..ffd6447da 100644 --- a/nova/rpc/impl_kombu.py +++ b/nova/rpc/impl_kombu.py @@ -290,7 +290,7 @@ class FanoutPublisher(Publisher): class Connection(object): - """Connection instance object.""" + """Connection object.""" def __init__(self): self.consumers = [] @@ -503,7 +503,18 @@ ConnectionPool = Pool( class ConnectionContext(object): + """The class that is actually returned to the caller of + create_connection(). This is a essentially a wrapper around + Connection that supports 'with' and can return a new Connection or + one from a pool. 
It will also catch when an instance of this class + is to be deleted so that we can return Connections to the pool on + exceptions and so forth without making the caller be responsible for + catching all exceptions and making sure to return a connection to + the pool. + """ + def __init__(self, pooled=True): + """Create a new connection, or get one from the pool""" self.connection = None if pooled: self.connection = ConnectionPool.get() @@ -512,9 +523,13 @@ class ConnectionContext(object): self.pooled = pooled def __enter__(self): + """with ConnectionContext() should return self""" return self def _done(self): + """If the connection came from a pool, clean it up and put it back. + If it did not come from a pool, close it. + """ if self.connection: if self.pooled: # Reset the connection so it's ready for the next caller @@ -533,19 +548,19 @@ class ConnectionContext(object): self.connection = None def __exit__(self, t, v, tb): - """end if 'with' statement. We're done here.""" + """end of 'with' statement. We're done here.""" self._done() def __del__(self): - """Put Connection back into the pool if this ConnectionContext - is being deleted - """ + """Caller is done with this connection. Make sure we cleaned up.""" self._done() def close(self): + """Caller is done with this connection.""" self._done() def __getattr__(self, key): + """Proxy all other calls to the Connection instance""" if self.connection: return getattr(self.connection, key) else: @@ -637,6 +652,7 @@ def _pack_context(msg, context): class RpcContext(context.RequestContext): + """Context that supports replying to a rpc.call""" def __init__(self, *args, **kwargs): msg_id = kwargs.pop('msg_id', None) self.msg_id = msg_id @@ -656,7 +672,7 @@ class MulticallWaiter(object): def done(self): self._done = True - self._connection = None + self._connection.close() def __call__(self, data): """The consume() callback will call this. 
Store the result.""" @@ -666,6 +682,7 @@ class MulticallWaiter(object): self._result = data['result'] def __iter__(self): + """Return a result until we get a 'None' response from consumer""" if self._done: raise StopIteration while True: From 38b42096e987c037e126183d018e7e1fa09cd93c Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Mon, 29 Aug 2011 15:15:58 -0700 Subject: [PATCH 33/46] fix ajax console proxy for new create_consumer method --- bin/nova-ajax-console-proxy | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/bin/nova-ajax-console-proxy b/bin/nova-ajax-console-proxy index b3205ec56..23fb42fb5 100755 --- a/bin/nova-ajax-console-proxy +++ b/bin/nova-ajax-console-proxy @@ -114,8 +114,7 @@ class AjaxConsoleProxy(object): {'args': kwargs, 'last_activity': time.time()} self.conn = rpc.create_connection(new=True) - rpc.create_consumer( - self.conn, + self.conn.create_consumer( FLAGS.ajax_console_proxy_topic, TopicProxy) From 27eb5005006a22040e7d04321575ba7a7570d6d1 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Mon, 29 Aug 2011 15:25:54 -0700 Subject: [PATCH 34/46] pep8 fix for test_rpc_common.py --- nova/tests/test_rpc_common.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nova/tests/test_rpc_common.py b/nova/tests/test_rpc_common.py index b922be1df..4ab4e8a0e 100644 --- a/nova/tests/test_rpc_common.py +++ b/nova/tests/test_rpc_common.py @@ -49,8 +49,9 @@ class _BaseRpcTestCase(test.TestCase): def test_call_succeed_despite_multiple_returns(self): value = 42 - result = self.rpc.call(self.context, 'test', {"method": "echo_three_times", - "args": {"value": value}}) + result = self.rpc.call(self.context, 'test', + {"method": "echo_three_times", + "args": {"value": value}}) self.assertEqual(value + 2, result) def test_call_succeed_despite_multiple_returns_yield(self): From 80e1f8d2f31ca353a32a6561d72bad90d4bba82e Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Mon, 29 Aug 2011 15:26:26 -0700 Subject: [PATCH 35/46] remove unneeded connection= in carrot Consumer init --- nova/rpc/impl_carrot.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/rpc/impl_carrot.py b/nova/rpc/impl_carrot.py index 6d504aaec..1d23c1853 100644 --- a/nova/rpc/impl_carrot.py +++ b/nova/rpc/impl_carrot.py @@ -171,7 +171,6 @@ class Consumer(messaging.Consumer): """ def __init__(self, *args, **kwargs): - connection = kwargs.get('connection') max_retries = FLAGS.rabbit_max_retries sleep_time = FLAGS.rabbit_retry_interval tries = 0 From 3f575dfd97fdf5d9de4d239983717ac628f7f3f6 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Tue, 30 Aug 2011 00:03:39 -0700 Subject: [PATCH 36/46] disassociate floating ips before re-associating, and prevent re-association of already associated floating ips in manager --- nova/exception.py | 4 ++++ nova/tests/test_network.py | 16 ++++++++++++++++ 2 files changed, 20 insertions(+) diff --git a/nova/exception.py b/nova/exception.py index 32981f4d5..b54981963 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -533,6 +533,10 @@ class NoMoreFloatingIps(FloatingIpNotFound): message = _("Zero floating ips available.") +class FloatingIpAlreadyInUse(NovaException): + message = _("Floating ip %(address) already in use by %(fixed_ip).") + + class NoFloatingIpsDefined(NotFound): message = _("Zero floating ips exist.") diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py index 0b8539442..25ff940f0 100644 --- a/nova/tests/test_network.py +++ b/nova/tests/test_network.py @@ -371,6 +371,22 @@ class 
VlanNetworkTestCase(test.TestCase): self.mox.ReplayAll() self.network.validate_networks(self.context, requested_networks) + def test_cant_associate_associated_floating_ip(self): + ctxt = context.RequestContext('testuser', 'testproject', + is_admin=False) + + def fake_floating_ip_get_by_address(context, address): + return {'address': '10.10.10.10', + 'fixed_ip': {'address': '10.0.0.1'}} + self.stubs.Set(self.network.db, 'floating_ip_get_by_address', + fake_floating_ip_get_by_address) + + self.assertRaises(exception.FloatingIpAlreadyInUse, + self.network.associate_floating_ip, + ctxt, + mox.IgnoreArg(), + mox.IgnoreArg()) + class CommonNetworkTestCase(test.TestCase): From 6032af06b42a499e6f1d6ab99a4726851de0231e Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 30 Aug 2011 12:01:18 -0700 Subject: [PATCH 37/46] make two functions instead of fast flag and add compute api commands instead of hitting db directly --- bin/instance-usage-audit | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/bin/instance-usage-audit b/bin/instance-usage-audit index a06c6b1b3..7ce5732e7 100755 --- a/bin/instance-usage-audit +++ b/bin/instance-usage-audit @@ -102,9 +102,8 @@ if __name__ == '__main__': logging.setup() begin, end = time_period(FLAGS.instance_usage_audit_period) print "Creating usages for %s until %s" % (str(begin), str(end)) - instances = db.instance_get_active_by_window(context.get_admin_context(), - begin, - end) + ctxt = context.get_admin_context() + instances = db.instance_get_active_by_window_joined(ctxt, begin, end) print "%s instances" % len(instances) for instance_ref in instances: usage_info = utils.usage_from_instance(instance_ref, From bad25cb88c4160122301c5bd8e7c0963f7340090 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Tue, 30 Aug 2011 13:53:01 -0700 Subject: [PATCH 39/46] restore old way FLAGS.rpc_backend worked.. 
no short name support for consistency --- nova/rpc/__init__.py | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/nova/rpc/__init__.py b/nova/rpc/__init__.py index fe50fb476..4f57a345d 100644 --- a/nova/rpc/__init__.py +++ b/nova/rpc/__init__.py @@ -23,18 +23,10 @@ from nova import flags FLAGS = flags.FLAGS flags.DEFINE_string('rpc_backend', - 'kombu', + 'nova.rpc.impl_kombu', "The messaging module to use, defaults to kombu.") -impl_table = {'kombu': 'nova.rpc.impl_kombu', - 'amqp': 'nova.rpc.impl_kombu', - 'carrot': 'nova.rpc.impl_carrot'} - - -# rpc_backend can be a short name like 'kombu', or it can be the full -# module name -RPCIMPL = import_object(impl_table.get(FLAGS.rpc_backend, - FLAGS.rpc_backend)) +RPCIMPL = import_object(FLAGS.rpc_backend) def create_connection(new=True): From 80ad08bfbf1e57fe59a3d55007f5441f7dea4c25 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Tue, 30 Aug 2011 15:12:43 -0700 Subject: [PATCH 40/46] make default carrot again and delay the import in rpc/__init__.py --- nova/rpc/__init__.py | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/nova/rpc/__init__.py b/nova/rpc/__init__.py index 4f57a345d..32509fff6 100644 --- a/nova/rpc/__init__.py +++ b/nova/rpc/__init__.py @@ -23,27 +23,33 @@ from nova import flags FLAGS = flags.FLAGS flags.DEFINE_string('rpc_backend', - 'nova.rpc.impl_kombu', - "The messaging module to use, defaults to kombu.") + 'nova.rpc.impl_carrot', + "The messaging module to use, defaults to carrot.") -RPCIMPL = import_object(FLAGS.rpc_backend) +_RPCIMPL = None + +def get_impl(): + global _RPCIMPL + if _RPCIMPL is None: + _RPCIMPL = import_object(FLAGS.rpc_backend) + return _RPCIMPL def create_connection(new=True): - return RPCIMPL.create_connection(new=new) + return get_impl().create_connection(new=new) def call(context, topic, msg): - return RPCIMPL.call(context, topic, msg) + return get_impl().call(context, topic, msg) def cast(context, topic, msg): - return RPCIMPL.cast(context, topic, msg) + return get_impl().cast(context, topic, msg) def fanout_cast(context, topic, msg): - return RPCIMPL.fanout_cast(context, topic, msg) + return get_impl().fanout_cast(context, topic, msg) def multicall(context, topic, msg): - return RPCIMPL.multicall(context, topic, msg) + return get_impl().multicall(context, topic, msg) From da65db6f3981a78bb5cf264c5bad4f7d6f4377a5 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Tue, 30 Aug 2011 15:30:16 -0700 Subject: [PATCH 41/46] pep8 fix --- nova/rpc/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nova/rpc/__init__.py b/nova/rpc/__init__.py index 32509fff6..e29cd80e1 100644 --- a/nova/rpc/__init__.py +++ b/nova/rpc/__init__.py @@ -28,7 +28,9 @@ flags.DEFINE_string('rpc_backend', _RPCIMPL = None + def get_impl(): + """Delay import of rpc_backend until FLAGS are loaded.""" global _RPCIMPL if _RPCIMPL is None: _RPCIMPL = import_object(FLAGS.rpc_backend) From 1d14c3f26250e755a3086b0139bed6d7abbb9545 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Tue, 30 Aug 2011 15:52:25 -0700 Subject: [PATCH 42/46] logging change when rpc pool creates new connection --- nova/rpc/impl_carrot.py | 2 +- nova/rpc/impl_kombu.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/rpc/impl_carrot.py b/nova/rpc/impl_carrot.py index 1d23c1853..303a4ff88 100644 --- a/nova/rpc/impl_carrot.py +++ b/nova/rpc/impl_carrot.py @@ -152,7 +152,7 @@ class Pool(pools.Pool): # TODO(comstud): Timeout connections not used in a while def 
create(self): - LOG.debug('Creating new connection') + LOG.debug('Pool creating new connection') return Connection.instance(new=True) # Create a ConnectionPool to use for RPC calls. We'll order the diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py index ffd6447da..8242bd177 100644 --- a/nova/rpc/impl_kombu.py +++ b/nova/rpc/impl_kombu.py @@ -491,7 +491,7 @@ class Pool(pools.Pool): # TODO(comstud): Timeout connections not used in a while def create(self): - LOG.debug('Creating new connection') + LOG.debug('Pool creating new connection') return Connection() # Create a ConnectionPool to use for RPC calls. We'll order the From c239d18e36fe204bfc343f7051eb779657fed3c1 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Tue, 30 Aug 2011 20:45:51 -0700 Subject: [PATCH 43/46] fix FloatingIpAlreadyInUse to use correct string pattern, convert ApiErrors to 400 responses --- nova/exception.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/exception.py b/nova/exception.py index b54981963..caa65146d 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -534,7 +534,7 @@ class NoMoreFloatingIps(FloatingIpNotFound): class FloatingIpAlreadyInUse(NovaException): - message = _("Floating ip %(address) already in use by %(fixed_ip).") + message = _("Floating ip %(address)s already in use by %(fixed_ip)s.") class NoFloatingIpsDefined(NotFound): From da534db8ab0391a8e32c489b6b9d822aa1918ebb Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Tue, 30 Aug 2011 20:53:27 -0700 Subject: [PATCH 44/46] use kombu.connection.BrokerConnection vs kombu.connection.Connection so that older versions of kombu (1.0.4) work as well as newer. --- nova/rpc/impl_kombu.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py index 8242bd177..ab70e7cfb 100644 --- a/nova/rpc/impl_kombu.py +++ b/nova/rpc/impl_kombu.py @@ -322,7 +322,7 @@ class Connection(object): except self.connection.connection_errors: pass time.sleep(1) - self.connection = kombu.connection.Connection(**self.params) + self.connection = kombu.connection.BrokerConnection(**self.params) if FLAGS.fake_rabbit: # Kludge to speed up tests. 
self.connection.transport.polling_interval = 0.0 From 76e9ba907b3bb915511e8e405d4f4116199588da Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Tue, 30 Aug 2011 21:05:43 -0700 Subject: [PATCH 45/46] switched default to kombu per vishy --- nova/rpc/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/rpc/__init__.py b/nova/rpc/__init__.py index e29cd80e1..c0cfdd5ce 100644 --- a/nova/rpc/__init__.py +++ b/nova/rpc/__init__.py @@ -23,8 +23,8 @@ from nova import flags FLAGS = flags.FLAGS flags.DEFINE_string('rpc_backend', - 'nova.rpc.impl_carrot', - "The messaging module to use, defaults to carrot.") + 'nova.rpc.impl_kombu', + "The messaging module to use, defaults to kombu.") _RPCIMPL = None From eb96014790251553613511b06685d35ac641fe37 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 31 Aug 2011 11:54:19 -0700 Subject: [PATCH 46/46] kludge for kombu 1.1.3 memory transport bug --- nova/rpc/impl_kombu.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py index ab70e7cfb..b994a6a10 100644 --- a/nova/rpc/impl_kombu.py +++ b/nova/rpc/impl_kombu.py @@ -303,6 +303,7 @@ class Connection(object): self.interval_stepping = FLAGS.rabbit_retry_backoff # max retry-interval = 30 seconds self.interval_max = 30 + self.memory_transport = False self.params = dict(hostname=FLAGS.rabbit_host, port=FLAGS.rabbit_port, @@ -311,6 +312,9 @@ class Connection(object): virtual_host=FLAGS.rabbit_virtual_host) if FLAGS.fake_rabbit: self.params['transport'] = 'memory' + self.memory_transport = True + else: + self.memory_transport = False self.connection = None self.reconnect() @@ -323,7 +327,7 @@ class Connection(object): pass time.sleep(1) self.connection = kombu.connection.BrokerConnection(**self.params) - if FLAGS.fake_rabbit: + if self.memory_transport: # Kludge to speed up tests. self.connection.transport.polling_interval = 0.0 self.consumer_num = itertools.count(1) @@ -345,6 +349,9 @@ class Connection(object): LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d' % self.params)) self.channel = self.connection.channel() + # work around 'memory' transport bug in 1.1.3 + if self.memory_transport: + self.channel._new_queue('ae.undeliver') for consumer in self.consumers: consumer.reconnect(self.channel) if self.consumers: @@ -374,6 +381,9 @@ class Connection(object): self.cancel_consumer_thread() self.channel.close() self.channel = self.connection.channel() + # work around 'memory' transport bug in 1.1.3 + if self.memory_transport: + self.channel._new_queue('ae.undeliver') self.consumers = [] def declare_consumer(self, consumer_cls, topic, callback):