From 91458f29f8b04407471d9d94a5ad165817f72a43 Mon Sep 17 00:00:00 2001 From: Muneyuki Noguchi Date: Fri, 25 Mar 2011 11:47:17 +0900 Subject: [PATCH 01/66] Restore volume state on migration failure. --- nova/compute/manager.py | 19 +++++++++++++++++-- nova/virt/libvirt_conn.py | 2 +- 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 468771f46e19..356d20285e5f 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1003,7 +1003,7 @@ class ComputeManager(manager.SchedulerDependentManager): "Domain not found: no domain with matching name.\" " "This error can be safely ignored.")) - def recover_live_migration(self, ctxt, instance_ref, host=None): + def recover_live_migration(self, ctxt, instance_ref, host=None, dest=None): """Recovers Instance/volume state from migrating -> running. :param ctxt: security context @@ -1011,6 +1011,7 @@ class ComputeManager(manager.SchedulerDependentManager): :param host: DB column value is updated by this hostname. if none, the host instance currently running is selected. + :param dest: destination host """ @@ -1024,7 +1025,21 @@ class ComputeManager(manager.SchedulerDependentManager): 'host': host}) for volume in instance_ref['volumes']: - self.db.volume_update(ctxt, volume['id'], {'status': 'in-use'}) + volume_id = volume['id'] + self.db.volume_update(ctxt, volume_id, {'status': 'in-use'}) + if dest: + topic = self.db.queue_get_for(ctxt, FLAGS.compute_topic, dest) + rpc.call(ctxt, topic, + {"method": "restore_volume_state", + "args": {'volume_id': volume_id}}) + + def restore_volume_state(self, context, volume_id): + """Restore volume state on migration failure. + + :param context: security context + :param volume_id: nova.db.sqlalchemy.models.Volume.id + """ + self.volume_manager.remove_compute_volume(context, volume_id) def periodic_tasks(self, context=None): """Tasks to be run at a periodic interval.""" diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 2cecb010d9fc..5c6baa36eef0 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -1370,7 +1370,7 @@ class LibvirtConnection(driver.ComputeDriver): FLAGS.live_migration_bandwidth) except Exception: - recover_method(ctxt, instance_ref) + recover_method(ctxt, instance_ref, None, dest) raise # Waiting for completion of live_migration. From 36e1510e4ad4a83bb062cdd82dc91d4ea15d1c5e Mon Sep 17 00:00:00 2001 From: termie Date: Mon, 28 Mar 2011 10:46:02 -0700 Subject: [PATCH 02/66] HACKING update for docstrings --- HACKING | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/HACKING b/HACKING index e58d60e582a4..95afc0f74f0f 100644 --- a/HACKING +++ b/HACKING @@ -50,17 +50,22 @@ Human Alphabetical Order Examples Docstrings ---------- - """Summary of the function, class or method, less than 80 characters. + """A one line docstring looks like this and ends in a period.""" - New paragraph after newline that explains in more detail any general - information about the function, class or method. After this, if defining - parameters and return types use the Sphinx format. After that an extra - newline then close the quotations. + + """A multiline docstring has a one-line summary, less than 80 characters. + + Then a new paragraph after a newline that explains in more detail any + general information about the function, class or method. Example usages + are also great to have here if it is a complex class for function. 
When writing the docstring for a class, an extra line should be placed after the closing quotations. For more in-depth explanations for these decisions see http://www.python.org/dev/peps/pep-0257/ + If you are going to describe parameters and return values, use Sphinx, the + appropriate syntax is as follows. + :param foo: the foo parameter :param bar: the bar parameter :returns: description of the return value From e5b0f3921331b5c0acbe321b00e2a9fa8d27be4e Mon Sep 17 00:00:00 2001 From: Muneyuki Noguchi Date: Tue, 29 Mar 2011 10:40:48 +0900 Subject: [PATCH 03/66] Add remove_volume to compute API. --- nova/compute/api.py | 7 +++++++ nova/compute/manager.py | 26 ++++++++++++++------------ 2 files changed, 21 insertions(+), 12 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 266cbe677612..0cdb8f4b7474 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -668,6 +668,13 @@ class API(base.Base): "volume_id": volume_id}}) return instance + def remove_volume(self, context, volume_id, host): + """Remove volume on specified compute host.""" + rpc.call(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "remove_volume", + "args": {'volume_id': volume_id}}) + def associate_floating_ip(self, context, instance_id, address): instance = self.get(context, instance_id) self.network_api.associate_floating_ip(context, diff --git a/nova/compute/manager.py b/nova/compute/manager.py index aa612b1376de..bbd1fed1a8ab 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -46,6 +46,7 @@ import functools from eventlet import greenthread +from nova import compute from nova import exception from nova import flags from nova import log as logging @@ -772,6 +773,14 @@ class ComputeManager(manager.SchedulerDependentManager): self.db.volume_detached(context, volume_id) return True + def remove_volume(self, context, volume_id): + """Remove volume on compute host. + + :param context: security context + :param volume_id: nova.db.sqlalchemy.models.Volume.id + """ + self.volume_manager.remove_compute_volume(context, volume_id) + @exception.wrap_exception def compare_cpu(self, context, cpu_info): """Checks the host cpu is compatible to a cpu given by xml. @@ -1018,22 +1027,15 @@ class ComputeManager(manager.SchedulerDependentManager): 'state': power_state.RUNNING, 'host': host}) + if dest: + # NOTE(noguchimn): We set image_service here + # not to import an image service object. + compute_api = compute.API(image_service=1) for volume in instance_ref['volumes']: volume_id = volume['id'] self.db.volume_update(ctxt, volume_id, {'status': 'in-use'}) if dest: - topic = self.db.queue_get_for(ctxt, FLAGS.compute_topic, dest) - rpc.call(ctxt, topic, - {"method": "restore_volume_state", - "args": {'volume_id': volume_id}}) - - def restore_volume_state(self, context, volume_id): - """Restore volume state on migration failure. 
- - :param context: security context - :param volume_id: nova.db.sqlalchemy.models.Volume.id - """ - self.volume_manager.remove_compute_volume(context, volume_id) + compute_api.remove_volume(ctxt, volume_id, dest) def periodic_tasks(self, context=None): """Tasks to be run at a periodic interval.""" From d7798b3b383b32576f79e281a220266b65702b1e Mon Sep 17 00:00:00 2001 From: termie Date: Tue, 29 Mar 2011 11:15:16 -0700 Subject: [PATCH 04/66] accidentally dropped a sentence --- HACKING | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/HACKING b/HACKING index 95afc0f74f0f..2f364c894688 100644 --- a/HACKING +++ b/HACKING @@ -57,7 +57,9 @@ Docstrings Then a new paragraph after a newline that explains in more detail any general information about the function, class or method. Example usages - are also great to have here if it is a complex class for function. + are also great to have here if it is a complex class for function. After + you have finished your descriptions add an extra newline and close the + quotations. When writing the docstring for a class, an extra line should be placed after the closing quotations. For more in-depth explanations for these From b5f0d2c22ce48d06dc502f7790ed48fdf007d7f6 Mon Sep 17 00:00:00 2001 From: Muneyuki Noguchi Date: Thu, 31 Mar 2011 17:39:00 +0900 Subject: [PATCH 05/66] Add volume.API.remove_from_compute instead of compute.API.remove_volume. --- nova/compute/api.py | 7 ------- nova/compute/manager.py | 12 +++++------- nova/volume/api.py | 7 +++++++ 3 files changed, 12 insertions(+), 14 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 63983afd8993..1dbd73f8fd1c 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -687,13 +687,6 @@ class API(base.Base): "volume_id": volume_id}}) return instance - def remove_volume(self, context, volume_id, host): - """Remove volume on specified compute host.""" - rpc.call(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "remove_volume", - "args": {'volume_id': volume_id}}) - def associate_floating_ip(self, context, instance_id, address): instance = self.get(context, instance_id) self.network_api.associate_floating_ip(context, diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 8ce8a5d8647e..85bcd7590af2 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -46,13 +46,13 @@ import functools from eventlet import greenthread -from nova import compute from nova import exception from nova import flags from nova import log as logging from nova import manager from nova import rpc from nova import utils +from nova import volume from nova.compute import power_state from nova.virt import driver @@ -1037,14 +1037,12 @@ class ComputeManager(manager.SchedulerDependentManager): 'host': host}) if dest: - # NOTE(noguchimn): We set image_service here - # not to import an image service object. 
- compute_api = compute.API(image_service=1) - for volume in instance_ref['volumes']: - volume_id = volume['id'] + volume_api = volume.API() + for volume_ref in instance_ref['volumes']: + volume_id = volume_ref['id'] self.db.volume_update(ctxt, volume_id, {'status': 'in-use'}) if dest: - compute_api.remove_volume(ctxt, volume_id, dest) + volume_api.remove_from_compute(ctxt, volume_id, dest) def periodic_tasks(self, context=None): """Tasks to be run at a periodic interval.""" diff --git a/nova/volume/api.py b/nova/volume/api.py index 4b4bb9dc5ed9..09befb647fd5 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -103,3 +103,10 @@ class API(base.Base): # TODO(vish): abstract status checking? if volume['status'] == "available": raise exception.ApiError(_("Volume is already detached")) + + def remove_from_compute(self, context, volume_id, host): + """Remove volume from specified compute host.""" + rpc.call(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "remove_volume", + "args": {'volume_id': volume_id}}) From c1120caaa8c8ed8902b5634da56b2bd5478662e1 Mon Sep 17 00:00:00 2001 From: Muneyuki Noguchi Date: Mon, 4 Apr 2011 10:25:58 +0900 Subject: [PATCH 06/66] Use keyword arguments. --- nova/virt/libvirt_conn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index c5a71d244b0d..c03c2ae1dcce 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -1475,7 +1475,7 @@ class LibvirtConnection(driver.ComputeDriver): FLAGS.live_migration_bandwidth) except Exception: - recover_method(ctxt, instance_ref, None, dest) + recover_method(ctxt, instance_ref, dest=dest) raise # Waiting for completion of live_migration. From d7053efa810aa3d20ef7cd089429c6d96f451a7d Mon Sep 17 00:00:00 2001 From: Ilya Alekseyev Date: Mon, 4 Apr 2011 21:05:38 +0400 Subject: [PATCH 07/66] Fixed network_info creating. 
--- nova/tests/test_virt.py | 6 ++++-- nova/virt/libvirt_conn.py | 5 +++++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index 958c8e3e2299..5a010d347a44 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -617,7 +617,8 @@ class IptablesFirewallTestCase(test.TestCase): instance_ref = db.instance_create(self.context, {'user_id': 'fake', 'project_id': 'fake', - 'mac_address': '56:12:12:12:12:12'}) + 'mac_address': '56:12:12:12:12:12', + 'instance_type': 'm1.small'}) ip = '10.11.12.13' network_ref = db.project_get_network(self.context, @@ -840,7 +841,8 @@ class NWFilterTestCase(test.TestCase): instance_ref = db.instance_create(self.context, {'user_id': 'fake', 'project_id': 'fake', - 'mac_address': '00:A0:C9:14:C8:29'}) + 'mac_address': '00:A0:C9:14:C8:29', + 'instance_type': 'm1.small'}) inst_id = instance_ref['id'] ip = '10.11.12.13' diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index f34ea72255db..93a2505021b0 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -167,6 +167,9 @@ def _get_network_info(instance): networks = db.network_get_all_by_instance(admin_context, instance['id']) + + flavor = db.instance_type_get_by_name(admin_context, + instance['instance_type']) network_info = [] def ip_dict(ip): @@ -191,7 +194,9 @@ def _get_network_info(instance): mapping = { 'label': network['label'], 'gateway': network['gateway'], + 'broadcast': network['broadcast'], 'mac': instance.mac_address, + 'rxtx_cap': flavor['rxtx_cap'], 'dns': [network['dns']], 'ips': [ip_dict(ip) for ip in network_ips]} From 519c885a4e3622417cd78655a211a2f23033d610 Mon Sep 17 00:00:00 2001 From: Ilya Alekseyev Date: Tue, 5 Apr 2011 19:42:09 +0400 Subject: [PATCH 08/66] pep8 fixed --- nova/virt/libvirt_conn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 93a2505021b0..1eec55e5fe0e 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -167,7 +167,7 @@ def _get_network_info(instance): networks = db.network_get_all_by_instance(admin_context, instance['id']) - + flavor = db.instance_type_get_by_name(admin_context, instance['instance_type']) network_info = [] From 28568e51ab4eb84e66e7d15adc9648220684ab84 Mon Sep 17 00:00:00 2001 From: Ilya Alekseyev Date: Tue, 5 Apr 2011 20:40:52 +0400 Subject: [PATCH 09/66] removed blank lines for pep8 fix --- nova/virt/libvirt_conn.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 1eec55e5fe0e..d6f51a6446b3 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -164,10 +164,8 @@ def _get_network_info(instance): ip_addresses = db.fixed_ip_get_all_by_instance(admin_context, instance['id']) - networks = db.network_get_all_by_instance(admin_context, instance['id']) - flavor = db.instance_type_get_by_name(admin_context, instance['instance_type']) network_info = [] From 14833117f19a3a1789c99a519b12bf9c61faec07 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Wed, 6 Apr 2011 18:17:43 -0400 Subject: [PATCH 10/66] Added an option to run_tests.sh so you can run just pep8. 
So now you can: ./run_tests.sh --just-pep8 or ./run_tests.sh -p --- run_tests.sh | 27 ++++++++++++++++++++------- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/run_tests.sh b/run_tests.sh index 8f4d37cd4634..9773071c7efd 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -7,6 +7,7 @@ function usage { echo " -V, --virtual-env Always use virtualenv. Install automatically if not present" echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment" echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added." + echo " -p, --just-pep8 Just run pep8" echo " -h, --help Print this usage message" echo "" echo "Note: with no options specified, the script will try to run the tests in a virtual environment," @@ -21,6 +22,7 @@ function process_option { -V|--virtual-env) let always_venv=1; let never_venv=0;; -N|--no-virtual-env) let always_venv=0; let never_venv=1;; -f|--force) let force=1;; + -p|--just-pep8) let just_pep8=1;; *) noseargs="$noseargs $1" esac } @@ -32,6 +34,7 @@ never_venv=0 force=0 noseargs= wrapper="" +just_pep8=0 for arg in "$@"; do process_option $arg @@ -53,6 +56,18 @@ function run_tests { return $RESULT } +function run_pep8 { + echo "Running pep8 ..." + srcfiles=`find bin -type f ! -name "nova.conf*"` + srcfiles+=" nova setup.py plugins/xenserver/xenapi/etc/xapi.d/plugins/glance" + pep8 --repeat --show-pep8 --show-source --exclude=vcsversion.py ${srcfiles} +} + +if [ $just_pep8 -eq 1 ]; then + run_pep8 + exit +fi + NOSETESTS="python run_tests.py $noseargs" if [ $never_venv -eq 0 ] @@ -81,11 +96,9 @@ then fi fi -if [ -z "$noseargs" ]; -then - srcfiles=`find bin -type f ! -name "nova.conf*"` - srcfiles+=" nova setup.py plugins/xenserver/xenapi/etc/xapi.d/plugins/glance" - run_tests && pep8 --repeat --show-pep8 --show-source --exclude=vcsversion.py ${srcfiles} || exit 1 -else - run_tests +run_tests + +# Also run pep8 if no options were provided. +if [ -z "$noseargs" ]; then + run_pep8 fi From 1ee150c449e630c6409798399eccb577c8273c70 Mon Sep 17 00:00:00 2001 From: Muneyuki Noguchi Date: Thu, 7 Apr 2011 12:35:45 +0900 Subject: [PATCH 11/66] Make description of volume_id more generic. --- nova/compute/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 85bcd7590af2..383eda0dc641 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -786,7 +786,7 @@ class ComputeManager(manager.SchedulerDependentManager): """Remove volume on compute host. :param context: security context - :param volume_id: nova.db.sqlalchemy.models.Volume.id + :param volume_id: volume ID """ self.volume_manager.remove_compute_volume(context, volume_id) From 9ce66a4a09094d2b0403deea77416149aa789f3c Mon Sep 17 00:00:00 2001 From: Ilya Alekseyev Date: Mon, 11 Apr 2011 22:35:09 +0400 Subject: [PATCH 12/66] Floating ips auto assignment --- nova/compute/manager.py | 21 +++++++++++++++++++++ nova/db/api.py | 3 +++ nova/db/sqlalchemy/api.py | 12 ++++++++++++ 3 files changed, 36 insertions(+) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 68b163355310..86273b6b4530 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -73,6 +73,8 @@ flags.DEFINE_integer('live_migration_retry_count', 30, flags.DEFINE_integer("rescue_timeout", 0, "Automatically unrescue an instance after N seconds." 
" Set to 0 to disable.") +flags.DEFINE_bool('auto_assign_floating_ip',False, 'Autoassigning floating' + ' ip to VM') LOG = logging.getLogger('nova.compute.manager') @@ -224,6 +226,16 @@ class ComputeManager(manager.SchedulerDependentManager): self.network_manager.setup_compute_network(context, instance_id) + if FLAGS.auto_assign_floating_ip: + public_ip = rpc.call(context, + FLAGS.network_topic, + {"method": "allocate_floating_ip", + "args": {"project_id": context.project_id}}) + self.network_manager.associate_floating_ip(context, + instance_id=instance_id, + address=public_ip) + + # TODO(vish) check to make sure the availability zone matches self.db.instance_set_state(context, instance_id, @@ -271,6 +283,15 @@ class ComputeManager(manager.SchedulerDependentManager): network_topic, {"method": "disassociate_floating_ip", "args": {"floating_address": address}}) + + if FLAGS.auto_assign_floating_ip: + LOG.debug(_("Deallocating floating ip %s"), + floating_ip['address'], context=context) + rpc.cast(context, + FLAGS.network_topic, + {"method": "deallocate_floating_ip", + "args": {"floating_address": + floating_ip['address']}}) address = fixed_ip['address'] if address: diff --git a/nova/db/api.py b/nova/db/api.py index 63901e94d879..859acc146841 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -290,6 +290,9 @@ def floating_ip_update(context, address, values): """Update a floating ip by address or raise if it doesn't exist.""" return IMPL.floating_ip_update(context, address, values) +def floating_ip_set_auto_assigned(context, address): + """Set auto_assigned flag to floating ip""" + return IMPL.floating_ip_set_auto_assigned(context, address) #################### diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index e675022e90e1..28126a517cf0 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -489,6 +489,7 @@ def floating_ip_deallocate(context, address): address, session=session) floating_ip_ref['project_id'] = None + floating_ip_ref['auto_assigned'] = False floating_ip_ref.save(session=session) @@ -522,6 +523,17 @@ def floating_ip_disassociate(context, address): return fixed_ip_address +@require_context +def floating_ip_set_auto_assigned(context, address): + session = get_session() + with session.begin(): + floating_ip_ref = floating_ip_get_by_address(context, + address, + session=session) + floating_ip_ref.auto_assigned = True + floating_ip_ref.save(session=session) + + @require_admin_context def floating_ip_get_all(context): session = get_session() From 4a2c973fe5c7cf68ff7f45a4927dc6d2e0a3986b Mon Sep 17 00:00:00 2001 From: Ilya Alekseyev Date: Tue, 12 Apr 2011 02:44:44 +0400 Subject: [PATCH 13/66] migaration and pep8 fixes --- nova/api/openstack/contrib/volumes.py | 3 +- nova/compute/manager.py | 4 +- nova/db/api.py | 2 + .../015_add_auto_assign_to_floating_ips.py | 38 +++++++++++++++++++ 4 files changed, 42 insertions(+), 5 deletions(-) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py diff --git a/nova/api/openstack/contrib/volumes.py b/nova/api/openstack/contrib/volumes.py index 6efacce520f8..18de2ec7192a 100644 --- a/nova/api/openstack/contrib/volumes.py +++ b/nova/api/openstack/contrib/volumes.py @@ -322,8 +322,7 @@ class Volumes(extensions.ExtensionDescriptor): # Does this matter? 
res = extensions.ResourceExtension('volumes', VolumeController(), - collection_actions={'detail': 'GET'} - ) + collection_actions={'detail': 'GET'}) resources.append(res) res = extensions.ResourceExtension('volume_attachments', diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 86273b6b4530..af35517081b5 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -73,7 +73,7 @@ flags.DEFINE_integer('live_migration_retry_count', 30, flags.DEFINE_integer("rescue_timeout", 0, "Automatically unrescue an instance after N seconds." " Set to 0 to disable.") -flags.DEFINE_bool('auto_assign_floating_ip',False, 'Autoassigning floating' +flags.DEFINE_bool('auto_assign_floating_ip', False, 'Autoassigning floating' ' ip to VM') LOG = logging.getLogger('nova.compute.manager') @@ -235,7 +235,6 @@ class ComputeManager(manager.SchedulerDependentManager): instance_id=instance_id, address=public_ip) - # TODO(vish) check to make sure the availability zone matches self.db.instance_set_state(context, instance_id, @@ -283,7 +282,6 @@ class ComputeManager(manager.SchedulerDependentManager): network_topic, {"method": "disassociate_floating_ip", "args": {"floating_address": address}}) - if FLAGS.auto_assign_floating_ip: LOG.debug(_("Deallocating floating ip %s"), floating_ip['address'], context=context) diff --git a/nova/db/api.py b/nova/db/api.py index 859acc146841..6b465c021b72 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -290,12 +290,14 @@ def floating_ip_update(context, address, values): """Update a floating ip by address or raise if it doesn't exist.""" return IMPL.floating_ip_update(context, address, values) + def floating_ip_set_auto_assigned(context, address): """Set auto_assigned flag to floating ip""" return IMPL.floating_ip_set_auto_assigned(context, address) #################### + def migration_update(context, id, values): """Update a migration instance""" return IMPL.migration_update(context, id, values) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py b/nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py new file mode 100644 index 000000000000..b7e480e67539 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py @@ -0,0 +1,38 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# Copyright 2011 Grid Dynamics +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import * +from sqlalchemy.sql import text +from migrate import * + + +meta = MetaData() + +c_auto_assigned = Column('auto_assigned',Boolean, default=False) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + floating_ips = Table('floating_ips', meta, autoload=True, + autoload_with=migrate_engine) + + floating_ips.create_column(c_auto_assigned) + + \ No newline at end of file From fa4aeb9af8d00ecff6620646c142e5ff68e1cd5e Mon Sep 17 00:00:00 2001 From: Ilya Alekseyev Date: Tue, 12 Apr 2011 03:07:22 +0400 Subject: [PATCH 14/66] pep8 fixes --- .../versions/015_add_auto_assign_to_floating_ips.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py b/nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py index b7e480e67539..6829b5d6e814 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py @@ -22,7 +22,7 @@ from migrate import * meta = MetaData() -c_auto_assigned = Column('auto_assigned',Boolean, default=False) +c_auto_assigned = Column('auto_assigned', Boolean, default=False) def upgrade(migrate_engine): @@ -33,6 +33,4 @@ def upgrade(migrate_engine): floating_ips = Table('floating_ips', meta, autoload=True, autoload_with=migrate_engine) - floating_ips.create_column(c_auto_assigned) - - \ No newline at end of file + floating_ips.create_column(c_auto_assigned) \ No newline at end of file From e288c8aab3092f8691e190d2a3b9405518dab858 Mon Sep 17 00:00:00 2001 From: termie Date: Tue, 12 Apr 2011 13:08:48 -0500 Subject: [PATCH 15/66] remove extra newline --- nova/compute/api.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index e6146231c4b1..c29523033843 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -114,7 +114,6 @@ class API(base.Base): Verifies that quota and other arguments are valid. """ - if not instance_type: instance_type = instance_types.get_default_instance_type() From 33ca304f4cd7156c6a183293521ba29bb9e2833e Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Wed, 13 Apr 2011 12:46:51 -0400 Subject: [PATCH 16/66] Changed pep8 command line option from --just-pep8 to --pep8. --- run_tests.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/run_tests.sh b/run_tests.sh index 9773071c7efd..4f85dfe6dbd0 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -7,7 +7,7 @@ function usage { echo " -V, --virtual-env Always use virtualenv. Install automatically if not present" echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment" echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added." 
- echo " -p, --just-pep8 Just run pep8" + echo " -p, --pep8 Just run pep8" echo " -h, --help Print this usage message" echo "" echo "Note: with no options specified, the script will try to run the tests in a virtual environment," @@ -22,7 +22,7 @@ function process_option { -V|--virtual-env) let always_venv=1; let never_venv=0;; -N|--no-virtual-env) let always_venv=0; let never_venv=1;; -f|--force) let force=1;; - -p|--just-pep8) let just_pep8=1;; + -p|--pep8) let just_pep8=1;; *) noseargs="$noseargs $1" esac } From 1b460de2f881d3cda0fd58bacedc3886020e4ca7 Mon Sep 17 00:00:00 2001 From: Ilya Alekseyev Date: Thu, 14 Apr 2011 17:12:54 +0400 Subject: [PATCH 17/66] bugfix --- nova/compute/manager.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index af35517081b5..63d374326ecf 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -226,15 +226,6 @@ class ComputeManager(manager.SchedulerDependentManager): self.network_manager.setup_compute_network(context, instance_id) - if FLAGS.auto_assign_floating_ip: - public_ip = rpc.call(context, - FLAGS.network_topic, - {"method": "allocate_floating_ip", - "args": {"project_id": context.project_id}}) - self.network_manager.associate_floating_ip(context, - instance_id=instance_id, - address=public_ip) - # TODO(vish) check to make sure the availability zone matches self.db.instance_set_state(context, instance_id, @@ -255,6 +246,16 @@ class ComputeManager(manager.SchedulerDependentManager): instance_id, power_state.SHUTDOWN) + if not FLAGS.stub_network: + if FLAGS.auto_assign_floating_ip: + public_ip = rpc.call(context, + FLAGS.network_topic, + {"method": "allocate_floating_ip", + "args": {"project_id": context.project_id}}) + self.network_manager.associate_floating_ip(context, + floating_address=public_ip, + fixed_address=address) + self._update_state(context, instance_id) @exception.wrap_exception From 76e643dc0b6b8b6e2ad499034f4d4491380e91ba Mon Sep 17 00:00:00 2001 From: Ilya Alekseyev Date: Thu, 14 Apr 2011 21:23:40 +0400 Subject: [PATCH 18/66] bugfix --- nova/compute/manager.py | 31 ++++++++++++++++++++++++++++--- 1 file changed, 28 insertions(+), 3 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 63d374326ecf..94fee36a5fc2 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -252,9 +252,34 @@ class ComputeManager(manager.SchedulerDependentManager): FLAGS.network_topic, {"method": "allocate_floating_ip", "args": {"project_id": context.project_id}}) - self.network_manager.associate_floating_ip(context, - floating_address=public_ip, - fixed_address=address) + + fixed_ip = self.db.fixed_ip_get_by_address(context, address) + floating_ip = self.db.floating_ip_get_by_address(context, + public_ip) + # Check if the floating ip address is allocated + if floating_ip['project_id'] is None: + raise exception.Error(_("Address (%s) is not allocated") % + floating_ip['address']) + # Check if the floating ip address is allocated + # to the same project + if floating_ip['project_id'] != context.project_id: + LOG.warn(_("Address (%(address)s) is not allocated to your" + " project (%(project)s)"), + {'address': floating_ip['address'], + 'project': context.project_id}) + raise exception.Error(_("Address (%(address)s) is not " + "allocated to your project" + "(%(project)s)") % + {'address': floating_ip['address'], + 'project': context.project_id}) + + host = fixed_ip['network']['host'] + rpc.cast(context, + 
self.db.queue_get_for(context, + FLAGS.network_topic, host), + {"method": "associate_floating_ip", + "args": {"floating_address": floating_ip['address'], + "fixed_address": fixed_ip['address']}}) self._update_state(context, instance_id) From 9a0d079cfe28d6d8d4e909f68541efda5ad3a3c5 Mon Sep 17 00:00:00 2001 From: Ilya Alekseyev Date: Mon, 18 Apr 2011 21:06:29 +0400 Subject: [PATCH 19/66] not performing floating ip operation with auto allocated ips --- nova/compute/manager.py | 3 ++- nova/db/sqlalchemy/api.py | 2 ++ nova/db/sqlalchemy/models.py | 1 + nova/network/api.py | 6 ++++++ 4 files changed, 11 insertions(+), 1 deletion(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 94fee36a5fc2..829d591703c0 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -308,7 +308,8 @@ class ComputeManager(manager.SchedulerDependentManager): network_topic, {"method": "disassociate_floating_ip", "args": {"floating_address": address}}) - if FLAGS.auto_assign_floating_ip: + if FLAGS.auto_assign_floating_ip \ + and floating_ip.get('auto_assigned'): LOG.debug(_("Deallocating floating ip %s"), floating_ip['address'], context=context) rpc.cast(context, diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 28126a517cf0..6b2caf46ca21 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -461,6 +461,7 @@ def floating_ip_count_by_project(context, project_id): session = get_session() return session.query(models.FloatingIp).\ filter_by(project_id=project_id).\ + filter_by(auto_assigned=False).\ filter_by(deleted=False).\ count() @@ -560,6 +561,7 @@ def floating_ip_get_all_by_project(context, project_id): return session.query(models.FloatingIp).\ options(joinedload_all('fixed_ip.instance')).\ filter_by(project_id=project_id).\ + filter_by(auto_assigned=False).\ filter_by(deleted=False).\ all() diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index f79d0f16caa4..36a084a1dd24 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -592,6 +592,7 @@ class FloatingIp(BASE, NovaBase): 'FloatingIp.deleted == False)') project_id = Column(String(255)) host = Column(String(255)) # , ForeignKey('hosts.id')) + auto_assigned = Column(Boolean, default=False, nullable=False) class ConsolePool(BASE, NovaBase): diff --git a/nova/network/api.py b/nova/network/api.py index c56e3062b7f8..c5f76a14e533 100644 --- a/nova/network/api.py +++ b/nova/network/api.py @@ -53,6 +53,8 @@ class API(base.Base): def release_floating_ip(self, context, address): floating_ip = self.db.floating_ip_get_by_address(context, address) + if floating_ip.get('auto_assigned'): + return # NOTE(vish): We don't know which network host should get the ip # when we deallocate, so just send it to any one. 
This # will probably need to move into a network supervisor @@ -66,6 +68,8 @@ class API(base.Base): if isinstance(fixed_ip, str) or isinstance(fixed_ip, unicode): fixed_ip = self.db.fixed_ip_get_by_address(context, fixed_ip) floating_ip = self.db.floating_ip_get_by_address(context, floating_ip) + if floating_ip.get('auto_assigned'): + return # Check if the floating ip address is allocated if floating_ip['project_id'] is None: raise exception.ApiError(_("Address (%s) is not allocated") % @@ -92,6 +96,8 @@ class API(base.Base): def disassociate_floating_ip(self, context, address): floating_ip = self.db.floating_ip_get_by_address(context, address) + if floating_ip.get('auto_assigned'): + return if not floating_ip.get('fixed_ip'): raise exception.ApiError('Address is not associated.') # NOTE(vish): Get the topic from the host name of the network of From 841d25c1c9ab840ed39261a3bb234b981d9c337a Mon Sep 17 00:00:00 2001 From: Ilya Alekseyev Date: Mon, 18 Apr 2011 22:02:12 +0400 Subject: [PATCH 20/66] pep8 fixed --- .../versions/015_add_auto_assign_to_floating_ips.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py b/nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py index 6829b5d6e814..f3767b29ffc9 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py @@ -33,4 +33,4 @@ def upgrade(migrate_engine): floating_ips = Table('floating_ips', meta, autoload=True, autoload_with=migrate_engine) - floating_ips.create_column(c_auto_assigned) \ No newline at end of file + floating_ips.create_column(c_auto_assigned) From 50bd39e0413c2231ebdf9f4c9fb7e58d27624250 Mon Sep 17 00:00:00 2001 From: Ilya Alekseyev Date: Tue, 19 Apr 2011 16:57:17 +0400 Subject: [PATCH 21/66] refractoring --- nova/compute/manager.py | 50 +++++++++-------------------------------- nova/network/api.py | 15 ++++++++----- 2 files changed, 19 insertions(+), 46 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index db97ae690ee5..313b9e0e1074 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -54,6 +54,7 @@ from nova import rpc from nova import utils from nova.compute import power_state from nova.virt import driver +from nova.network import api as network_api FLAGS = flags.FLAGS flags.DEFINE_string('instances_path', '$state_path/instances', @@ -134,6 +135,7 @@ class ComputeManager(manager.SchedulerDependentManager): self.network_manager = utils.import_object(FLAGS.network_manager) self.volume_manager = utils.import_object(FLAGS.volume_manager) + self.network_api = network_api.API() super(ComputeManager, self).__init__(service_name="compute", *args, **kwargs) @@ -248,39 +250,15 @@ class ComputeManager(manager.SchedulerDependentManager): if not FLAGS.stub_network: if FLAGS.auto_assign_floating_ip: - public_ip = rpc.call(context, - FLAGS.network_topic, - {"method": "allocate_floating_ip", - "args": {"project_id": context.project_id}}) + public_ip = self.network_api.allocate_floating_ip(context) fixed_ip = self.db.fixed_ip_get_by_address(context, address) floating_ip = self.db.floating_ip_get_by_address(context, public_ip) - # Check if the floating ip address is allocated - if floating_ip['project_id'] is None: - raise exception.Error(_("Address (%s) is not allocated") % - floating_ip['address']) - # Check if the floating ip address is allocated - # to the 
same project - if floating_ip['project_id'] != context.project_id: - LOG.warn(_("Address (%(address)s) is not allocated to your" - " project (%(project)s)"), - {'address': floating_ip['address'], - 'project': context.project_id}) - raise exception.Error(_("Address (%(address)s) is not " - "allocated to your project" - "(%(project)s)") % - {'address': floating_ip['address'], - 'project': context.project_id}) - - host = fixed_ip['network']['host'] - rpc.cast(context, - self.db.queue_get_for(context, - FLAGS.network_topic, host), - {"method": "associate_floating_ip", - "args": {"floating_address": floating_ip['address'], - "fixed_address": fixed_ip['address']}}) + self.network_api.associate_floating_ip(context, floating_ip, + fixed_ip, + affect_auto_assigned=True) self._update_state(context, instance_id) @exception.wrap_exception @@ -301,22 +279,14 @@ class ComputeManager(manager.SchedulerDependentManager): # NOTE(vish): Right now we don't really care if the ip is # disassociated. We may need to worry about # checking this later. - network_topic = self.db.queue_get_for(context, - FLAGS.network_topic, - floating_ip['host']) - rpc.cast(context, - network_topic, - {"method": "disassociate_floating_ip", - "args": {"floating_address": address}}) + self.network_api.disassociate_floating_ip(context, address, + affect_auto_assigned=True) if FLAGS.auto_assign_floating_ip \ and floating_ip.get('auto_assigned'): LOG.debug(_("Deallocating floating ip %s"), floating_ip['address'], context=context) - rpc.cast(context, - FLAGS.network_topic, - {"method": "deallocate_floating_ip", - "args": {"floating_address": - floating_ip['address']}}) + self.network_api.release_floating_ip(context, address, + affect_auto_assigned=True) address = fixed_ip['address'] if address: diff --git a/nova/network/api.py b/nova/network/api.py index c5f76a14e533..61db646ae142 100644 --- a/nova/network/api.py +++ b/nova/network/api.py @@ -51,9 +51,10 @@ class API(base.Base): {"method": "allocate_floating_ip", "args": {"project_id": context.project_id}}) - def release_floating_ip(self, context, address): + def release_floating_ip(self, context, address, + affect_auto_assigned = False): floating_ip = self.db.floating_ip_get_by_address(context, address) - if floating_ip.get('auto_assigned'): + if not affect_auto_assigned and floating_ip.get('auto_assigned'): return # NOTE(vish): We don't know which network host should get the ip # when we deallocate, so just send it to any one. 
This @@ -64,11 +65,12 @@ class API(base.Base): {"method": "deallocate_floating_ip", "args": {"floating_address": floating_ip['address']}}) - def associate_floating_ip(self, context, floating_ip, fixed_ip): + def associate_floating_ip(self, context, floating_ip, fixed_ip, + affect_auto_assigned = False): if isinstance(fixed_ip, str) or isinstance(fixed_ip, unicode): fixed_ip = self.db.fixed_ip_get_by_address(context, fixed_ip) floating_ip = self.db.floating_ip_get_by_address(context, floating_ip) - if floating_ip.get('auto_assigned'): + if not affect_auto_assigned and floating_ip.get('auto_assigned'): return # Check if the floating ip address is allocated if floating_ip['project_id'] is None: @@ -94,9 +96,10 @@ class API(base.Base): "args": {"floating_address": floating_ip['address'], "fixed_address": fixed_ip['address']}}) - def disassociate_floating_ip(self, context, address): + def disassociate_floating_ip(self, context, address, + affect_auto_assigned = False): floating_ip = self.db.floating_ip_get_by_address(context, address) - if floating_ip.get('auto_assigned'): + if not affect_auto_assigned and floating_ip.get('auto_assigned'): return if not floating_ip.get('fixed_ip'): raise exception.ApiError('Address is not associated.') From 9812ae8d3c113475f8ef5d609874317d0b330425 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Tue, 19 Apr 2011 11:05:37 -0400 Subject: [PATCH 22/66] Removed extra calls in exception handling and standardized the way LoopingCalls are done. --- nova/virt/libvirt_conn.py | 190 ++++++++++++++++++++------------------ 1 file changed, 100 insertions(+), 90 deletions(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index d212be3c9f48..4e96b4e97daa 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -154,8 +154,8 @@ def _get_net_and_prefixlen(cidr): def _get_ip_version(cidr): - net = IPy.IP(cidr) - return int(net.version()) + net = IPy.IP(cidr) + return int(net.version()) def _get_network_info(instance): @@ -359,28 +359,24 @@ class LibvirtConnection(driver.ComputeDriver): locals()) raise - # We'll save this for when we do shutdown, - # instead of destroy - but destroy returns immediately - timer = utils.LoopingCall(f=None) + def _wait_for_destroy(): + """Called at an interval until the VM is running again.""" + instance_name = insatnce['name'] - while True: try: - state = self.get_info(instance['name'])['state'] - db.instance_set_state(context.get_admin_context(), - instance['id'], state) - if state == power_state.SHUTOFF: - break + state = self.get_info(instance_name)['state'] + except exception.NotFound: + msg = _("During destroy, %s disappeared.") % instance_name + LOG.error(msg) + raise utils.LoopingCallDone - # Let's not hammer on the DB - time.sleep(1) - except Exception as ex: - msg = _("Error encountered when destroying instance '%(id)s': " - "%(ex)s") % {"id": instance["id"], "ex": ex} - LOG.debug(msg) - db.instance_set_state(context.get_admin_context(), - instance['id'], - power_state.SHUTOFF) - break + if state == power_state.SHUTOFF: + msg = _("Instance %s destroyed successfully.") % instance_name + LOG.debug(instance_name) + raise utils.LoopingCallDone + + timer = utils.LoopingCall(_wait_for_destroy) + timer.start(interval=0.5, now=True) self.firewall_driver.unfilter_instance(instance) @@ -522,6 +518,12 @@ class LibvirtConnection(driver.ComputeDriver): @exception.wrap_exception def reboot(self, instance): + """Reboot a virtual machine, given an instance reference. 
+ + This method actually destroys and re-creates the domain to ensure the + reboot happens, as the guest OS cannot ignore this action. + + """ self.destroy(instance, False) xml = self.to_xml(instance) self.firewall_driver.setup_basic_filtering(instance) @@ -529,24 +531,23 @@ class LibvirtConnection(driver.ComputeDriver): self._create_new_domain(xml) self.firewall_driver.apply_instance_filter(instance) - timer = utils.LoopingCall(f=None) - def _wait_for_reboot(): - try: - state = self.get_info(instance['name'])['state'] - db.instance_set_state(context.get_admin_context(), - instance['id'], state) - if state == power_state.RUNNING: - LOG.debug(_('instance %s: rebooted'), instance['name']) - timer.stop() - except Exception, exn: - LOG.exception(_('_wait_for_reboot failed: %s'), exn) - db.instance_set_state(context.get_admin_context(), - instance['id'], - power_state.SHUTDOWN) - timer.stop() + """Called at an interval until the VM is running again.""" + instance_name = insatnce['name'] - timer.f = _wait_for_reboot + try: + state = self.get_info(instance_name)['state'] + except exception.NotFound: + msg = _("During reboot, %s disappeared.") % instance_name + LOG.error(msg) + raise utils.LoopingCallDone + + if state == power_state.RUNNING: + msg = _("Instance %s rebooted successfully.") % instance_name + LOG.debug(instance_name) + raise utils.LoopingCallDone + + timer = utils.LoopingCall(_wait_for_reboot) return timer.start(interval=0.5, now=True) @exception.wrap_exception @@ -566,7 +567,15 @@ class LibvirtConnection(driver.ComputeDriver): raise exception.ApiError("resume not supported for libvirt") @exception.wrap_exception - def rescue(self, instance, callback=None): + def rescue(self, instance): + """Loads a VM using rescue images. + + A rescue is normally performed when something goes wrong with the + primary images and data needs to be corrected/recovered. Rescuing + should not edit or over-ride the original image, only allow for + data recovery. + + """ self.destroy(instance, False) xml = self.to_xml(instance, rescue=True) @@ -576,29 +585,33 @@ class LibvirtConnection(driver.ComputeDriver): self._create_image(instance, xml, '.rescue', rescue_images) self._create_new_domain(xml) - timer = utils.LoopingCall(f=None) - def _wait_for_rescue(): - try: - state = self.get_info(instance['name'])['state'] - db.instance_set_state(None, instance['id'], state) - if state == power_state.RUNNING: - LOG.debug(_('instance %s: rescued'), instance['name']) - timer.stop() - except Exception, exn: - LOG.exception(_('_wait_for_rescue failed: %s'), exn) - db.instance_set_state(None, - instance['id'], - power_state.SHUTDOWN) - timer.stop() + """Called at an interval until the VM is running again.""" + instance_name = instance['name'] - timer.f = _wait_for_rescue + try: + state = self.get_info(instance_name)['state'] + except exception.NotFound: + msg = _("During reboot, %s disappeared.") % instance_name + LOG.error(msg) + raise utils.LoopingCallDone + + if state == power_state.RUNNING: + msg = _("Instance %s rescued successfully.") % instance_name + LOG.debug(instance_name) + raise utils.LoopingCallDone + + timer = utils.LoopingCall(_wait_for_rescue) return timer.start(interval=0.5, now=True) @exception.wrap_exception - def unrescue(self, instance, callback=None): - # NOTE(vish): Because reboot destroys and recreates an instance using - # the normal xml file, we can just call reboot here + def unrescue(self, instance): + """Reboot the VM which is being rescued back into primary images. 
+ + Because reboot destroys and re-creates instances, unresue should + simply call reboot. + + """ self.reboot(instance) @exception.wrap_exception @@ -610,10 +623,6 @@ class LibvirtConnection(driver.ComputeDriver): @exception.wrap_exception def spawn(self, instance, network_info=None): xml = self.to_xml(instance, False, network_info) - db.instance_set_state(context.get_admin_context(), - instance['id'], - power_state.NOSTATE, - 'launching') self.firewall_driver.setup_basic_filtering(instance, network_info) self.firewall_driver.prepare_instance_filter(instance, network_info) self._create_image(instance, xml, network_info) @@ -626,25 +635,23 @@ class LibvirtConnection(driver.ComputeDriver): instance['name']) domain.setAutostart(1) - timer = utils.LoopingCall(f=None) - def _wait_for_boot(): - try: - state = self.get_info(instance['name'])['state'] - db.instance_set_state(context.get_admin_context(), - instance['id'], state) - if state == power_state.RUNNING: - LOG.debug(_('instance %s: booted'), instance['name']) - timer.stop() - except: - LOG.exception(_('instance %s: failed to boot'), - instance['name']) - db.instance_set_state(context.get_admin_context(), - instance['id'], - power_state.SHUTDOWN) - timer.stop() + """Called at an interval until the VM is running.""" + instance_name = insatnce['name'] - timer.f = _wait_for_boot + try: + state = self.get_info(instance_name)['state'] + except exception.NotFound: + msg = _("During reboot, %s disappeared.") % instance_name + LOG.error(msg) + raise utils.LoopingCallDone + + if state == power_state.RUNNING: + msg = _("Instance %s spawned successfully.") % instance_name + LOG.debug(instance_name) + raise utils.LoopingCallDone + + timer = utils.LoopingCall(_wait_for_boot) return timer.start(interval=0.5, now=True) def _flush_xen_console(self, virsh_output): @@ -1045,21 +1052,24 @@ class LibvirtConnection(driver.ComputeDriver): return xml def get_info(self, instance_name): - # NOTE(justinsb): When libvirt isn't running / can't connect, we get: - # libvir: Remote error : unable to connect to - # '/var/run/libvirt/libvirt-sock', libvirtd may need to be started: - # No such file or directory + """Retrieve information from libvirt for a specific instance name. + + If a libvirt error is encountered during lookup, we might raise a + NotFound exception or Error exception depending on how severe the + libvirt error is. + + """ try: virt_dom = self._conn.lookupByName(instance_name) - except libvirt.libvirtError as e: - errcode = e.get_error_code() - if errcode == libvirt.VIR_ERR_NO_DOMAIN: - raise exception.NotFound(_("Instance %s not found") - % instance_name) - LOG.warning(_("Error from libvirt during lookup. " - "Code=%(errcode)s Error=%(e)s") % - locals()) - raise + except libvirt.libvirtError as ex: + error_code = ex.get_error_code() + if error_code == libvirt.VIR_ERR_NO_DOMAIN: + msg = _("Instance %s not found") % instance_name + raise exception.NotFound(msg) + + msg = _("Error from libvirt while looking up %(instance_name)s: " + "[Error Code %(error_code)s] %(ex)s") % locals() + raise exception.Error(msg) (state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info() return {'state': state, From 8e98888323d4308640ab5061cdae5ccd4e3ebabf Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Tue, 19 Apr 2011 11:09:07 -0400 Subject: [PATCH 23/66] Pretty critical spelling error. 
--- nova/virt/libvirt_conn.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 4e96b4e97daa..fad8dd52ac3c 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -361,7 +361,7 @@ class LibvirtConnection(driver.ComputeDriver): def _wait_for_destroy(): """Called at an interval until the VM is running again.""" - instance_name = insatnce['name'] + instance_name = instance['name'] try: state = self.get_info(instance_name)['state'] @@ -533,7 +533,7 @@ class LibvirtConnection(driver.ComputeDriver): def _wait_for_reboot(): """Called at an interval until the VM is running again.""" - instance_name = insatnce['name'] + instance_name = instance['name'] try: state = self.get_info(instance_name)['state'] @@ -637,7 +637,7 @@ class LibvirtConnection(driver.ComputeDriver): def _wait_for_boot(): """Called at an interval until the VM is running.""" - instance_name = insatnce['name'] + instance_name = instance['name'] try: state = self.get_info(instance_name)['state'] From 3e31785d86c59dbda62e3a3ba3a1e23452e52562 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Tue, 19 Apr 2011 11:16:46 -0400 Subject: [PATCH 24/66] Tweak to destroy loop logic. --- nova/virt/libvirt_conn.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index fad8dd52ac3c..53137395ea2d 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -366,11 +366,6 @@ class LibvirtConnection(driver.ComputeDriver): try: state = self.get_info(instance_name)['state'] except exception.NotFound: - msg = _("During destroy, %s disappeared.") % instance_name - LOG.error(msg) - raise utils.LoopingCallDone - - if state == power_state.SHUTOFF: msg = _("Instance %s destroyed successfully.") % instance_name LOG.debug(instance_name) raise utils.LoopingCallDone From ad2d97972d63f50500ec8215c7f8f04d87468060 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Tue, 19 Apr 2011 11:29:26 -0400 Subject: [PATCH 25/66] Fixed info messages. 
--- nova/virt/libvirt_conn.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 53137395ea2d..13378bbd22d8 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -367,7 +367,7 @@ class LibvirtConnection(driver.ComputeDriver): state = self.get_info(instance_name)['state'] except exception.NotFound: msg = _("Instance %s destroyed successfully.") % instance_name - LOG.debug(instance_name) + LOG.info(msg) raise utils.LoopingCallDone timer = utils.LoopingCall(_wait_for_destroy) @@ -539,7 +539,7 @@ class LibvirtConnection(driver.ComputeDriver): if state == power_state.RUNNING: msg = _("Instance %s rebooted successfully.") % instance_name - LOG.debug(instance_name) + LOG.info(msg) raise utils.LoopingCallDone timer = utils.LoopingCall(_wait_for_reboot) @@ -593,7 +593,7 @@ class LibvirtConnection(driver.ComputeDriver): if state == power_state.RUNNING: msg = _("Instance %s rescued successfully.") % instance_name - LOG.debug(instance_name) + LOG.info(msg) raise utils.LoopingCallDone timer = utils.LoopingCall(_wait_for_rescue) @@ -643,7 +643,7 @@ class LibvirtConnection(driver.ComputeDriver): if state == power_state.RUNNING: msg = _("Instance %s spawned successfully.") % instance_name - LOG.debug(instance_name) + LOG.info(msg) raise utils.LoopingCallDone timer = utils.LoopingCall(_wait_for_boot) From 25e1e2d64ad43638ad4231e6e6edd84d96e14bdb Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Tue, 19 Apr 2011 11:33:51 -0400 Subject: [PATCH 26/66] Merged trunk and fixed small comment. --- nova/virt/libvirt_conn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 13378bbd22d8..2582b97305c1 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -360,7 +360,7 @@ class LibvirtConnection(driver.ComputeDriver): raise def _wait_for_destroy(): - """Called at an interval until the VM is running again.""" + """Called at an interval until the VM is gone.""" instance_name = instance['name'] try: From da99e8e6b143cd2051c23f14d4d46602f16f7ba3 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 19 Apr 2011 09:16:25 -0700 Subject: [PATCH 27/66] add instructions for setting up interfaces --- doc/source/devref/cloudpipe.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/doc/source/devref/cloudpipe.rst b/doc/source/devref/cloudpipe.rst index 95570aa1b34a..a1f6c6450979 100644 --- a/doc/source/devref/cloudpipe.rst +++ b/doc/source/devref/cloudpipe.rst @@ -68,6 +68,12 @@ Making a cloudpipe image is relatively easy. :language: bash :linenos: +# setup network interfaces. + +.. 
literalinclude:: interfaces + :language: bash + :linenos: + # register the image and set the image id in your flagfile:: --vpn_image_id=ami-xxxxxxxx From 8b21dd6634cc32c43d0bebf3dede40b4b28c0a78 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 19 Apr 2011 09:16:45 -0700 Subject: [PATCH 28/66] add include file for doc interfaces --- doc/source/devref/interfaces | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 doc/source/devref/interfaces diff --git a/doc/source/devref/interfaces b/doc/source/devref/interfaces new file mode 100644 index 000000000000..2aae39558ea9 --- /dev/null +++ b/doc/source/devref/interfaces @@ -0,0 +1,18 @@ +# The loopback network interface +auto lo +iface lo inet loopback + +# The primary network interface +auto br0 +iface br0 inet dhcp + bridge_ports eth0 + bridge_fd 9 ## from the libvirt docs (forward delay time) + bridge_hello 2 ## from the libvirt docs (hello time) + bridge_maxage 12 ## from the libvirt docs (maximum message age) + bridge_stp off ## from the libvirt docs (spanning tree protocol) + +iface eth0 inet manual + up ifconfig $IFACE 0.0.0.0 up + up ip link set $IFACE promisc on + down ip link set $IFACE promisc off + down ifconfig $IFACE down From 745351d1e2a98a98de0a5f955385a92c01110684 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 19 Apr 2011 09:19:52 -0700 Subject: [PATCH 29/66] Fixes cloudpipe to get the proper ip address. * Changes FLAGS.vpn_image_id to integer * Converts to str when comparing because instance['image_id'] is a str * Removes unused method from db * Converts integer_id to ami when launching * Adds docs for setting up interface in cloudpipe image --- nova/api/ec2/admin.py | 2 +- nova/api/ec2/cloud.py | 4 ++-- nova/cloudpipe/pipelib.py | 2 ++ nova/compute/manager.py | 2 +- nova/db/api.py | 5 ----- nova/db/sqlalchemy/api.py | 9 +-------- nova/flags.py | 2 +- nova/virt/libvirt_conn.py | 2 +- 8 files changed, 9 insertions(+), 19 deletions(-) diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py index 6a5609d4aa39..ea94d9c1f821 100644 --- a/nova/api/ec2/admin.py +++ b/nova/api/ec2/admin.py @@ -266,7 +266,7 @@ class AdminController(object): def _vpn_for(self, context, project_id): """Get the VPN instance for a project ID.""" for instance in db.instance_get_all_by_project(context, project_id): - if (instance['image_id'] == FLAGS.vpn_image_id + if (instance['image_id'] == str(FLAGS.vpn_image_id) and not instance['state_description'] in ['shutting_down', 'shutdown']): return instance diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 10b1d0ac5afa..d9c1af072b82 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -703,7 +703,7 @@ class CloudController(object): instances = self.compute_api.get_all(context, **kwargs) for instance in instances: if not context.is_admin: - if instance['image_id'] == FLAGS.vpn_image_id: + if instance['image_id'] == str(FLAGS.vpn_image_id): continue i = {} instance_id = instance['id'] @@ -898,7 +898,7 @@ class CloudController(object): return image_type @staticmethod - def _image_ec2_id(image_id, image_type='ami'): + def image_ec2_id(image_id, image_type='ami'): """Returns image ec2_id using id and three letter type.""" template = image_type + '-%08x' return ec2utils.id_to_ec2_id(int(image_id), template=template) diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py index dc6f55af290c..2c8912422bcc 100644 --- a/nova/cloudpipe/pipelib.py +++ b/nova/cloudpipe/pipelib.py @@ -37,6 +37,7 @@ from nova import utils from nova.auth 
import manager # TODO(eday): Eventually changes these to something not ec2-specific from nova.api.ec2 import cloud +from nova.api FLAGS = flags.FLAGS @@ -101,6 +102,7 @@ class CloudPipe(object): key_name = self.setup_key_pair(ctxt) group_name = self.setup_security_group(ctxt) + ec2_id = self.controller.image_ec2_id(FLAGS.vpn_image_id) reservation = self.controller.run_instances(ctxt, user_data=self.get_encoded_zip(project_id), max_count=1, diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 39d7af9c19fd..05b168e13bf1 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -209,7 +209,7 @@ class ComputeManager(manager.SchedulerDependentManager): power_state.NOSTATE, 'networking') - is_vpn = instance_ref['image_id'] == FLAGS.vpn_image_id + is_vpn = instance_ref['image_id'] == str(FLAGS.vpn_image_id) # NOTE(vish): This could be a cast because we don't do anything # with the address currently, but I'm leaving it as # a call to ensure that network setup completes. We diff --git a/nova/db/api.py b/nova/db/api.py index 63901e94d879..030d2f434332 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -455,11 +455,6 @@ def instance_get_project_vpn(context, project_id): return IMPL.instance_get_project_vpn(context, project_id) -def instance_is_vpn(context, instance_id): - """True if instance is a vpn.""" - return IMPL.instance_is_vpn(context, instance_id) - - def instance_set_state(context, instance_id, state, description=None): """Set the state of an instance.""" return IMPL.instance_set_state(context, instance_id, state, description) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index e675022e90e1..e9450e392fbb 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -940,7 +940,7 @@ def instance_get_project_vpn(context, project_id): options(joinedload('security_groups')).\ options(joinedload('instance_type')).\ filter_by(project_id=project_id).\ - filter_by(image_id=FLAGS.vpn_image_id).\ + filter_by(image_id=str(FLAGS.vpn_image_id)).\ filter_by(deleted=can_read_deleted(context)).\ first() @@ -979,13 +979,6 @@ def instance_get_floating_address(context, instance_id): return instance_ref.fixed_ip.floating_ips[0]['address'] -@require_admin_context -def instance_is_vpn(context, instance_id): - # TODO(vish): Move this into image code somewhere - instance_ref = instance_get(context, instance_id) - return instance_ref['image_id'] == FLAGS.vpn_image_id - - @require_admin_context def instance_set_state(context, instance_id, state, description=None): # TODO(devcamcar): Move this out of models and into driver diff --git a/nova/flags.py b/nova/flags.py index f011ab383dd0..760bcc37dc10 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -316,7 +316,7 @@ DEFINE_string('null_kernel', 'nokernel', 'kernel image that indicates not to use a kernel,' ' but to use a raw disk image instead') -DEFINE_string('vpn_image_id', 'ami-cloudpipe', 'AMI for cloudpipe vpn server') +DEFINE_integer('vpn_image_id', 0, 'integer id for cloudpipe vpn server') DEFINE_string('vpn_key_suffix', '-vpn', 'Suffix to add to project name for vpn key and secgroups') diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 5da091920298..9c8d64446e75 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -1837,7 +1837,7 @@ class NWFilterFirewall(FirewallDriver): """ if not network_info: network_info = _get_network_info(instance) - if instance['image_id'] == FLAGS.vpn_image_id: + if instance['image_id'] == str(FLAGS.vpn_image_id): base_filter = 
'nova-vpn' else: base_filter = 'nova-base' From 7554ab7da290565ee457b2d42730a2bff2fd7861 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 19 Apr 2011 09:31:18 -0700 Subject: [PATCH 30/66] remove typo --- nova/cloudpipe/pipelib.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py index 2c8912422bcc..f4cb53da0c60 100644 --- a/nova/cloudpipe/pipelib.py +++ b/nova/cloudpipe/pipelib.py @@ -37,7 +37,6 @@ from nova import utils from nova.auth import manager # TODO(eday): Eventually changes these to something not ec2-specific from nova.api.ec2 import cloud -from nova.api FLAGS = flags.FLAGS From 2d649fa8928e9682064613f2c984f53f492efbec Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 19 Apr 2011 09:32:33 -0700 Subject: [PATCH 31/66] actually use the ec2_id --- nova/cloudpipe/pipelib.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py index f4cb53da0c60..7844d31e11b1 100644 --- a/nova/cloudpipe/pipelib.py +++ b/nova/cloudpipe/pipelib.py @@ -107,7 +107,7 @@ class CloudPipe(object): max_count=1, min_count=1, instance_type='m1.tiny', - image_id=FLAGS.vpn_image_id, + image_id=ec2_id, key_name=key_name, security_group=[group_name]) From 3e3f8e1f09d0615e66cc1be0b656d0d8e1d69671 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Tue, 19 Apr 2011 12:36:07 -0400 Subject: [PATCH 32/66] Abstracted lookupByName calls to _lookup_by_name for centralized error handling. --- nova/virt/libvirt_conn.py | 53 +++++++++++++++++++-------------------- 1 file changed, 26 insertions(+), 27 deletions(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 2582b97305c1..c1f62c391f37 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -309,19 +309,10 @@ class LibvirtConnection(driver.ComputeDriver): def destroy(self, instance, cleanup=True): instance_name = instance['name'] - # TODO(justinsb): Refactor all lookupByName calls for error-handling try: - virt_dom = self._conn.lookupByName(instance_name) - except libvirt.libvirtError as e: - errcode = e.get_error_code() - if errcode == libvirt.VIR_ERR_NO_DOMAIN: - virt_dom = None - else: - LOG.warning(_("Error from libvirt during lookup of " - "%(instance_name)s. 
Code=%(errcode)s " - "Error=%(e)s") % - locals()) - raise + virt_dom = self._lookup_by_name(instance_name) + except exception.NotFound: + virt_dom = None # If the instance is already terminated, we're still happy # Otherwise, destroy it @@ -392,7 +383,7 @@ class LibvirtConnection(driver.ComputeDriver): @exception.wrap_exception def attach_volume(self, instance_name, device_path, mountpoint): - virt_dom = self._conn.lookupByName(instance_name) + virt_dom = self._lookup_by_name(instance_name) mount_device = mountpoint.rpartition("/")[2] if device_path.startswith('/dev/'): xml = """ @@ -436,7 +427,7 @@ class LibvirtConnection(driver.ComputeDriver): @exception.wrap_exception def detach_volume(self, instance_name, mountpoint): - virt_dom = self._conn.lookupByName(instance_name) + virt_dom = self._lookup_by_name(instance_name) mount_device = mountpoint.rpartition("/")[2] xml = self._get_disk_xml(virt_dom.XMLDesc(0), mount_device) if not xml: @@ -453,7 +444,7 @@ class LibvirtConnection(driver.ComputeDriver): """ image_service = utils.import_object(FLAGS.image_service) - virt_dom = self._conn.lookupByName(instance['name']) + virt_dom = self._lookup_by_name(instance['name']) elevated = context.get_admin_context() base = image_service.show(elevated, instance['image_id']) @@ -712,7 +703,7 @@ class LibvirtConnection(driver.ComputeDriver): raise Exception(_('Unable to find an open port')) def get_pty_for_instance(instance_name): - virt_dom = self._conn.lookupByName(instance_name) + virt_dom = self._lookup_by_name(instance_name) xml = virt_dom.XMLDesc(0) dom = minidom.parseString(xml) @@ -737,7 +728,7 @@ class LibvirtConnection(driver.ComputeDriver): @exception.wrap_exception def get_vnc_console(self, instance): def get_vnc_port_for_instance(instance_name): - virt_dom = self._conn.lookupByName(instance_name) + virt_dom = self._lookup_by_name(instance_name) xml = virt_dom.XMLDesc(0) # TODO: use etree instead of minidom dom = minidom.parseString(xml) @@ -1046,16 +1037,15 @@ class LibvirtConnection(driver.ComputeDriver): instance['name']) return xml - def get_info(self, instance_name): - """Retrieve information from libvirt for a specific instance name. + def _lookup_by_name(self, instance_name): + """Retrieve libvirt domain object given an instance name. - If a libvirt error is encountered during lookup, we might raise a - NotFound exception or Error exception depending on how severe the - libvirt error is. + All libvirt error handling should be handled in this method and + relevant nova exceptions should be raised in response. """ try: - virt_dom = self._conn.lookupByName(instance_name) + return self._conn.lookupByName(instance_name) except libvirt.libvirtError as ex: error_code = ex.get_error_code() if error_code == libvirt.VIR_ERR_NO_DOMAIN: @@ -1066,6 +1056,15 @@ class LibvirtConnection(driver.ComputeDriver): "[Error Code %(error_code)s] %(ex)s") % locals() raise exception.Error(msg) + def get_info(self, instance_name): + """Retrieve information from libvirt for a specific instance name. + + If a libvirt error is encountered during lookup, we might raise a + NotFound exception or Error exception depending on how severe the + libvirt error is. + + """ + virt_dom = self._lookup_by_name(instance_name) (state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info() return {'state': state, 'max_mem': max_mem, @@ -1102,7 +1101,7 @@ class LibvirtConnection(driver.ComputeDriver): Returns a list of all block devices for this domain. 
""" - domain = self._conn.lookupByName(instance_name) + domain = self._lookup_by_name(instance_name) # TODO(devcamcar): Replace libxml2 with etree. xml = domain.XMLDesc(0) doc = None @@ -1144,7 +1143,7 @@ class LibvirtConnection(driver.ComputeDriver): Returns a list of all network interfaces for this instance. """ - domain = self._conn.lookupByName(instance_name) + domain = self._lookup_by_name(instance_name) # TODO(devcamcar): Replace libxml2 with etree. xml = domain.XMLDesc(0) doc = None @@ -1359,7 +1358,7 @@ class LibvirtConnection(driver.ComputeDriver): Note that this function takes an instance name, not an Instance, so that it can be called by monitor. """ - domain = self._conn.lookupByName(instance_name) + domain = self._lookup_by_name(instance_name) return domain.blockStats(disk) def interface_stats(self, instance_name, interface): @@ -1367,7 +1366,7 @@ class LibvirtConnection(driver.ComputeDriver): Note that this function takes an instance name, not an Instance, so that it can be called by monitor. """ - domain = self._conn.lookupByName(instance_name) + domain = self._lookup_by_name(instance_name) return domain.interfaceStats(interface) def get_console_pool_info(self, console_type): From c3a45962a322086e4d7339f980bcf61ee8bd3167 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 19 Apr 2011 09:38:01 -0700 Subject: [PATCH 33/66] rename all versions of image_ec2_id --- nova/api/ec2/cloud.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index d9c1af072b82..c48ec9004d4b 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -159,7 +159,7 @@ class CloudController(object): floating_ip = db.instance_get_floating_address(ctxt, instance_ref['id']) ec2_id = ec2utils.id_to_ec2_id(instance_ref['id']) - image_ec2_id = self._image_ec2_id(instance_ref['image_id'], 'ami') + image_ec2_id = self.image_ec2_id(instance_ref['image_id']) data = { 'user-data': base64.b64decode(instance_ref['user_data']), 'meta-data': { @@ -188,8 +188,8 @@ class CloudController(object): for image_type in ['kernel', 'ramdisk']: if '%s_id' % image_type in instance_ref: - ec2_id = self._image_ec2_id(instance_ref['%s_id' % image_type], - self._image_type(image_type)) + ec2_id = self.image_ec2_id(instance_ref['%s_id' % image_type], + self._image_type(image_type)) data['meta-data']['%s-id' % image_type] = ec2_id if False: # TODO(vish): store ancestor ids @@ -709,7 +709,7 @@ class CloudController(object): instance_id = instance['id'] ec2_id = ec2utils.id_to_ec2_id(instance_id) i['instanceId'] = ec2_id - i['imageId'] = self._image_ec2_id(instance['image_id']) + i['imageId'] = self.image_ec2_id(instance['image_id']) i['instanceState'] = { 'code': instance['state'], 'name': instance['state_description']} @@ -917,15 +917,15 @@ class CloudController(object): """Convert from format defined by BaseImageService to S3 format.""" i = {} image_type = self._image_type(image.get('container_format')) - ec2_id = self._image_ec2_id(image.get('id'), image_type) + ec2_id = self.image_ec2_id(image.get('id'), image_type) name = image.get('name') i['imageId'] = ec2_id kernel_id = image['properties'].get('kernel_id') if kernel_id: - i['kernelId'] = self._image_ec2_id(kernel_id, 'aki') + i['kernelId'] = self.image_ec2_id(kernel_id, 'aki') ramdisk_id = image['properties'].get('ramdisk_id') if ramdisk_id: - i['ramdiskId'] = self._image_ec2_id(ramdisk_id, 'ari') + i['ramdiskId'] = self.image_ec2_id(ramdisk_id, 'ari') i['imageOwnerId'] = 
image['properties'].get('owner_id') if name: i['imageLocation'] = "%s (%s)" % (image['properties']. @@ -976,8 +976,8 @@ class CloudController(object): metadata = {'properties': {'image_location': image_location}} image = self.image_service.create(context, metadata) image_type = self._image_type(image.get('container_format')) - image_id = self._image_ec2_id(image['id'], - image_type) + image_id = self.image_ec2_id(image['id'], + image_type) msg = _("Registered image %(image_location)s with" " id %(image_id)s") % locals() LOG.audit(msg, context=context) From 4442d8f7017868f64eacc6d8ad94620263b9a9c9 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 19 Apr 2011 10:20:56 -0700 Subject: [PATCH 34/66] make geninter.sh use the right tmpl file --- nova/CA/geninter.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/CA/geninter.sh b/nova/CA/geninter.sh index 4b7f5a55c7cc..9b3ea3b767b4 100755 --- a/nova/CA/geninter.sh +++ b/nova/CA/geninter.sh @@ -21,7 +21,7 @@ NAME=$1 SUBJ=$2 mkdir -p projects/$NAME cd projects/$NAME -cp ../../openssl.cnf.tmpl openssl.cnf +cp "$(dirname $0)/openssl.cnf.tmpl" openssl.cnf sed -i -e s/%USERNAME%/$NAME/g openssl.cnf mkdir -p certs crl newcerts private openssl req -new -x509 -extensions v3_ca -keyout private/cakey.pem -out cacert.pem -days 365 -config ./openssl.cnf -batch -nodes From ccf9b2ccb41b1e7f946f2b2c21e6f8fbc9bd04e8 Mon Sep 17 00:00:00 2001 From: Eldar Nugaev Date: Tue, 19 Apr 2011 21:25:53 +0400 Subject: [PATCH 35/66] fix logging in reboot OpenStack API --- nova/api/openstack/servers.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index f221229f0a0b..e9f5702134d0 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -40,7 +40,7 @@ import nova.api.openstack from nova.scheduler import api as scheduler_api -LOG = logging.getLogger('server') +LOG = logging.getLogger('nova.api.openstack.servers') FLAGS = flags.FLAGS @@ -331,6 +331,7 @@ class Controller(common.OpenstackController): return exc.HTTPAccepted() def _action_rebuild(self, input_dict, req, id): + LOG.debug(_("Rebuild server action is not implemented")) return faults.Fault(exc.HTTPNotImplemented()) def _action_resize(self, input_dict, req, id): @@ -346,18 +347,20 @@ class Controller(common.OpenstackController): except Exception, e: LOG.exception(_("Error in resize %s"), e) return faults.Fault(exc.HTTPBadRequest()) - return faults.Fault(exc.HTTPAccepted()) + return exc.HTTPAccepted() def _action_reboot(self, input_dict, req, id): - try: + if 'reboot' in input_dict and 'type' in input_dict['reboot']: reboot_type = input_dict['reboot']['type'] - except Exception: - raise faults.Fault(exc.HTTPNotImplemented()) + else: + LOG.exception(_("Missing argument 'type' for reboot")) + return faults.Fault(exc.HTTPUnprocessableEntity()) try: # TODO(gundlach): pass reboot_type, support soft reboot in # virt driver self.compute_api.reboot(req.environ['nova.context'], id) - except: + except Exception, e: + LOG.exception(_("Error in reboot %s"), e) return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() From 66a15373a14e9acc30808d2cf21bd800c64cc012 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 19 Apr 2011 10:31:35 -0700 Subject: [PATCH 36/66] fix doc typo --- doc/source/devref/cloudpipe.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/devref/cloudpipe.rst b/doc/source/devref/cloudpipe.rst index 
a1f6c6450979..15d3160b7d52 100644 --- a/doc/source/devref/cloudpipe.rst +++ b/doc/source/devref/cloudpipe.rst @@ -62,13 +62,13 @@ Making a cloudpipe image is relatively easy. :language: bash :linenos: -# download and run the payload on boot from /etc/rc.local. +# download and run the payload on boot from /etc/rc.local .. literalinclude:: rc.local :language: bash :linenos: -# setup network interfaces. +# setup /etc/network/interfaces .. literalinclude:: interfaces :language: bash From a474310be8ed4d7a9840412779567abef71406f1 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Tue, 19 Apr 2011 17:24:01 -0400 Subject: [PATCH 37/66] Create a dictionary of instance_types before executing SQL updates in the instance_type_id migration (014). This should resolve a "cannot commit transaction - SQL statements in progress" error with some versions of sqlite. --- .../versions/014_add_instance_type_id_to_instances.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/014_add_instance_type_id_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/014_add_instance_type_id_to_instances.py index b12a0a8013bb..334d1f255e7a 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/014_add_instance_type_id_to_instances.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/014_add_instance_type_id_to_instances.py @@ -54,10 +54,12 @@ def upgrade(migrate_engine): instances.create_column(c_instance_type_id) + type_names = {} recs = migrate_engine.execute(instance_types.select()) for row in recs: - type_id = row[0] - type_name = row[1] + type_names[row[0]] = row[1] + + for type_id, type_name in type_names.iteritems(): migrate_engine.execute(instances.update()\ .where(instances.c.instance_type == type_name)\ .values(instance_type_id=type_id)) From d992fbbde7c8e5274d80e2fce9c840e7209c78c6 Mon Sep 17 00:00:00 2001 From: Jimmy Bergman Date: Wed, 20 Apr 2011 14:06:23 +0200 Subject: [PATCH 38/66] Change response format of CreateVolume to match EC2 --- nova/api/ec2/cloud.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index bd4c9dcd4a90..2bbe4c36837b 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -613,7 +613,7 @@ class CloudController(object): # TODO(vish): Instance should be None at db layer instead of # trying to lazy load, but for now we turn it into # a dict to avoid an error. 
- return {'volumeSet': [self._format_volume(context, dict(volume))]} + return self._format_volume(context, dict(volume)) def delete_volume(self, context, volume_id, **kwargs): volume_id = ec2utils.ec2_id_to_id(volume_id) From 19aaf2523b1f157b5f9cad0d625857e98c19002b Mon Sep 17 00:00:00 2001 From: Jimmy Bergman Date: Wed, 20 Apr 2011 14:12:47 +0200 Subject: [PATCH 39/66] Add to Authors --- Authors | 1 + 1 file changed, 1 insertion(+) diff --git a/Authors b/Authors index ce280749d8ae..c440d3c11bb6 100644 --- a/Authors +++ b/Authors @@ -30,6 +30,7 @@ Ilya Alekseyev Jason Koelker Jay Pipes Jesse Andrews +Jimmy Bergman Joe Heck Joel Moore Johannes Erdfelt From 45178fd6da58ff37617e35b5cddaf416ae5cee65 Mon Sep 17 00:00:00 2001 From: Ilya Alekseyev Date: Wed, 20 Apr 2011 17:44:25 +0400 Subject: [PATCH 40/66] fix: mark floating ip as auto assigned --- nova/compute/manager.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 313b9e0e1074..c1dc06557259 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -252,6 +252,7 @@ class ComputeManager(manager.SchedulerDependentManager): if FLAGS.auto_assign_floating_ip: public_ip = self.network_api.allocate_floating_ip(context) + self.db.floating_ip_set_auto_assigned(context, public_ip) fixed_ip = self.db.fixed_ip_get_by_address(context, address) floating_ip = self.db.floating_ip_get_by_address(context, public_ip) From 803d246c35256e0578837226b1a91003e451ab6f Mon Sep 17 00:00:00 2001 From: Ilya Alekseyev Date: Wed, 20 Apr 2011 18:35:07 +0400 Subject: [PATCH 41/66] instance type get approach changed. tests fixed --- nova/tests/test_virt.py | 4 ++-- nova/virt/libvirt_conn.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index 5c8705a1c3f1..0a0c7a958554 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -619,7 +619,7 @@ class IptablesFirewallTestCase(test.TestCase): {'user_id': 'fake', 'project_id': 'fake', 'mac_address': '56:12:12:12:12:12', - 'instance_type': 'm1.small'}) + 'instance_type_id': 1}) ip = '10.11.12.13' network_ref = db.project_get_network(self.context, @@ -843,7 +843,7 @@ class NWFilterTestCase(test.TestCase): {'user_id': 'fake', 'project_id': 'fake', 'mac_address': '00:A0:C9:14:C8:29', - 'instance_type': 'm1.small'}) + 'instance_type_id': 1}) inst_id = instance_ref['id'] ip = '10.11.12.13' diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index c12b6e91e2a0..d5a88ebedcba 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -167,8 +167,8 @@ def _get_network_info(instance): instance['id']) networks = db.network_get_all_by_instance(admin_context, instance['id']) - flavor = db.instance_type_get_by_name(admin_context, - instance['instance_type']) + flavor = db.instance_type_get_by_id(admin_context, + instance['instance_type_id']) network_info = [] for network in networks: From 48936f6b8f063cf71fa42b4586d8ba524ed39cc4 Mon Sep 17 00:00:00 2001 From: Eldar Nugaev Date: Wed, 20 Apr 2011 20:37:51 +0400 Subject: [PATCH 42/66] fix Request.get_content_type --- nova/api/openstack/servers.py | 67 ++++++++------------- nova/tests/api/openstack/test_extensions.py | 1 + nova/tests/api/openstack/test_servers.py | 4 ++ nova/tests/api/test_wsgi.py | 7 +++ nova/wsgi.py | 20 ++++-- 5 files changed, 52 insertions(+), 47 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 22a9c632cbd8..e0681e597eae 100644 --- 
a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -294,61 +294,47 @@ class Controller(common.OpenstackController): 'revertResize': self._action_revert_resize, 'rebuild': self._action_rebuild, } - input_dict = self._deserialize(req.body, req.get_content_type()) for key in actions.keys(): if key in input_dict: - return actions[key](input_dict, req, id) + try: + context = req.environ['nova.context'] + return actions[key](context, input_dict, id) + except Exception, e: + LOG.exception(_("Error in action %(key)s: %(e)s") % + locals()) + return faults.Fault(exc.HTTPBadRequest()) return faults.Fault(exc.HTTPNotImplemented()) - def _action_change_password(self, input_dict, req, id): + def _action_change_password(self, context, input_dict, id): return exc.HTTPNotImplemented() - def _action_confirm_resize(self, input_dict, req, id): - try: - self.compute_api.confirm_resize(req.environ['nova.context'], id) - except Exception, e: - LOG.exception(_("Error in confirm-resize %s"), e) - return faults.Fault(exc.HTTPBadRequest()) + def _action_confirm_resize(self, context, input_dict, id): + self.compute_api.confirm_resize(context, id) return exc.HTTPNoContent() - def _action_revert_resize(self, input_dict, req, id): - try: - self.compute_api.revert_resize(req.environ['nova.context'], id) - except Exception, e: - LOG.exception(_("Error in revert-resize %s"), e) - return faults.Fault(exc.HTTPBadRequest()) + def _action_revert_resize(self, context, input_dict, id): + self.compute_api.revert_resize(context, id) return exc.HTTPAccepted() - def _action_rebuild(self, input_dict, req, id): + def _action_rebuild(self, context, input_dict, id): return faults.Fault(exc.HTTPNotImplemented()) - def _action_resize(self, input_dict, req, id): + def _action_resize(self, context, input_dict, id): """ Resizes a given instance to the flavor size requested """ - try: - if 'resize' in input_dict and 'flavorId' in input_dict['resize']: - flavor_id = input_dict['resize']['flavorId'] - self.compute_api.resize(req.environ['nova.context'], id, - flavor_id) - else: - LOG.exception(_("Missing arguments for resize")) - return faults.Fault(exc.HTTPUnprocessableEntity()) - except Exception, e: - LOG.exception(_("Error in resize %s"), e) - return faults.Fault(exc.HTTPBadRequest()) + if 'resize' in input_dict and 'flavorId' in input_dict['resize']: + flavor_id = input_dict['resize']['flavorId'] + self.compute_api.resize(context, id, flavor_id) + else: + LOG.exception(_("Missing arguments for resize")) + return faults.Fault(exc.HTTPUnprocessableEntity()) return faults.Fault(exc.HTTPAccepted()) - def _action_reboot(self, input_dict, req, id): - try: - reboot_type = input_dict['reboot']['type'] - except Exception: - raise faults.Fault(exc.HTTPNotImplemented()) - try: - # TODO(gundlach): pass reboot_type, support soft reboot in - # virt driver - self.compute_api.reboot(req.environ['nova.context'], id) - except: - return faults.Fault(exc.HTTPUnprocessableEntity()) + def _action_reboot(self, context, input_dict, id): + reboot_type = input_dict['reboot']['type'] + # TODO(gundlach): pass reboot_type, support soft reboot in + # virt driver + self.compute_api.reboot(context, id) return exc.HTTPAccepted() @scheduler_api.redirect_handler @@ -632,8 +618,7 @@ class ControllerV11(Controller): def _get_addresses_view_builder(self, req): return nova.api.openstack.views.addresses.ViewBuilderV11(req) - def _action_change_password(self, input_dict, req, id): - context = req.environ['nova.context'] + def _action_change_password(self, 
context, input_dict, id): if (not 'changePassword' in input_dict or not 'adminPass' in input_dict['changePassword']): msg = _("No adminPass was specified") diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py index 481d34ed140f..6453e96c98e1 100644 --- a/nova/tests/api/openstack/test_extensions.py +++ b/nova/tests/api/openstack/test_extensions.py @@ -158,6 +158,7 @@ class ActionExtensionTest(unittest.TestCase): request.method = 'POST' request.content_type = 'application/json' request.body = json.dumps(body) + request.environ = {'nova.context': 'context'} response = request.get_response(ext_midware) return response diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 556046e9de32..a92da52b12fe 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -952,6 +952,7 @@ class ServersTest(test.TestCase): req.method = 'POST' req.content_type = 'application/json' req.body = json.dumps(body) + req.environ = {"nova.context": "context"} res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 501) @@ -973,6 +974,7 @@ class ServersTest(test.TestCase): req.method = 'POST' req.content_type = 'application/json' req.body = json.dumps(body) + req.environ = {"nova.context": "context"} res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 202) self.assertEqual(mock_method.instance_id, '1') @@ -993,6 +995,7 @@ class ServersTest(test.TestCase): req.method = 'POST' req.content_type = 'application/json' req.body = json.dumps(body) + req.environ = {"nova.context": "context"} res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 400) @@ -1002,6 +1005,7 @@ class ServersTest(test.TestCase): req.method = 'POST' req.content_type = 'application/json' req.body = json.dumps(body) + req.environ = {"nova.context": "context"} res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 400) diff --git a/nova/tests/api/test_wsgi.py b/nova/tests/api/test_wsgi.py index 1ecdd1cfb27f..ed96aac5e254 100644 --- a/nova/tests/api/test_wsgi.py +++ b/nova/tests/api/test_wsgi.py @@ -136,6 +136,13 @@ class RequestTest(test.TestCase): request.body = "asdf
" self.assertRaises(webob.exc.HTTPBadRequest, request.get_content_type) + def test_request_content_type_with_charset(self): + request = wsgi.Request.blank('/tests/123') + request.headers["Content-Type"] = "application/json; charset=UTF-8" + request.body = "" + result = request.get_content_type() + self.assertEqual(result, "application/json") + def test_content_type_from_accept_xml(self): request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = "application/xml" diff --git a/nova/wsgi.py b/nova/wsgi.py index de2e0749fbdf..617830c22e23 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -28,6 +28,7 @@ from xml.dom import minidom import eventlet import eventlet.wsgi eventlet.patcher.monkey_patch(all=False, socket=True, time=True) +import re import routes import routes.middleware import webob @@ -105,12 +106,19 @@ class Request(webob.Request): return bm or "application/json" def get_content_type(self): - try: - ct = self.headers["Content-Type"] - assert ct in ("application/xml", "application/json") - return ct - except Exception: - raise webob.exc.HTTPBadRequest("Invalid content type") + allowed_types = ("application/xml", "application/json") + if not "Content-Type" in self.headers: + msg = _("Missing Content-Type") + LOG.debug(msg) + raise webob.exc.HTTPBadRequest(msg) + content_type = self.headers["Content-Type"] + match = re.search("([\w/]+)", content_type) + if match: + type = match.group(0) + if type in allowed_types: + return type + LOG.debug(_("Wrong Content-Type: %s") % content_type) + raise webob.exc.HTTPBadRequest("Invalid content type") class Application(object): From 155635faf20d4a1996639baf5d2c10b05734c3df Mon Sep 17 00:00:00 2001 From: Eldar Nugaev Date: Wed, 20 Apr 2011 21:50:03 +0400 Subject: [PATCH 43/66] replaced regex to webob.Request.content_type --- nova/wsgi.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/nova/wsgi.py b/nova/wsgi.py index 617830c22e23..9e0f80565dbb 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -28,7 +28,6 @@ from xml.dom import minidom import eventlet import eventlet.wsgi eventlet.patcher.monkey_patch(all=False, socket=True, time=True) -import re import routes import routes.middleware import webob @@ -111,13 +110,10 @@ class Request(webob.Request): msg = _("Missing Content-Type") LOG.debug(msg) raise webob.exc.HTTPBadRequest(msg) - content_type = self.headers["Content-Type"] - match = re.search("([\w/]+)", content_type) - if match: - type = match.group(0) - if type in allowed_types: - return type - LOG.debug(_("Wrong Content-Type: %s") % content_type) + type = self.content_type + if type in allowed_types: + return type + LOG.debug(_("Wrong Content-Type: %s") % type) raise webob.exc.HTTPBadRequest("Invalid content type") From 584cde68aa36c35c03c29eb4bb09ede5f8c4074e Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Wed, 20 Apr 2011 12:50:23 -0500 Subject: [PATCH 44/66] Pylinted nova-manage --- bin/nova-manage | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index b2308bc03fa1..2c06767f177b 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -58,7 +58,6 @@ import gettext import glob import json import os -import re import sys import time @@ -66,11 +65,11 @@ import IPy # If ../nova/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... 
-possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), +POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) -if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): - sys.path.insert(0, possible_topdir) +if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')): + sys.path.insert(0, POSSIBLE_TOPDIR) gettext.install('nova', unicode=1) @@ -809,11 +808,11 @@ class VolumeCommands(object): class InstanceTypeCommands(object): """Class for managing instance types / flavors.""" - def _print_instance_types(self, n, val): + def _print_instance_types(self, name, val): deleted = ('', ', inactive')[val["deleted"] == 1] print ("%s: Memory: %sMB, VCPUS: %s, Storage: %sGB, FlavorID: %s, " "Swap: %sGB, RXTX Quota: %sGB, RXTX Cap: %sMB%s") % ( - n, val["memory_mb"], val["vcpus"], val["local_gb"], + name, val["memory_mb"], val["vcpus"], val["local_gb"], val["flavorid"], val["swap"], val["rxtx_quota"], val["rxtx_cap"], deleted) @@ -1021,7 +1020,7 @@ class ImageCommands(object): machine_images[image_path] = image_metadata else: other_images[image_path] = image_metadata - except Exception as exc: + except Exception: print _("Failed to load %(fn)s.") % locals() # NOTE(vish): do kernels and ramdisks first so images self._convert_images(other_images) From 8b2ac745211a567b7c05e31343ada3ef4be85eb4 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Wed, 20 Apr 2011 12:56:44 -0500 Subject: [PATCH 45/66] Pylinted nova-compute. --- bin/nova-compute | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bin/nova-compute b/bin/nova-compute index 95fa393b1956..cd7c78def0ca 100755 --- a/bin/nova-compute +++ b/bin/nova-compute @@ -28,11 +28,11 @@ import sys # If ../nova/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... 
-possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), +POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) -if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): - sys.path.insert(0, possible_topdir) +if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')): + sys.path.insert(0, POSSIBLE_TOPDIR) gettext.install('nova', unicode=1) From fe23f71687e09248feb7542ea97001a496697742 Mon Sep 17 00:00:00 2001 From: Eldar Nugaev Date: Wed, 20 Apr 2011 22:01:14 +0400 Subject: [PATCH 46/66] remove ambiguity in test --- nova/tests/api/test_wsgi.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/tests/api/test_wsgi.py b/nova/tests/api/test_wsgi.py index ed96aac5e254..5820ecdc253f 100644 --- a/nova/tests/api/test_wsgi.py +++ b/nova/tests/api/test_wsgi.py @@ -139,7 +139,6 @@ class RequestTest(test.TestCase): def test_request_content_type_with_charset(self): request = wsgi.Request.blank('/tests/123') request.headers["Content-Type"] = "application/json; charset=UTF-8" - request.body = "" result = request.get_content_type() self.assertEqual(result, "application/json") From f4cfb9f0e654f26664345a041a62fd93613ef83b Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 20 Apr 2011 11:52:17 -0700 Subject: [PATCH 47/66] docstring cleanup compute manager --- nova/compute/manager.py | 232 ++++++++++++++++++---------------------- 1 file changed, 103 insertions(+), 129 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index c795d72ad0ea..8103ed61e28b 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -17,8 +17,7 @@ # License for the specific language governing permissions and limitations # under the License. -""" -Handles all processes relating to instances (guest vms). +"""Handles all processes relating to instances (guest vms). The :py:class:`ComputeManager` class is a :py:class:`nova.manager.Manager` that handles RPC calls relating to creating instances. It is responsible for @@ -33,6 +32,7 @@ terminating it. by :func:`nova.utils.import_object` :volume_manager: Name of class that handles persistent storage, loaded by :func:`nova.utils.import_object` + """ import datetime @@ -55,6 +55,7 @@ from nova import utils from nova.compute import power_state from nova.virt import driver + FLAGS = flags.FLAGS flags.DEFINE_string('instances_path', '$state_path/instances', 'where instances are stored on disk') @@ -74,19 +75,14 @@ flags.DEFINE_integer("rescue_timeout", 0, "Automatically unrescue an instance after N seconds." 
" Set to 0 to disable.") + LOG = logging.getLogger('nova.compute.manager') def checks_instance_lock(function): - """ - decorator used for preventing action against locked instances - unless, of course, you happen to be admin - - """ - + """Decorator to prevent action against locked instances for non-admins.""" @functools.wraps(function) def decorated_function(self, context, instance_id, *args, **kwargs): - LOG.info(_("check_instance_lock: decorating: |%s|"), function, context=context) LOG.info(_("check_instance_lock: arguments: |%(self)s| |%(context)s|" @@ -112,7 +108,6 @@ def checks_instance_lock(function): class ComputeManager(manager.SchedulerDependentManager): - """Manages the running instances from creation to destruction.""" def __init__(self, compute_driver=None, *args, **kwargs): @@ -136,9 +131,7 @@ class ComputeManager(manager.SchedulerDependentManager): *args, **kwargs) def init_host(self): - """Do any initialization that needs to be run if this is a - standalone service. - """ + """Initialization for a standalone compute service.""" self.driver.init_host(host=self.host) def _update_state(self, context, instance_id): @@ -153,16 +146,18 @@ class ComputeManager(manager.SchedulerDependentManager): self.db.instance_set_state(context, instance_id, state) def get_console_topic(self, context, **kwargs): - """Retrieves the console host for a project on this host - Currently this is just set in the flags for each compute - host.""" + """Retrieves the console host for a project on this host. + + Currently this is just set in the flags for each compute host. + + """ #TODO(mdragon): perhaps make this variable by console_type? return self.db.queue_get_for(context, FLAGS.console_topic, FLAGS.console_host) def get_network_topic(self, context, **kwargs): - """Retrieves the network host for a project on this host""" + """Retrieves the network host for a project on this host.""" # TODO(vish): This method should be memoized. This will make # the call to get_network_host cheaper, so that # it can pas messages instead of checking the db @@ -179,15 +174,23 @@ class ComputeManager(manager.SchedulerDependentManager): return self.driver.get_console_pool_info(console_type) @exception.wrap_exception - def refresh_security_group_rules(self, context, - security_group_id, **kwargs): - """This call passes straight through to the virtualization driver.""" + def refresh_security_group_rules(self, context, security_group_id, + **kwargs): + """Tell the virtualization driver to refresh security group rules. + + Passes straight through to the virtualization driver. + + """ return self.driver.refresh_security_group_rules(security_group_id) @exception.wrap_exception def refresh_security_group_members(self, context, security_group_id, **kwargs): - """This call passes straight through to the virtualization driver.""" + """Tell the virtualization driver to refresh security group members. + + Passes straight through to the virtualization driver. 
+ + """ return self.driver.refresh_security_group_members(security_group_id) @exception.wrap_exception @@ -249,7 +252,7 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception @checks_instance_lock def terminate_instance(self, context, instance_id): - """Terminate an instance on this machine.""" + """Terminate an instance on this host.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) LOG.audit(_("Terminating instance %s"), instance_id, context=context) @@ -297,7 +300,7 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception @checks_instance_lock def reboot_instance(self, context, instance_id): - """Reboot an instance on this server.""" + """Reboot an instance on this host.""" context = context.elevated() self._update_state(context, instance_id) instance_ref = self.db.instance_get(context, instance_id) @@ -321,7 +324,7 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception def snapshot_instance(self, context, instance_id, image_id): - """Snapshot an instance on this server.""" + """Snapshot an instance on this host.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) @@ -344,7 +347,7 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception @checks_instance_lock def set_admin_password(self, context, instance_id, new_pass=None): - """Set the root/admin password for an instance on this server.""" + """Set the root/admin password for an instance on this host.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) instance_id = instance_ref['id'] @@ -365,7 +368,7 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception @checks_instance_lock def inject_file(self, context, instance_id, path, file_contents): - """Write a file to the specified path on an instance on this server""" + """Write a file to the specified path in an instance on this host.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) instance_id = instance_ref['id'] @@ -383,44 +386,34 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception @checks_instance_lock def rescue_instance(self, context, instance_id): - """Rescue an instance on this server.""" + """Rescue an instance on this host.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) LOG.audit(_('instance %s: rescuing'), instance_id, context=context) - self.db.instance_set_state( - context, - instance_id, - power_state.NOSTATE, - 'rescuing') + self.db.instance_set_state(context, + instance_id, + power_state.NOSTATE, + 'rescuing') self.network_manager.setup_compute_network(context, instance_id) - self.driver.rescue( - instance_ref, - lambda result: self._update_state_callback( - self, - context, - instance_id, - result)) + _update_state = lambda result: self._update_state_callback( + self, context, instance_id, result)) + self.driver.rescue(instance_ref, _update_state) self._update_state(context, instance_id) @exception.wrap_exception @checks_instance_lock def unrescue_instance(self, context, instance_id): - """Rescue an instance on this server.""" + """Rescue an instance on this host.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) LOG.audit(_('instance %s: unrescuing'), instance_id, context=context) - self.db.instance_set_state( - context, - instance_id, - 
power_state.NOSTATE, - 'unrescuing') - self.driver.unrescue( - instance_ref, - lambda result: self._update_state_callback( - self, - context, - instance_id, - result)) + self.db.instance_set_state(context, + instance_id, + power_state.NOSTATE, + 'unrescuing') + _update_state = lambda result: self._update_state_callback( + self, context, instance_id, result)) + self.driver.unrescue(instance_ref, _update_state) self._update_state(context, instance_id) @staticmethod @@ -431,7 +424,7 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception @checks_instance_lock def confirm_resize(self, context, instance_id, migration_id): - """Destroys the source instance""" + """Destroys the source instance.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) self.driver.destroy(instance_ref) @@ -439,9 +432,12 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception @checks_instance_lock def revert_resize(self, context, instance_id, migration_id): - """Destroys the new instance on the destination machine, - reverts the model changes, and powers on the old - instance on the source machine""" + """Destroys the new instance on the destination machine. + + Reverts the model changes, and powers on the old instance on the + source machine. + + """ instance_ref = self.db.instance_get(context, instance_id) migration_ref = self.db.migration_get(context, migration_id) @@ -458,9 +454,12 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception @checks_instance_lock def finish_revert_resize(self, context, instance_id, migration_id): - """Finishes the second half of reverting a resize, powering back on - the source instance and reverting the resized attributes in the - database""" + """Finishes the second half of reverting a resize. + + Power back on the source instance and revert the resized attributes + in the database. + + """ instance_ref = self.db.instance_get(context, instance_id) migration_ref = self.db.migration_get(context, migration_id) instance_type = self.db.instance_type_get_by_flavor_id(context, @@ -480,8 +479,11 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception @checks_instance_lock def prep_resize(self, context, instance_id, flavor_id): - """Initiates the process of moving a running instance to another - host, possibly changing the RAM and disk size in the process""" + """Initiates the process of moving a running instance to another host. + + Possibly changes the RAM and disk size in the process. 
+ + """ context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) if instance_ref['host'] == FLAGS.host: @@ -513,35 +515,38 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception @checks_instance_lock def resize_instance(self, context, instance_id, migration_id): - """Starts the migration of a running instance to another host""" + """Starts the migration of a running instance to another host.""" migration_ref = self.db.migration_get(context, migration_id) instance_ref = self.db.instance_get(context, instance_id) - self.db.migration_update(context, migration_id, - {'status': 'migrating', }) + self.db.migration_update(context, + migration_id, + {'status': 'migrating'}) - disk_info = self.driver.migrate_disk_and_power_off(instance_ref, - migration_ref['dest_host']) - self.db.migration_update(context, migration_id, - {'status': 'post-migrating', }) + disk_info = self.driver.migrate_disk_and_power_off( + instance_ref, migration_ref['dest_host']) + self.db.migration_update(context, + migration_id, + {'status': 'post-migrating'}) - # Make sure the service exists before sending a message. - _service = self.db.service_get_by_host_and_topic(context, - migration_ref['dest_compute'], FLAGS.compute_topic) - topic = self.db.queue_get_for(context, FLAGS.compute_topic, - migration_ref['dest_compute']) - rpc.cast(context, topic, - {'method': 'finish_resize', - 'args': { - 'migration_id': migration_id, - 'instance_id': instance_id, - 'disk_info': disk_info, }, - }) + service = self.db.service_get_by_host_and_topic( + context, migration_ref['dest_compute'], FLAGS.compute_topic) + topic = self.db.queue_get_for(context, + FLAGS.compute_topic, + migration_ref['dest_compute']) + rpc.cast(context, topic, {'method': 'finish_resize', + 'args': {'migration_id': migration_id, + 'instance_id': instance_id, + 'disk_info': disk_info}}) @exception.wrap_exception @checks_instance_lock def finish_resize(self, context, instance_id, migration_id, disk_info): - """Completes the migration process by setting up the newly transferred - disk and turning on the instance on its new host machine""" + """Completes the migration process. + + Sets up the newly transferred disk and turns on the instance at its + new host machine. 
+ + """ migration_ref = self.db.migration_get(context, migration_id) instance_ref = self.db.instance_get(context, migration_ref['instance_id']) @@ -566,7 +571,7 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception @checks_instance_lock def pause_instance(self, context, instance_id): - """Pause an instance on this server.""" + """Pause an instance on this host.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) LOG.audit(_('instance %s: pausing'), instance_id, context=context) @@ -583,7 +588,7 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception @checks_instance_lock def unpause_instance(self, context, instance_id): - """Unpause a paused instance on this server.""" + """Unpause a paused instance on this host.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) LOG.audit(_('instance %s: unpausing'), instance_id, context=context) @@ -599,7 +604,7 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception def get_diagnostics(self, context, instance_id): - """Retrieve diagnostics for an instance on this server.""" + """Retrieve diagnostics for an instance on this host.""" instance_ref = self.db.instance_get(context, instance_id) if instance_ref["state"] == power_state.RUNNING: @@ -610,10 +615,7 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception @checks_instance_lock def suspend_instance(self, context, instance_id): - """ - suspend the instance with instance_id - - """ + """Suspend the given instance.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) LOG.audit(_('instance %s: suspending'), instance_id, context=context) @@ -629,10 +631,7 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception @checks_instance_lock def resume_instance(self, context, instance_id): - """ - resume the suspended instance with instance_id - - """ + """Resume the given suspended instance.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) LOG.audit(_('instance %s: resuming'), instance_id, context=context) @@ -647,10 +646,7 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception def lock_instance(self, context, instance_id): - """ - lock the instance with instance_id - - """ + """Lock the given instance.""" context = context.elevated() LOG.debug(_('instance %s: locking'), instance_id, context=context) @@ -658,10 +654,7 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception def unlock_instance(self, context, instance_id): - """ - unlock the instance with instance_id - - """ + """Unlock the given instance.""" context = context.elevated() LOG.debug(_('instance %s: unlocking'), instance_id, context=context) @@ -669,10 +662,7 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception def get_lock(self, context, instance_id): - """ - return the boolean state of (instance with instance_id)'s lock - - """ + """Return the boolean state of the given instance's lock.""" context = context.elevated() LOG.debug(_('instance %s: getting locked state'), instance_id, context=context) @@ -681,10 +671,7 @@ class ComputeManager(manager.SchedulerDependentManager): @checks_instance_lock def reset_network(self, context, instance_id): - """ - Reset networking on the instance. 
- - """ + """Reset networking on the given instance.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) LOG.debug(_('instance %s: reset network'), instance_id, @@ -693,10 +680,7 @@ class ComputeManager(manager.SchedulerDependentManager): @checks_instance_lock def inject_network_info(self, context, instance_id): - """ - Inject network info for the instance. - - """ + """Inject network info for the given instance.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) LOG.debug(_('instance %s: inject network info'), instance_id, @@ -705,7 +689,7 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception def get_console_output(self, context, instance_id): - """Send the console output for an instance.""" + """Send the console output for the given instance.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) LOG.audit(_("Get console output for instance %s"), instance_id, @@ -714,20 +698,18 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception def get_ajax_console(self, context, instance_id): - """Return connection information for an ajax console""" + """Return connection information for an ajax console.""" context = context.elevated() LOG.debug(_("instance %s: getting ajax console"), instance_id) instance_ref = self.db.instance_get(context, instance_id) - return self.driver.get_ajax_console(instance_ref) @exception.wrap_exception def get_vnc_console(self, context, instance_id): - """Return connection information for an vnc console.""" + """Return connection information for a vnc console.""" context = context.elevated() LOG.debug(_("instance %s: getting vnc console"), instance_id) instance_ref = self.db.instance_get(context, instance_id) - return self.driver.get_vnc_console(instance_ref) @checks_instance_lock @@ -781,7 +763,7 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception def compare_cpu(self, context, cpu_info): - """Checks the host cpu is compatible to a cpu given by xml. + """Checks that the host cpu is compatible with a cpu given by xml. 
:param context: security context :param cpu_info: json string obtained from virConnect.getCapabilities @@ -802,7 +784,6 @@ class ComputeManager(manager.SchedulerDependentManager): :returns: tmpfile name(basename) """ - dirpath = FLAGS.instances_path fd, tmp_file = tempfile.mkstemp(dir=dirpath) LOG.debug(_("Creating tmpfile %s to notify to other " @@ -819,7 +800,6 @@ class ComputeManager(manager.SchedulerDependentManager): :param filename: confirm existence of FLAGS.instances_path/thisfile """ - tmp_file = os.path.join(FLAGS.instances_path, filename) if not os.path.exists(tmp_file): raise exception.NotFound(_('%s not found') % tmp_file) @@ -832,7 +812,6 @@ class ComputeManager(manager.SchedulerDependentManager): :param filename: remove existence of FLAGS.instances_path/thisfile """ - tmp_file = os.path.join(FLAGS.instances_path, filename) os.remove(tmp_file) @@ -844,7 +823,6 @@ class ComputeManager(manager.SchedulerDependentManager): :returns: See driver.update_available_resource() """ - return self.driver.update_available_resource(context, self.host) def pre_live_migration(self, context, instance_id, time=None): @@ -854,7 +832,6 @@ class ComputeManager(manager.SchedulerDependentManager): :param instance_id: nova.db.sqlalchemy.models.Instance.Id """ - if not time: time = greenthread @@ -913,7 +890,6 @@ class ComputeManager(manager.SchedulerDependentManager): :param dest: destination host """ - # Get instance for error handling. instance_ref = self.db.instance_get(context, instance_id) i_name = instance_ref.name @@ -1009,12 +985,10 @@ class ComputeManager(manager.SchedulerDependentManager): :param ctxt: security context :param instance_id: nova.db.sqlalchemy.models.Instance.Id - :param host: - DB column value is updated by this hostname. - if none, the host instance currently running is selected. + :param host: DB column value is updated by this hostname. + If none, the host instance currently running is selected. 
""" - if not host: host = instance_ref['host'] From b26f3166554cf5de29fcedee7463bc523786cf72 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 20 Apr 2011 11:52:19 -0700 Subject: [PATCH 48/66] fix typo --- nova/compute/manager.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 8103ed61e28b..fac00e45e0a6 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -396,7 +396,7 @@ class ComputeManager(manager.SchedulerDependentManager): 'rescuing') self.network_manager.setup_compute_network(context, instance_id) _update_state = lambda result: self._update_state_callback( - self, context, instance_id, result)) + self, context, instance_id, result) self.driver.rescue(instance_ref, _update_state) self._update_state(context, instance_id) @@ -412,7 +412,7 @@ class ComputeManager(manager.SchedulerDependentManager): power_state.NOSTATE, 'unrescuing') _update_state = lambda result: self._update_state_callback( - self, context, instance_id, result)) + self, context, instance_id, result) self.driver.unrescue(instance_ref, _update_state) self._update_state(context, instance_id) From 98b8a800b16a1e6699b1d4c7ce0e4ab61319be6e Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 20 Apr 2011 12:00:21 -0700 Subject: [PATCH 49/66] docstring cleanup, nova/db dir --- nova/db/api.py | 61 +++++++++++++++++++++++--------------------- nova/db/base.py | 8 +++--- nova/db/migration.py | 2 ++ 3 files changed, 38 insertions(+), 33 deletions(-) diff --git a/nova/db/api.py b/nova/db/api.py index 63901e94d879..1b33d89326a9 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -15,8 +15,8 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -""" -Defines interface for DB access. + +"""Defines interface for DB access. The underlying driver is loaded as a :class:`LazyPluggable`. @@ -30,6 +30,7 @@ The underlying driver is loaded as a :class:`LazyPluggable`. :enable_new_services: when adding a new service to the database, is it in the pool of available hardware (Default: True) + """ from nova import exception @@ -86,7 +87,7 @@ def service_get(context, service_id): def service_get_by_host_and_topic(context, host, topic): - """Get a service by host it's on and topic it listens to""" + """Get a service by host it's on and topic it listens to.""" return IMPL.service_get_by_host_and_topic(context, host, topic) @@ -113,7 +114,7 @@ def service_get_all_compute_by_host(context, host): def service_get_all_compute_sorted(context): """Get all compute services sorted by instance count. - Returns a list of (Service, instance_count) tuples. + :returns: a list of (Service, instance_count) tuples. """ return IMPL.service_get_all_compute_sorted(context) @@ -122,7 +123,7 @@ def service_get_all_compute_sorted(context): def service_get_all_network_sorted(context): """Get all network services sorted by network count. - Returns a list of (Service, network_count) tuples. + :returns: a list of (Service, network_count) tuples. """ return IMPL.service_get_all_network_sorted(context) @@ -131,7 +132,7 @@ def service_get_all_network_sorted(context): def service_get_all_volume_sorted(context): """Get all volume services sorted by volume count. - Returns a list of (Service, volume_count) tuples. + :returns: a list of (Service, volume_count) tuples. 
""" return IMPL.service_get_all_volume_sorted(context) @@ -241,7 +242,7 @@ def floating_ip_count_by_project(context, project_id): def floating_ip_deallocate(context, address): - """Deallocate an floating ip by address""" + """Deallocate an floating ip by address.""" return IMPL.floating_ip_deallocate(context, address) @@ -253,7 +254,7 @@ def floating_ip_destroy(context, address): def floating_ip_disassociate(context, address): """Disassociate an floating ip from a fixed ip by address. - Returns the address of the existing fixed ip. + :returns: the address of the existing fixed ip. """ return IMPL.floating_ip_disassociate(context, address) @@ -294,22 +295,22 @@ def floating_ip_update(context, address, values): #################### def migration_update(context, id, values): - """Update a migration instance""" + """Update a migration instance.""" return IMPL.migration_update(context, id, values) def migration_create(context, values): - """Create a migration record""" + """Create a migration record.""" return IMPL.migration_create(context, values) def migration_get(context, migration_id): - """Finds a migration by the id""" + """Finds a migration by the id.""" return IMPL.migration_get(context, migration_id) def migration_get_by_instance_and_status(context, instance_id, status): - """Finds a migration by the instance id its migrating""" + """Finds a migration by the instance id its migrating.""" return IMPL.migration_get_by_instance_and_status(context, instance_id, status) @@ -579,7 +580,9 @@ def network_create_safe(context, values): def network_delete_safe(context, network_id): """Delete network with key network_id. + This method assumes that the network is not associated with any project + """ return IMPL.network_delete_safe(context, network_id) @@ -674,7 +677,6 @@ def project_get_network(context, project_id, associate=True): network if one is not found, otherwise it returns None. """ - return IMPL.project_get_network(context, project_id, associate) @@ -722,7 +724,9 @@ def iscsi_target_create_safe(context, values): The device is not returned. If the create violates the unique constraints because the iscsi_target and host already exist, - no exception is raised.""" + no exception is raised. + + """ return IMPL.iscsi_target_create_safe(context, values) @@ -1050,10 +1054,7 @@ def project_delete(context, project_id): def host_get_networks(context, host): - """Return all networks for which the given host is the designated - network host. 
- - """ + """All networks for which the given host is the network host.""" return IMPL.host_get_networks(context, host) @@ -1115,38 +1116,40 @@ def console_get(context, console_id, instance_id=None): def instance_type_create(context, values): - """Create a new instance type""" + """Create a new instance type.""" return IMPL.instance_type_create(context, values) def instance_type_get_all(context, inactive=False): - """Get all instance types""" + """Get all instance types.""" return IMPL.instance_type_get_all(context, inactive) def instance_type_get_by_id(context, id): - """Get instance type by id""" + """Get instance type by id.""" return IMPL.instance_type_get_by_id(context, id) def instance_type_get_by_name(context, name): - """Get instance type by name""" + """Get instance type by name.""" return IMPL.instance_type_get_by_name(context, name) def instance_type_get_by_flavor_id(context, id): - """Get instance type by name""" + """Get instance type by name.""" return IMPL.instance_type_get_by_flavor_id(context, id) def instance_type_destroy(context, name): - """Delete a instance type""" + """Delete a instance type.""" return IMPL.instance_type_destroy(context, name) def instance_type_purge(context, name): - """Purges (removes) an instance type from DB - Use instance_type_destroy for most cases + """Purges (removes) an instance type from DB. + + Use instance_type_destroy for most cases + """ return IMPL.instance_type_purge(context, name) @@ -1183,15 +1186,15 @@ def zone_get_all(context): def instance_metadata_get(context, instance_id): - """Get all metadata for an instance""" + """Get all metadata for an instance.""" return IMPL.instance_metadata_get(context, instance_id) def instance_metadata_delete(context, instance_id, key): - """Delete the given metadata item""" + """Delete the given metadata item.""" IMPL.instance_metadata_delete(context, instance_id, key) def instance_metadata_update_or_create(context, instance_id, metadata): - """Create or update instance metadata""" + """Create or update instance metadata.""" IMPL.instance_metadata_update_or_create(context, instance_id, metadata) diff --git a/nova/db/base.py b/nova/db/base.py index a0f2180c6569..a0d055d5b0f6 100644 --- a/nova/db/base.py +++ b/nova/db/base.py @@ -16,20 +16,20 @@ # License for the specific language governing permissions and limitations # under the License. -""" -Base class for classes that need modular database access. -""" +"""Base class for classes that need modular database access.""" from nova import utils from nova import flags + FLAGS = flags.FLAGS flags.DEFINE_string('db_driver', 'nova.db.api', 'driver to use for database access') class Base(object): - """DB driver is injected in the init method""" + """DB driver is injected in the init method.""" + def __init__(self, db_driver=None): if not db_driver: db_driver = FLAGS.db_driver diff --git a/nova/db/migration.py b/nova/db/migration.py index e54b90cd8f17..ccd06cffe10c 100644 --- a/nova/db/migration.py +++ b/nova/db/migration.py @@ -15,11 +15,13 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
+ """Database setup and migration commands.""" from nova import flags from nova import utils + FLAGS = flags.FLAGS flags.DECLARE('db_backend', 'nova.db.api') From 04fd29085c6ed5fe72378b061b1d7659f110c924 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 20 Apr 2011 12:06:10 -0700 Subject: [PATCH 50/66] docstring cleanup, console --- nova/console/api.py | 23 +++++------- nova/console/fake.py | 22 +++++------ nova/console/manager.py | 17 +++++---- nova/console/vmrc.py | 48 ++++++++++++------------ nova/console/vmrc_manager.py | 72 ++++++++++++++++-------------------- nova/console/xvp.py | 48 ++++++++++++------------ 6 files changed, 108 insertions(+), 122 deletions(-) diff --git a/nova/console/api.py b/nova/console/api.py index 3850d2c44e53..137ddcaaccb7 100644 --- a/nova/console/api.py +++ b/nova/console/api.py @@ -15,23 +15,19 @@ # License for the specific language governing permissions and limitations # under the License. -""" -Handles ConsoleProxy API requests -""" +"""Handles ConsoleProxy API requests.""" from nova import exception -from nova.db import base - - from nova import flags from nova import rpc +from nova.db import base FLAGS = flags.FLAGS class API(base.Base): - """API for spining up or down console proxy connections""" + """API for spinning up or down console proxy connections.""" def __init__(self, **kwargs): super(API, self).__init__(**kwargs) @@ -51,8 +47,8 @@ class API(base.Base): self.db.queue_get_for(context, FLAGS.console_topic, pool['host']), - {"method": "remove_console", - "args": {"console_id": console['id']}}) + {'method': 'remove_console', + 'args': {'console_id': console['id']}}) def create_console(self, context, instance_id): instance = self.db.instance_get(context, instance_id) @@ -63,13 +59,12 @@ class API(base.Base): # here. rpc.cast(context, self._get_console_topic(context, instance['host']), - {"method": "add_console", - "args": {"instance_id": instance_id}}) + {'method': 'add_console', + 'args': {'instance_id': instance_id}}) def _get_console_topic(self, context, instance_host): topic = self.db.queue_get_for(context, FLAGS.compute_topic, instance_host) - return rpc.call(context, - topic, - {"method": "get_console_topic", "args": {'fake': 1}}) + return rpc.call(context, topic, {'method': 'get_console_topic', + 'args': {'fake': 1}}) diff --git a/nova/console/fake.py b/nova/console/fake.py index 7a90d5221001..e2eb886f851c 100644 --- a/nova/console/fake.py +++ b/nova/console/fake.py @@ -15,9 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. -""" -Fake ConsoleProxy driver for tests. 
-""" +"""Fake ConsoleProxy driver for tests.""" from nova import exception @@ -27,32 +25,32 @@ class FakeConsoleProxy(object): @property def console_type(self): - return "fake" + return 'fake' def setup_console(self, context, console): - """Sets up actual proxies""" + """Sets up actual proxies.""" pass def teardown_console(self, context, console): - """Tears down actual proxies""" + """Tears down actual proxies.""" pass def init_host(self): - """Start up any config'ed consoles on start""" + """Start up any config'ed consoles on start.""" pass def generate_password(self, length=8): - """Returns random console password""" - return "fakepass" + """Returns random console password.""" + return 'fakepass' def get_port(self, context): - """get available port for consoles that need one""" + """Get available port for consoles that need one.""" return 5999 def fix_pool_password(self, password): - """Trim password to length, and any other massaging""" + """Trim password to length, and any other massaging.""" return password def fix_console_password(self, password): - """Trim password to length, and any other massaging""" + """Trim password to length, and any other massaging.""" return password diff --git a/nova/console/manager.py b/nova/console/manager.py index bfa571ea9994..e0db21666474 100644 --- a/nova/console/manager.py +++ b/nova/console/manager.py @@ -15,9 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. -""" -Console Proxy Service -""" +"""Console Proxy Service.""" import functools import socket @@ -29,6 +27,7 @@ from nova import manager from nova import rpc from nova import utils + FLAGS = flags.FLAGS flags.DEFINE_string('console_driver', 'nova.console.xvp.XVPConsoleProxy', @@ -41,9 +40,11 @@ flags.DEFINE_string('console_public_hostname', class ConsoleProxyManager(manager.Manager): + """Sets up and tears down any console proxy connections. - """ Sets up and tears down any proxy connections needed for accessing - instance consoles securely""" + Needed for accessing instance consoles securely. + + """ def __init__(self, console_driver=None, *args, **kwargs): if not console_driver: @@ -67,7 +68,7 @@ class ConsoleProxyManager(manager.Manager): pool['id'], instance_id) except exception.NotFound: - logging.debug(_("Adding console")) + logging.debug(_('Adding console')) if not password: password = utils.generate_password(8) if not port: @@ -115,8 +116,8 @@ class ConsoleProxyManager(manager.Manager): self.db.queue_get_for(context, FLAGS.compute_topic, instance_host), - {"method": "get_console_pool_info", - "args": {"console_type": console_type}}) + {'method': 'get_console_pool_info', + 'args': {'console_type': console_type}}) pool_info['password'] = self.driver.fix_pool_password( pool_info['password']) pool_info['host'] = self.host diff --git a/nova/console/vmrc.py b/nova/console/vmrc.py index 521da289f402..127d311212ce 100644 --- a/nova/console/vmrc.py +++ b/nova/console/vmrc.py @@ -15,9 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. -""" -VMRC console drivers. 
-""" +"""VMRC console drivers.""" import base64 import json @@ -27,6 +25,8 @@ from nova import flags from nova import log as logging from nova.virt.vmwareapi import vim_util + +FLAGS = flags.FLAGS flags.DEFINE_integer('console_vmrc_port', 443, "port for VMware VMRC connections") @@ -34,8 +34,6 @@ flags.DEFINE_integer('console_vmrc_error_retries', 10, "number of retries for retrieving VMRC information") -FLAGS = flags.FLAGS - class VMRCConsole(object): """VMRC console driver with ESX credentials.""" @@ -69,34 +67,34 @@ class VMRCConsole(object): return password def generate_password(self, vim_session, pool, instance_name): - """ - Returns VMRC Connection credentials. + """Returns VMRC Connection credentials. Return string is of the form ':@'. + """ username, password = pool['username'], pool['password'] - vms = vim_session._call_method(vim_util, "get_objects", - "VirtualMachine", ["name", "config.files.vmPathName"]) + vms = vim_session._call_method(vim_util, 'get_objects', + 'VirtualMachine', ['name', 'config.files.vmPathName']) vm_ds_path_name = None vm_ref = None for vm in vms: vm_name = None ds_path_name = None for prop in vm.propSet: - if prop.name == "name": + if prop.name == 'name': vm_name = prop.val - elif prop.name == "config.files.vmPathName": + elif prop.name == 'config.files.vmPathName': ds_path_name = prop.val if vm_name == instance_name: vm_ref = vm.obj vm_ds_path_name = ds_path_name break if vm_ref is None: - raise exception.NotFound(_("instance - %s not present") % + raise exception.NotFound(_('instance - %s not present') % instance_name) - json_data = json.dumps({"vm_id": vm_ds_path_name, - "username": username, - "password": password}) + json_data = json.dumps({'vm_id': vm_ds_path_name, + 'username': username, + 'password': password}) return base64.b64encode(json_data) def is_otp(self): @@ -115,28 +113,28 @@ class VMRCSessionConsole(VMRCConsole): return 'vmrc+session' def generate_password(self, vim_session, pool, instance_name): - """ - Returns a VMRC Session. + """Returns a VMRC Session. Return string is of the form ':'. + """ - vms = vim_session._call_method(vim_util, "get_objects", - "VirtualMachine", ["name"]) - vm_ref = None + vms = vim_session._call_method(vim_util, 'get_objects', + 'VirtualMachine', ['name']) + vm_ref = NoneV for vm in vms: if vm.propSet[0].val == instance_name: vm_ref = vm.obj if vm_ref is None: - raise exception.NotFound(_("instance - %s not present") % + raise exception.NotFound(_('instance - %s not present') % instance_name) virtual_machine_ticket = \ vim_session._call_method( vim_session._get_vim(), - "AcquireCloneTicket", + 'AcquireCloneTicket', vim_session._get_vim().get_service_content().sessionManager) - json_data = json.dumps({"vm_id": str(vm_ref.value), - "username": virtual_machine_ticket, - "password": virtual_machine_ticket}) + json_data = json.dumps({'vm_id': str(vm_ref.value), + 'username': virtual_machine_ticket, + 'password': virtual_machine_ticket}) return base64.b64encode(json_data) def is_otp(self): diff --git a/nova/console/vmrc_manager.py b/nova/console/vmrc_manager.py index 09beac7a0d9a..3c6b777823ab 100644 --- a/nova/console/vmrc_manager.py +++ b/nova/console/vmrc_manager.py @@ -15,9 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. -""" -VMRC Console Manager. 
-""" +"""VMRC Console Manager.""" from nova import exception from nova import flags @@ -25,24 +23,21 @@ from nova import log as logging from nova import manager from nova import rpc from nova import utils -from nova.virt.vmwareapi_conn import VMWareAPISession +from nova.virt import vmwareapi_conn + LOG = logging.getLogger("nova.console.vmrc_manager") + FLAGS = flags.FLAGS -flags.DEFINE_string('console_public_hostname', - '', +flags.DEFINE_string('console_public_hostname', '', 'Publicly visible name for this console host') -flags.DEFINE_string('console_driver', - 'nova.console.vmrc.VMRCConsole', +flags.DEFINE_string('console_driver', 'nova.console.vmrc.VMRCConsole', 'Driver to use for the console') class ConsoleVMRCManager(manager.Manager): - - """ - Manager to handle VMRC connections needed for accessing instance consoles. - """ + """Manager to handle VMRC connections for accessing instance consoles.""" def __init__(self, console_driver=None, *args, **kwargs): self.driver = utils.import_object(FLAGS.console_driver) @@ -56,7 +51,7 @@ class ConsoleVMRCManager(manager.Manager): """Get VIM session for the pool specified.""" vim_session = None if pool['id'] not in self.sessions.keys(): - vim_session = VMWareAPISession(pool['address'], + vim_session = vmwareapi_conn.VMWareAPISession(pool['address'], pool['username'], pool['password'], FLAGS.console_vmrc_error_retries) @@ -65,7 +60,7 @@ class ConsoleVMRCManager(manager.Manager): def _generate_console(self, context, pool, name, instance_id, instance): """Sets up console for the instance.""" - LOG.debug(_("Adding console")) + LOG.debug(_('Adding console')) password = self.driver.generate_password( self._get_vim_session(pool), @@ -84,9 +79,10 @@ class ConsoleVMRCManager(manager.Manager): @exception.wrap_exception def add_console(self, context, instance_id, password=None, port=None, **kwargs): - """ - Adds a console for the instance. If it is one time password, then we - generate new console credentials. + """Adds a console for the instance. + + If it is one time password, then we generate new console credentials. 
+ """ instance = self.db.instance_get(context, instance_id) host = instance['host'] @@ -97,19 +93,17 @@ class ConsoleVMRCManager(manager.Manager): pool['id'], instance_id) if self.driver.is_otp(): - console = self._generate_console( - context, - pool, - name, - instance_id, - instance) + console = self._generate_console(context, + pool, + name, + instance_id, + instance) except exception.NotFound: - console = self._generate_console( - context, - pool, - name, - instance_id, - instance) + console = self._generate_console(context, + pool, + name, + instance_id, + instance) return console['id'] @exception.wrap_exception @@ -118,13 +112,11 @@ class ConsoleVMRCManager(manager.Manager): try: console = self.db.console_get(context, console_id) except exception.NotFound: - LOG.debug(_("Tried to remove non-existent console " - "%(console_id)s.") % - {'console_id': console_id}) + LOG.debug(_('Tried to remove non-existent console ' + '%(console_id)s.') % {'console_id': console_id}) return - LOG.debug(_("Removing console " - "%(console_id)s.") % - {'console_id': console_id}) + LOG.debug(_('Removing console ' + '%(console_id)s.') % {'console_id': console_id}) self.db.console_delete(context, console_id) self.driver.teardown_console(context, console) @@ -139,11 +131,11 @@ class ConsoleVMRCManager(manager.Manager): console_type) except exception.NotFound: pool_info = rpc.call(context, - self.db.queue_get_for(context, - FLAGS.compute_topic, - instance_host), - {"method": "get_console_pool_info", - "args": {"console_type": console_type}}) + self.db.queue_get_for(context, + FLAGS.compute_topic, + instance_host), + {'method': 'get_console_pool_info', + 'args': {'console_type': console_type}}) pool_info['password'] = self.driver.fix_pool_password( pool_info['password']) pool_info['host'] = self.host diff --git a/nova/console/xvp.py b/nova/console/xvp.py index 0cedfbb1329d..3cd287183313 100644 --- a/nova/console/xvp.py +++ b/nova/console/xvp.py @@ -15,16 +15,14 @@ # License for the specific language governing permissions and limitations # under the License. -""" -XVP (Xenserver VNC Proxy) driver. -""" +"""XVP (Xenserver VNC Proxy) driver.""" import fcntl import os import signal import subprocess -from Cheetah.Template import Template +from Cheetah import Template from nova import context from nova import db @@ -33,6 +31,8 @@ from nova import flags from nova import log as logging from nova import utils + +FLAGS = flags.FLAGS flags.DEFINE_string('console_xvp_conf_template', utils.abspath('console/xvp.conf.template'), 'XVP conf template') @@ -47,12 +47,11 @@ flags.DEFINE_string('console_xvp_log', 'XVP log file') flags.DEFINE_integer('console_xvp_multiplex_port', 5900, - "port for XVP to multiplex VNC connections on") -FLAGS = flags.FLAGS + 'port for XVP to multiplex VNC connections on') class XVPConsoleProxy(object): - """Sets up XVP config, and manages xvp daemon""" + """Sets up XVP config, and manages XVP daemon.""" def __init__(self): self.xvpconf_template = open(FLAGS.console_xvp_conf_template).read() @@ -61,50 +60,51 @@ class XVPConsoleProxy(object): @property def console_type(self): - return "vnc+xvp" + return 'vnc+xvp' def get_port(self, context): - """get available port for consoles that need one""" + """Get available port for consoles that need one.""" #TODO(mdragon): implement port selection for non multiplex ports, # we are not using that, but someone else may want # it. 
return FLAGS.console_xvp_multiplex_port def setup_console(self, context, console): - """Sets up actual proxies""" + """Sets up actual proxies.""" self._rebuild_xvp_conf(context.elevated()) def teardown_console(self, context, console): - """Tears down actual proxies""" + """Tears down actual proxies.""" self._rebuild_xvp_conf(context.elevated()) def init_host(self): - """Start up any config'ed consoles on start""" + """Start up any config'ed consoles on start.""" ctxt = context.get_admin_context() self._rebuild_xvp_conf(ctxt) def fix_pool_password(self, password): - """Trim password to length, and encode""" + """Trim password to length, and encode.""" return self._xvp_encrypt(password, is_pool_password=True) def fix_console_password(self, password): - """Trim password to length, and encode""" + """Trim password to length, and encode.""" return self._xvp_encrypt(password) def _rebuild_xvp_conf(self, context): - logging.debug(_("Rebuilding xvp conf")) + logging.debug(_('Rebuilding xvp conf')) pools = [pool for pool in db.console_pool_get_all_by_host_type(context, self.host, self.console_type) if pool['consoles']] if not pools: - logging.debug("No console pools!") + logging.debug('No console pools!') self._xvp_stop() return conf_data = {'multiplex_port': FLAGS.console_xvp_multiplex_port, 'pools': pools, 'pass_encode': self.fix_console_password} - config = str(Template(self.xvpconf_template, searchList=[conf_data])) + config = str(Template.Template(self.xvpconf_template, + searchList=[conf_data])) self._write_conf(config) self._xvp_restart() @@ -114,7 +114,7 @@ class XVPConsoleProxy(object): cfile.write(config) def _xvp_stop(self): - logging.debug(_("Stopping xvp")) + logging.debug(_('Stopping xvp')) pid = self._xvp_pid() if not pid: return @@ -127,19 +127,19 @@ class XVPConsoleProxy(object): def _xvp_start(self): if self._xvp_check_running(): return - logging.debug(_("Starting xvp")) + logging.debug(_('Starting xvp')) try: utils.execute('xvp', '-p', FLAGS.console_xvp_pid, '-c', FLAGS.console_xvp_conf, '-l', FLAGS.console_xvp_log) except exception.ProcessExecutionError, err: - logging.error(_("Error starting xvp: %s") % err) + logging.error(_('Error starting xvp: %s') % err) def _xvp_restart(self): - logging.debug(_("Restarting xvp")) + logging.debug(_('Restarting xvp')) if not self._xvp_check_running(): - logging.debug(_("xvp not running...")) + logging.debug(_('xvp not running...')) self._xvp_start() else: pid = self._xvp_pid() @@ -178,7 +178,9 @@ class XVPConsoleProxy(object): Note that xvp's obfuscation should not be considered 'real' encryption. It simply DES encrypts the passwords with static keys plainly viewable - in the xvp source code.""" + in the xvp source code. 
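_rebuild_xvp_conf() above renders the XVP configuration by feeding a plain dictionary to a Cheetah template via searchList. A toy rendering in the same style, assuming only that Cheetah is installed; the one-line template string stands in for console/xvp.conf.template:

    from Cheetah import Template

    source = 'multiplex port = $multiplex_port'
    conf_data = {'multiplex_port': 5900}
    # Placeholders such as $multiplex_port are resolved against the searchList dicts.
    rendered = str(Template.Template(source, searchList=[conf_data]))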
+ + """ maxlen = 8 flag = '-e' if is_pool_password: From 740547fcb1aa02c31a362d1be2d4a27b3799e36a Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 20 Apr 2011 12:06:10 -0700 Subject: [PATCH 51/66] fixed indentation --- nova/console/vmrc_manager.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/nova/console/vmrc_manager.py b/nova/console/vmrc_manager.py index 3c6b777823ab..acecc1075c5b 100644 --- a/nova/console/vmrc_manager.py +++ b/nova/console/vmrc_manager.py @@ -51,10 +51,11 @@ class ConsoleVMRCManager(manager.Manager): """Get VIM session for the pool specified.""" vim_session = None if pool['id'] not in self.sessions.keys(): - vim_session = vmwareapi_conn.VMWareAPISession(pool['address'], - pool['username'], - pool['password'], - FLAGS.console_vmrc_error_retries) + vim_session = vmwareapi_conn.VMWareAPISession( + pool['address'], + pool['username'], + pool['password'], + FLAGS.console_vmrc_error_retries) self.sessions[pool['id']] = vim_session return self.sessions[pool['id']] From f69600e1844898bd48dc8f615c6684044d9aebe0 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 20 Apr 2011 12:08:22 -0700 Subject: [PATCH 52/66] docstring cleanup, nova dir --- nova/context.py | 10 ++- nova/crypto.py | 61 +++++++++-------- nova/exception.py | 25 +++---- nova/fakememcache.py | 4 +- nova/flags.py | 16 +++-- nova/log.py | 49 +++++++------- nova/manager.py | 34 ++++++---- nova/quota.py | 23 ++++--- nova/rpc.py | 152 ++++++++++++++++++++++++------------------- nova/service.py | 83 +++++++++++------------ nova/test.py | 50 +++++++------- nova/utils.py | 147 ++++++++++++++++++++--------------------- nova/version.py | 6 +- nova/wsgi.py | 144 ++++++++++++++++++++-------------------- 14 files changed, 422 insertions(+), 382 deletions(-) diff --git a/nova/context.py b/nova/context.py index 0256bf448dba..c113f7ea73cd 100644 --- a/nova/context.py +++ b/nova/context.py @@ -16,9 +16,7 @@ # License for the specific language governing permissions and limitations # under the License. -""" -RequestContext: context for requests that persist through all of nova. -""" +"""RequestContext: context for requests that persist through all of nova.""" import datetime import random @@ -28,6 +26,12 @@ from nova import utils class RequestContext(object): + """Security context and request information. + + Represents the user taking a given action within the system. + + """ + def __init__(self, user, project, is_admin=None, read_deleted=False, remote_address=None, timestamp=None, request_id=None): if hasattr(user, 'id'): diff --git a/nova/crypto.py b/nova/crypto.py index 605be2a3219d..14b9cbef6b97 100644 --- a/nova/crypto.py +++ b/nova/crypto.py @@ -15,10 +15,11 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -""" -Wrappers around standard crypto data elements. + +"""Wrappers around standard crypto data elements. Includes root and intermediate CAs, SSH key_pairs and x509 certificates. 
+ """ import base64 @@ -43,6 +44,8 @@ from nova import log as logging LOG = logging.getLogger("nova.crypto") + + FLAGS = flags.FLAGS flags.DEFINE_string('ca_file', 'cacert.pem', _('Filename of root CA')) flags.DEFINE_string('key_file', @@ -90,13 +93,13 @@ def key_path(project_id=None): def fetch_ca(project_id=None, chain=True): if not FLAGS.use_project_ca: project_id = None - buffer = "" + buffer = '' if project_id: - with open(ca_path(project_id), "r") as cafile: + with open(ca_path(project_id), 'r') as cafile: buffer += cafile.read() if not chain: return buffer - with open(ca_path(None), "r") as cafile: + with open(ca_path(None), 'r') as cafile: buffer += cafile.read() return buffer @@ -143,7 +146,7 @@ def ssl_pub_to_ssh_pub(ssl_public_key, name='root', suffix='nova'): def revoke_cert(project_id, file_name): - """Revoke a cert by file name""" + """Revoke a cert by file name.""" start = os.getcwd() os.chdir(ca_folder(project_id)) # NOTE(vish): potential race condition here @@ -155,14 +158,14 @@ def revoke_cert(project_id, file_name): def revoke_certs_by_user(user_id): - """Revoke all user certs""" + """Revoke all user certs.""" admin = context.get_admin_context() for cert in db.certificate_get_all_by_user(admin, user_id): revoke_cert(cert['project_id'], cert['file_name']) def revoke_certs_by_project(project_id): - """Revoke all project certs""" + """Revoke all project certs.""" # NOTE(vish): This is somewhat useless because we can just shut down # the vpn. admin = context.get_admin_context() @@ -171,29 +174,29 @@ def revoke_certs_by_project(project_id): def revoke_certs_by_user_and_project(user_id, project_id): - """Revoke certs for user in project""" + """Revoke certs for user in project.""" admin = context.get_admin_context() for cert in db.certificate_get_all_by_user(admin, user_id, project_id): revoke_cert(cert['project_id'], cert['file_name']) def _project_cert_subject(project_id): - """Helper to generate user cert subject""" + """Helper to generate user cert subject.""" return FLAGS.project_cert_subject % (project_id, utils.isotime()) def _vpn_cert_subject(project_id): - """Helper to generate user cert subject""" + """Helper to generate user cert subject.""" return FLAGS.vpn_cert_subject % (project_id, utils.isotime()) def _user_cert_subject(user_id, project_id): - """Helper to generate user cert subject""" + """Helper to generate user cert subject.""" return FLAGS.user_cert_subject % (project_id, user_id, utils.isotime()) def generate_x509_cert(user_id, project_id, bits=1024): - """Generate and sign a cert for user in project""" + """Generate and sign a cert for user in project.""" subject = _user_cert_subject(user_id, project_id) tmpdir = tempfile.mkdtemp() keyfile = os.path.abspath(os.path.join(tmpdir, 'temp.key')) @@ -205,7 +208,7 @@ def generate_x509_cert(user_id, project_id, bits=1024): csr = open(csrfile).read() shutil.rmtree(tmpdir) (serial, signed_csr) = sign_csr(csr, project_id) - fname = os.path.join(ca_folder(project_id), "newcerts/%s.pem" % serial) + fname = os.path.join(ca_folder(project_id), 'newcerts/%s.pem' % serial) cert = {'user_id': user_id, 'project_id': project_id, 'file_name': fname} @@ -227,8 +230,8 @@ def _ensure_project_folder(project_id): def generate_vpn_files(project_id): project_folder = ca_folder(project_id) - csr_fn = os.path.join(project_folder, "server.csr") - crt_fn = os.path.join(project_folder, "server.crt") + csr_fn = os.path.join(project_folder, 'server.csr') + crt_fn = os.path.join(project_folder, 'server.crt') genvpn_sh_path = 
os.path.join(os.path.dirname(__file__), 'CA', @@ -241,10 +244,10 @@ def generate_vpn_files(project_id): # TODO(vish): the shell scripts could all be done in python utils.execute('sh', genvpn_sh_path, project_id, _vpn_cert_subject(project_id)) - with open(csr_fn, "r") as csrfile: + with open(csr_fn, 'r') as csrfile: csr_text = csrfile.read() (serial, signed_csr) = sign_csr(csr_text, project_id) - with open(crt_fn, "w") as crtfile: + with open(crt_fn, 'w') as crtfile: crtfile.write(signed_csr) os.chdir(start) @@ -261,12 +264,12 @@ def sign_csr(csr_text, project_id=None): def _sign_csr(csr_text, ca_folder): tmpfolder = tempfile.mkdtemp() - inbound = os.path.join(tmpfolder, "inbound.csr") - outbound = os.path.join(tmpfolder, "outbound.csr") - csrfile = open(inbound, "w") + inbound = os.path.join(tmpfolder, 'inbound.csr') + outbound = os.path.join(tmpfolder, 'outbound.csr') + csrfile = open(inbound, 'w') csrfile.write(csr_text) csrfile.close() - LOG.debug(_("Flags path: %s"), ca_folder) + LOG.debug(_('Flags path: %s'), ca_folder) start = os.getcwd() # Change working dir to CA if not os.path.exists(ca_folder): @@ -276,13 +279,13 @@ def _sign_csr(csr_text, ca_folder): './openssl.cnf', '-infiles', inbound) out, _err = utils.execute('openssl', 'x509', '-in', outbound, '-serial', '-noout') - serial = string.strip(out.rpartition("=")[2]) + serial = string.strip(out.rpartition('=')[2]) os.chdir(start) - with open(outbound, "r") as crtfile: + with open(outbound, 'r') as crtfile: return (serial, crtfile.read()) -def mkreq(bits, subject="foo", ca=0): +def mkreq(bits, subject='foo', ca=0): pk = M2Crypto.EVP.PKey() req = M2Crypto.X509.Request() rsa = M2Crypto.RSA.gen_key(bits, 65537, callback=lambda: None) @@ -314,7 +317,7 @@ def mkcacert(subject='nova', years=1): cert.set_not_before(now) cert.set_not_after(nowPlusYear) issuer = M2Crypto.X509.X509_Name() - issuer.C = "US" + issuer.C = 'US' issuer.CN = subject cert.set_issuer(issuer) cert.set_pubkey(pkey) @@ -352,13 +355,15 @@ def mkcacert(subject='nova', years=1): # http://code.google.com/p/boto def compute_md5(fp): - """ + """Compute an md5 hash. + :type fp: file :param fp: File pointer to the file to MD5 hash. The file pointer will be reset to the beginning of the file before the method returns. :rtype: tuple - :return: the hex digest version of the MD5 hash + :returns: the hex digest version of the MD5 hash + """ m = hashlib.md5() fp.seek(0) diff --git a/nova/exception.py b/nova/exception.py index 4e2bbdbaf956..3123b2f1f985 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -16,31 +16,34 @@ # License for the specific language governing permissions and limitations # under the License. -""" -Nova base exception handling, including decorator for re-raising -Nova-type exceptions. SHOULD include dedicated exception logging. +"""Nova base exception handling. + +Includes decorator for re-raising Nova-type exceptions. + +SHOULD include dedicated exception logging. 
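compute_md5() above is documented to take an open file object, rewind it before returning, and give back the MD5 digest information. A cautious usage sketch; the path is a placeholder, and the exact return shape is only as described by the docstring above:

    from nova import crypto

    # Any readable file will do; this path is illustrative.
    with open('/tmp/disk.img', 'rb') as fp:
        digest = crypto.compute_md5(fp)
        # Per the docstring, fp has been repositioned at the start of the file here.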
+ """ from nova import log as logging + + LOG = logging.getLogger('nova.exception') class ProcessExecutionError(IOError): - def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, description=None): if description is None: - description = _("Unexpected error while running command.") + description = _('Unexpected error while running command.') if exit_code is None: exit_code = '-' - message = _("%(description)s\nCommand: %(cmd)s\n" - "Exit code: %(exit_code)s\nStdout: %(stdout)r\n" - "Stderr: %(stderr)r") % locals() + message = _('%(description)s\nCommand: %(cmd)s\n' + 'Exit code: %(exit_code)s\nStdout: %(stdout)r\n' + 'Stderr: %(stderr)r') % locals() IOError.__init__(self, message) class Error(Exception): - def __init__(self, message=None): super(Error, self).__init__(message) @@ -97,7 +100,7 @@ class TimeoutException(Error): class DBError(Error): - """Wraps an implementation specific exception""" + """Wraps an implementation specific exception.""" def __init__(self, inner_exception): self.inner_exception = inner_exception super(DBError, self).__init__(str(inner_exception)) @@ -108,7 +111,7 @@ def wrap_db_error(f): try: return f(*args, **kwargs) except Exception, e: - LOG.exception(_('DB exception wrapped')) + LOG.exception(_('DB exception wrapped.')) raise DBError(e) return _wrap _wrap.func_name = f.func_name diff --git a/nova/fakememcache.py b/nova/fakememcache.py index 67f46dbdca06..e4f238aa9f3a 100644 --- a/nova/fakememcache.py +++ b/nova/fakememcache.py @@ -18,14 +18,14 @@ """Super simple fake memcache client.""" -import utils +from nova import utils class Client(object): """Replicates a tiny subset of memcached client interface.""" def __init__(self, *args, **kwargs): - """Ignores the passed in args""" + """Ignores the passed in args.""" self.cache = {} def get(self, key): diff --git a/nova/flags.py b/nova/flags.py index f011ab383dd0..d1b93f0a85a4 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -16,9 +16,13 @@ # License for the specific language governing permissions and limitations # under the License. -""" +"""Command-line flag library. + +Wraps gflags. + Package-level global flags are defined here, the rest are defined where they're used. + """ import getopt @@ -145,10 +149,12 @@ class FlagValues(gflags.FlagValues): class StrWrapper(object): - """Wrapper around FlagValues objects + """Wrapper around FlagValues objects. Wraps FlagValues objects for string.Template so that we're - sure to return strings.""" + sure to return strings. + + """ def __init__(self, context_objs): self.context_objs = context_objs @@ -169,6 +175,7 @@ def _GetCallingModule(): We generally use this function to get the name of the module calling a DEFINE_foo... function. + """ # Walk down the stack to find the first globals dict that's not ours. for depth in range(1, sys.getrecursionlimit()): @@ -192,6 +199,7 @@ def __GetModuleName(globals_dict): Returns: A string (the name of the module) or None (if the module could not be identified. 
+ """ for name, module in sys.modules.iteritems(): if getattr(module, '__dict__', None) is globals_dict: @@ -326,7 +334,7 @@ DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger') DEFINE_string('state_path', os.path.join(os.path.dirname(__file__), '../'), "Top-level directory for maintaining nova's state") DEFINE_string('lock_path', os.path.join(os.path.dirname(__file__), '../'), - "Directory for lock files") + 'Directory for lock files') DEFINE_string('logdir', None, 'output to a per-service log file in named ' 'directory') diff --git a/nova/log.py b/nova/log.py index ea94be19419c..096279f7ca19 100644 --- a/nova/log.py +++ b/nova/log.py @@ -16,16 +16,15 @@ # License for the specific language governing permissions and limitations # under the License. -""" -Nova logging handler. +"""Nova logging handler. This module adds to logging functionality by adding the option to specify a context object when calling the various log methods. If the context object is not specified, default formatting is used. It also allows setting of formatting information through flags. -""" +""" import cStringIO import inspect @@ -41,34 +40,28 @@ from nova import version FLAGS = flags.FLAGS - flags.DEFINE_string('logging_context_format_string', '%(asctime)s %(levelname)s %(name)s ' '[%(request_id)s %(user)s ' '%(project)s] %(message)s', 'format string to use for log messages with context') - flags.DEFINE_string('logging_default_format_string', '%(asctime)s %(levelname)s %(name)s [-] ' '%(message)s', 'format string to use for log messages without context') - flags.DEFINE_string('logging_debug_format_suffix', 'from (pid=%(process)d) %(funcName)s' ' %(pathname)s:%(lineno)d', 'data to append to log format when level is DEBUG') - flags.DEFINE_string('logging_exception_prefix', '(%(name)s): TRACE: ', 'prefix each line of exception output with this format') - flags.DEFINE_list('default_log_levels', ['amqplib=WARN', 'sqlalchemy=WARN', 'boto=WARN', 'eventlet.wsgi.server=WARN'], 'list of logger=LEVEL pairs') - flags.DEFINE_bool('use_syslog', False, 'output to syslog') flags.DEFINE_string('logfile', None, 'output to named file') @@ -83,6 +76,8 @@ WARN = logging.WARN INFO = logging.INFO DEBUG = logging.DEBUG NOTSET = logging.NOTSET + + # methods getLogger = logging.getLogger debug = logging.debug @@ -93,6 +88,8 @@ error = logging.error exception = logging.exception critical = logging.critical log = logging.log + + # handlers StreamHandler = logging.StreamHandler WatchedFileHandler = logging.handlers.WatchedFileHandler @@ -127,17 +124,18 @@ def _get_log_file_path(binary=None): class NovaLogger(logging.Logger): - """ - NovaLogger manages request context and formatting. + """NovaLogger manages request context and formatting. This becomes the class that is instanciated by logging.getLogger. 
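NovaLogger above accepts the request context as a keyword argument on every log call, so the context-aware format string can include the request id, user, and project. A hedged sketch of the calling side (logger name and message text are invented):

    from nova import context
    from nova import log as logging

    LOG = logging.getLogger('nova.example')

    ctxt = context.get_admin_context()
    # The context kwarg is unpacked by NovaLogger._log() into the record's extra
    # fields; audit() logs at the custom AUDIT level defined earlier in log.py.
    LOG.audit('example audit message', context=ctxt)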
+ """ + def __init__(self, name, level=NOTSET): logging.Logger.__init__(self, name, level) self.setup_from_flags() def setup_from_flags(self): - """Setup logger from flags""" + """Setup logger from flags.""" level = NOTSET for pair in FLAGS.default_log_levels: logger, _sep, level_name = pair.partition('=') @@ -148,7 +146,7 @@ class NovaLogger(logging.Logger): self.setLevel(level) def _log(self, level, msg, args, exc_info=None, extra=None, context=None): - """Extract context from any log call""" + """Extract context from any log call.""" if not extra: extra = {} if context: @@ -157,17 +155,17 @@ class NovaLogger(logging.Logger): return logging.Logger._log(self, level, msg, args, exc_info, extra) def addHandler(self, handler): - """Each handler gets our custom formatter""" + """Each handler gets our custom formatter.""" handler.setFormatter(_formatter) return logging.Logger.addHandler(self, handler) def audit(self, msg, *args, **kwargs): - """Shortcut for our AUDIT level""" + """Shortcut for our AUDIT level.""" if self.isEnabledFor(AUDIT): self._log(AUDIT, msg, args, **kwargs) def exception(self, msg, *args, **kwargs): - """Logging.exception doesn't handle kwargs, so breaks context""" + """Logging.exception doesn't handle kwargs, so breaks context.""" if not kwargs.get('exc_info'): kwargs['exc_info'] = 1 self.error(msg, *args, **kwargs) @@ -181,14 +179,13 @@ class NovaLogger(logging.Logger): for k in env.keys(): if not isinstance(env[k], str): env.pop(k) - message = "Environment: %s" % json.dumps(env) + message = 'Environment: %s' % json.dumps(env) kwargs.pop('exc_info') self.error(message, **kwargs) class NovaFormatter(logging.Formatter): - """ - A nova.context.RequestContext aware formatter configured through flags. + """A nova.context.RequestContext aware formatter configured through flags. The flags used to set format strings are: logging_context_foramt_string and logging_default_format_string. 
You can also specify @@ -197,10 +194,11 @@ class NovaFormatter(logging.Formatter): For information about what variables are available for the formatter see: http://docs.python.org/library/logging.html#formatter + """ def format(self, record): - """Uses contextstring if request_id is set, otherwise default""" + """Uses contextstring if request_id is set, otherwise default.""" if record.__dict__.get('request_id', None): self._fmt = FLAGS.logging_context_format_string else: @@ -214,20 +212,21 @@ class NovaFormatter(logging.Formatter): return logging.Formatter.format(self, record) def formatException(self, exc_info, record=None): - """Format exception output with FLAGS.logging_exception_prefix""" + """Format exception output with FLAGS.logging_exception_prefix.""" if not record: return logging.Formatter.formatException(self, exc_info) stringbuffer = cStringIO.StringIO() traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], None, stringbuffer) - lines = stringbuffer.getvalue().split("\n") + lines = stringbuffer.getvalue().split('\n') stringbuffer.close() formatted_lines = [] for line in lines: pl = FLAGS.logging_exception_prefix % record.__dict__ - fl = "%s%s" % (pl, line) + fl = '%s%s' % (pl, line) formatted_lines.append(fl) - return "\n".join(formatted_lines) + return '\n'.join(formatted_lines) + _formatter = NovaFormatter() @@ -241,7 +240,7 @@ class NovaRootLogger(NovaLogger): NovaLogger.__init__(self, name, level) def setup_from_flags(self): - """Setup logger from flags""" + """Setup logger from flags.""" global _filelog if FLAGS.use_syslog: self.syslog = SysLogHandler(address='/dev/log') diff --git a/nova/manager.py b/nova/manager.py index 804a50479660..34338ac0436d 100644 --- a/nova/manager.py +++ b/nova/manager.py @@ -16,7 +16,8 @@ # License for the specific language governing permissions and limitations # under the License. -""" +"""Base Manager class. + Managers are responsible for a certain aspect of the sytem. It is a logical grouping of code relating to a portion of the system. In general other components should be using the manager to make changes to the components that @@ -49,16 +50,19 @@ Managers will often provide methods for initial setup of a host or periodic tasksto a wrapping service. This module provides Manager, a base class for managers. + """ -from nova import utils from nova import flags from nova import log as logging +from nova import utils from nova.db import base from nova.scheduler import api + FLAGS = flags.FLAGS + LOG = logging.getLogger('nova.manager') @@ -70,23 +74,29 @@ class Manager(base.Base): super(Manager, self).__init__(db_driver) def periodic_tasks(self, context=None): - """Tasks to be run at a periodic interval""" + """Tasks to be run at a periodic interval.""" pass def init_host(self): - """Do any initialization that needs to be run if this is a standalone - service. Child classes should override this method.""" + """Handle initialization if this is a standalone service. + + Child classes should override this method. + + """ pass class SchedulerDependentManager(Manager): """Periodically send capability updates to the Scheduler services. - Services that need to update the Scheduler of their capabilities - should derive from this class. Otherwise they can derive from - manager.Manager directly. 
Updates are only sent after - update_service_capabilities is called with non-None values.""" - def __init__(self, host=None, db_driver=None, service_name="undefined"): + Services that need to update the Scheduler of their capabilities + should derive from this class. Otherwise they can derive from + manager.Manager directly. Updates are only sent after + update_service_capabilities is called with non-None values. + + """ + + def __init__(self, host=None, db_driver=None, service_name='undefined'): self.last_capabilities = None self.service_name = service_name super(SchedulerDependentManager, self).__init__(host, db_driver) @@ -96,9 +106,9 @@ class SchedulerDependentManager(Manager): self.last_capabilities = capabilities def periodic_tasks(self, context=None): - """Pass data back to the scheduler at a periodic interval""" + """Pass data back to the scheduler at a periodic interval.""" if self.last_capabilities: - LOG.debug(_("Notifying Schedulers of capabilities ...")) + LOG.debug(_('Notifying Schedulers of capabilities ...')) api.update_service_capabilities(context, self.service_name, self.host, self.last_capabilities) diff --git a/nova/quota.py b/nova/quota.py index 2b24c0b5bd8c..d8b5d9a93a2d 100644 --- a/nova/quota.py +++ b/nova/quota.py @@ -15,16 +15,15 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -""" -Quotas for instances, volumes, and floating ips -""" + +"""Quotas for instances, volumes, and floating ips.""" from nova import db from nova import exception from nova import flags -FLAGS = flags.FLAGS +FLAGS = flags.FLAGS flags.DEFINE_integer('quota_instances', 10, 'number of instances allowed per project') flags.DEFINE_integer('quota_cores', 20, @@ -64,7 +63,7 @@ def get_quota(context, project_id): def allowed_instances(context, num_instances, instance_type): - """Check quota and return min(num_instances, allowed_instances)""" + """Check quota and return min(num_instances, allowed_instances).""" project_id = context.project_id context = context.elevated() used_instances, used_cores = db.instance_data_get_for_project(context, @@ -79,7 +78,7 @@ def allowed_instances(context, num_instances, instance_type): def allowed_volumes(context, num_volumes, size): - """Check quota and return min(num_volumes, allowed_volumes)""" + """Check quota and return min(num_volumes, allowed_volumes).""" project_id = context.project_id context = context.elevated() used_volumes, used_gigabytes = db.volume_data_get_for_project(context, @@ -95,7 +94,7 @@ def allowed_volumes(context, num_volumes, size): def allowed_floating_ips(context, num_floating_ips): - """Check quota and return min(num_floating_ips, allowed_floating_ips)""" + """Check quota and return min(num_floating_ips, allowed_floating_ips).""" project_id = context.project_id context = context.elevated() used_floating_ips = db.floating_ip_count_by_project(context, project_id) @@ -105,7 +104,7 @@ def allowed_floating_ips(context, num_floating_ips): def allowed_metadata_items(context, num_metadata_items): - """Check quota; return min(num_metadata_items,allowed_metadata_items)""" + """Check quota; return min(num_metadata_items,allowed_metadata_items).""" project_id = context.project_id context = context.elevated() quota = get_quota(context, project_id) @@ -114,20 +113,20 @@ def allowed_metadata_items(context, num_metadata_items): def allowed_injected_files(context): - """Return the number of injected files allowed""" + """Return the 
number of injected files allowed.""" return FLAGS.quota_max_injected_files def allowed_injected_file_content_bytes(context): - """Return the number of bytes allowed per injected file content""" + """Return the number of bytes allowed per injected file content.""" return FLAGS.quota_max_injected_file_content_bytes def allowed_injected_file_path_bytes(context): - """Return the number of bytes allowed in an injected file path""" + """Return the number of bytes allowed in an injected file path.""" return FLAGS.quota_max_injected_file_path_bytes class QuotaError(exception.ApiError): - """Quota Exceeeded""" + """Quota Exceeeded.""" pass diff --git a/nova/rpc.py b/nova/rpc.py index b610cdf9b516..2116f22c3b2c 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -16,9 +16,12 @@ # License for the specific language governing permissions and limitations # under the License. -""" -AMQP-based RPC. Queues have consumers and publishers. +"""AMQP-based RPC. + +Queues have consumers and publishers. + No fan-out support yet. + """ import json @@ -40,17 +43,19 @@ from nova import log as logging from nova import utils -FLAGS = flags.FLAGS LOG = logging.getLogger('nova.rpc') + +FLAGS = flags.FLAGS flags.DEFINE_integer('rpc_thread_pool_size', 1024, 'Size of RPC thread pool') class Connection(carrot_connection.BrokerConnection): - """Connection instance object""" + """Connection instance object.""" + @classmethod def instance(cls, new=True): - """Returns the instance""" + """Returns the instance.""" if new or not hasattr(cls, '_instance'): params = dict(hostname=FLAGS.rabbit_host, port=FLAGS.rabbit_port, @@ -71,9 +76,11 @@ class Connection(carrot_connection.BrokerConnection): @classmethod def recreate(cls): - """Recreates the connection instance + """Recreates the connection instance. - This is necessary to recover from some network errors/disconnects""" + This is necessary to recover from some network errors/disconnects. + + """ try: del cls._instance except AttributeError, e: @@ -84,10 +91,12 @@ class Connection(carrot_connection.BrokerConnection): class Consumer(messaging.Consumer): - """Consumer base class + """Consumer base class. + + Contains methods for connecting the fetch method to async loops. - Contains methods for connecting the fetch method to async loops """ + def __init__(self, *args, **kwargs): for i in xrange(FLAGS.rabbit_max_retries): if i > 0: @@ -100,19 +109,18 @@ class Consumer(messaging.Consumer): fl_host = FLAGS.rabbit_host fl_port = FLAGS.rabbit_port fl_intv = FLAGS.rabbit_retry_interval - LOG.error(_("AMQP server on %(fl_host)s:%(fl_port)d is" - " unreachable: %(e)s. Trying again in %(fl_intv)d" - " seconds.") - % locals()) + LOG.error(_('AMQP server on %(fl_host)s:%(fl_port)d is' + ' unreachable: %(e)s. Trying again in %(fl_intv)d' + ' seconds.') % locals()) self.failed_connection = True if self.failed_connection: - LOG.error(_("Unable to connect to AMQP server " - "after %d tries. Shutting down."), + LOG.error(_('Unable to connect to AMQP server ' + 'after %d tries. 
Shutting down.'), FLAGS.rabbit_max_retries) sys.exit(1) def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): - """Wraps the parent fetch with some logic for failed connections""" + """Wraps the parent fetch with some logic for failed connection.""" # TODO(vish): the logic for failed connections and logging should be # refactored into some sort of connection manager object try: @@ -125,14 +133,14 @@ class Consumer(messaging.Consumer): self.declare() super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks) if self.failed_connection: - LOG.error(_("Reconnected to queue")) + LOG.error(_('Reconnected to queue')) self.failed_connection = False # NOTE(vish): This is catching all errors because we really don't # want exceptions to be logged 10 times a second if some # persistent failure occurs. except Exception, e: # pylint: disable=W0703 if not self.failed_connection: - LOG.exception(_("Failed to fetch message from queue: %s" % e)) + LOG.exception(_('Failed to fetch message from queue: %s' % e)) self.failed_connection = True def attach_to_eventlet(self): @@ -143,8 +151,9 @@ class Consumer(messaging.Consumer): class AdapterConsumer(Consumer): - """Calls methods on a proxy object based on method and args""" - def __init__(self, connection=None, topic="broadcast", proxy=None): + """Calls methods on a proxy object based on method and args.""" + + def __init__(self, connection=None, topic='broadcast', proxy=None): LOG.debug(_('Initing the Adapter Consumer for %s') % topic) self.proxy = proxy self.pool = greenpool.GreenPool(FLAGS.rpc_thread_pool_size) @@ -156,13 +165,14 @@ class AdapterConsumer(Consumer): @exception.wrap_exception def _receive(self, message_data, message): - """Magically looks for a method on the proxy object and calls it + """Magically looks for a method on the proxy object and calls it. 
Message data should be a dictionary with two keys: method: string representing the method to call args: dictionary of arg: value Example: {'method': 'echo', 'args': {'value': 42}} + """ LOG.debug(_('received %s') % message_data) msg_id = message_data.pop('_msg_id', None) @@ -189,22 +199,23 @@ class AdapterConsumer(Consumer): if msg_id: msg_reply(msg_id, rval, None) except Exception as e: - logging.exception("Exception during message handling") + logging.exception('Exception during message handling') if msg_id: msg_reply(msg_id, None, sys.exc_info()) return class Publisher(messaging.Publisher): - """Publisher base class""" + """Publisher base class.""" pass class TopicAdapterConsumer(AdapterConsumer): - """Consumes messages on a specific topic""" - exchange_type = "topic" + """Consumes messages on a specific topic.""" - def __init__(self, connection=None, topic="broadcast", proxy=None): + exchange_type = 'topic' + + def __init__(self, connection=None, topic='broadcast', proxy=None): self.queue = topic self.routing_key = topic self.exchange = FLAGS.control_exchange @@ -214,27 +225,29 @@ class TopicAdapterConsumer(AdapterConsumer): class FanoutAdapterConsumer(AdapterConsumer): - """Consumes messages from a fanout exchange""" - exchange_type = "fanout" + """Consumes messages from a fanout exchange.""" - def __init__(self, connection=None, topic="broadcast", proxy=None): - self.exchange = "%s_fanout" % topic + exchange_type = 'fanout' + + def __init__(self, connection=None, topic='broadcast', proxy=None): + self.exchange = '%s_fanout' % topic self.routing_key = topic unique = uuid.uuid4().hex - self.queue = "%s_fanout_%s" % (topic, unique) + self.queue = '%s_fanout_%s' % (topic, unique) self.durable = False - LOG.info(_("Created '%(exchange)s' fanout exchange " - "with '%(key)s' routing key"), - dict(exchange=self.exchange, key=self.routing_key)) + LOG.info(_('Created "%(exchange)s" fanout exchange ' + 'with "%(key)s" routing key'), + dict(exchange=self.exchange, key=self.routing_key)) super(FanoutAdapterConsumer, self).__init__(connection=connection, topic=topic, proxy=proxy) class TopicPublisher(Publisher): - """Publishes messages on a specific topic""" - exchange_type = "topic" + """Publishes messages on a specific topic.""" - def __init__(self, connection=None, topic="broadcast"): + exchange_type = 'topic' + + def __init__(self, connection=None, topic='broadcast'): self.routing_key = topic self.exchange = FLAGS.control_exchange self.durable = False @@ -243,20 +256,22 @@ class TopicPublisher(Publisher): class FanoutPublisher(Publisher): """Publishes messages to a fanout exchange.""" - exchange_type = "fanout" + + exchange_type = 'fanout' def __init__(self, topic, connection=None): - self.exchange = "%s_fanout" % topic - self.queue = "%s_fanout" % topic + self.exchange = '%s_fanout' % topic + self.queue = '%s_fanout' % topic self.durable = False - LOG.info(_("Creating '%(exchange)s' fanout exchange"), - dict(exchange=self.exchange)) + LOG.info(_('Creating "%(exchange)s" fanout exchange'), + dict(exchange=self.exchange)) super(FanoutPublisher, self).__init__(connection=connection) class DirectConsumer(Consumer): - """Consumes messages directly on a channel specified by msg_id""" - exchange_type = "direct" + """Consumes messages directly on a channel specified by msg_id.""" + + exchange_type = 'direct' def __init__(self, connection=None, msg_id=None): self.queue = msg_id @@ -268,8 +283,9 @@ class DirectConsumer(Consumer): class DirectPublisher(Publisher): - """Publishes messages directly on a 
channel specified by msg_id""" - exchange_type = "direct" + """Publishes messages directly on a channel specified by msg_id.""" + + exchange_type = 'direct' def __init__(self, connection=None, msg_id=None): self.routing_key = msg_id @@ -279,9 +295,9 @@ class DirectPublisher(Publisher): def msg_reply(msg_id, reply=None, failure=None): - """Sends a reply or an error on the channel signified by msg_id + """Sends a reply or an error on the channel signified by msg_id. - failure should be a sys.exc_info() tuple. + Failure should be a sys.exc_info() tuple. """ if failure: @@ -303,17 +319,20 @@ def msg_reply(msg_id, reply=None, failure=None): class RemoteError(exception.Error): - """Signifies that a remote class has raised an exception + """Signifies that a remote class has raised an exception. Containes a string representation of the type of the original exception, the value of the original exception, and the traceback. These are sent to the parent as a joined string so printing the exception - contains all of the relevent info.""" + contains all of the relevent info. + + """ + def __init__(self, exc_type, value, traceback): self.exc_type = exc_type self.value = value self.traceback = traceback - super(RemoteError, self).__init__("%s %s\n%s" % (exc_type, + super(RemoteError, self).__init__('%s %s\n%s' % (exc_type, value, traceback)) @@ -339,6 +358,7 @@ def _pack_context(msg, context): context out into a bunch of separate keys. If we want to support more arguments in rabbit messages, we may want to do the same for args at some point. + """ context = dict([('_context_%s' % key, value) for (key, value) in context.to_dict().iteritems()]) @@ -346,11 +366,11 @@ def _pack_context(msg, context): def call(context, topic, msg): - """Sends a message on a topic and wait for a response""" - LOG.debug(_("Making asynchronous call on %s ..."), topic) + """Sends a message on a topic and wait for a response.""" + LOG.debug(_('Making asynchronous call on %s ...'), topic) msg_id = uuid.uuid4().hex msg.update({'_msg_id': msg_id}) - LOG.debug(_("MSG_ID is %s") % (msg_id)) + LOG.debug(_('MSG_ID is %s') % (msg_id)) _pack_context(msg, context) class WaitMessage(object): @@ -387,8 +407,8 @@ def call(context, topic, msg): def cast(context, topic, msg): - """Sends a message on a topic without waiting for a response""" - LOG.debug(_("Making asynchronous cast on %s..."), topic) + """Sends a message on a topic without waiting for a response.""" + LOG.debug(_('Making asynchronous cast on %s...'), topic) _pack_context(msg, context) conn = Connection.instance() publisher = TopicPublisher(connection=conn, topic=topic) @@ -397,8 +417,8 @@ def cast(context, topic, msg): def fanout_cast(context, topic, msg): - """Sends a message on a fanout exchange without waiting for a response""" - LOG.debug(_("Making asynchronous fanout cast...")) + """Sends a message on a fanout exchange without waiting for a response.""" + LOG.debug(_('Making asynchronous fanout cast...')) _pack_context(msg, context) conn = Connection.instance() publisher = FanoutPublisher(topic, connection=conn) @@ -407,14 +427,14 @@ def fanout_cast(context, topic, msg): def generic_response(message_data, message): - """Logs a result and exits""" + """Logs a result and exits.""" LOG.debug(_('response %s'), message_data) message.ack() sys.exit(0) def send_message(topic, message, wait=True): - """Sends a message for testing""" + """Sends a message for testing.""" msg_id = uuid.uuid4().hex message.update({'_msg_id': msg_id}) LOG.debug(_('topic is %s'), topic) @@ -425,14 +445,14 
@@ def send_message(topic, message, wait=True): queue=msg_id, exchange=msg_id, auto_delete=True, - exchange_type="direct", + exchange_type='direct', routing_key=msg_id) consumer.register_callback(generic_response) publisher = messaging.Publisher(connection=Connection.instance(), exchange=FLAGS.control_exchange, durable=False, - exchange_type="topic", + exchange_type='topic', routing_key=topic) publisher.send(message) publisher.close() @@ -441,8 +461,8 @@ def send_message(topic, message, wait=True): consumer.wait() -if __name__ == "__main__": - # NOTE(vish): you can send messages from the command line using - # topic and a json sting representing a dictionary - # for the method +if __name__ == '__main__': + # You can send messages from the command line using + # topic and a json string representing a dictionary + # for the method send_message(sys.argv[1], json.loads(sys.argv[2])) diff --git a/nova/service.py b/nova/service.py index 47c0b96c0a75..2532b9df25a3 100644 --- a/nova/service.py +++ b/nova/service.py @@ -17,9 +17,7 @@ # License for the specific language governing permissions and limitations # under the License. -""" -Generic Node baseclass for all workers that run on hosts -""" +"""Generic Node baseclass for all workers that run on hosts.""" import inspect import os @@ -30,13 +28,11 @@ from eventlet import event from eventlet import greenthread from eventlet import greenpool -from sqlalchemy.exc import OperationalError - from nova import context from nova import db from nova import exception -from nova import log as logging from nova import flags +from nova import log as logging from nova import rpc from nova import utils from nova import version @@ -79,7 +75,7 @@ class Service(object): def start(self): vcs_string = version.version_string_with_vcs() - logging.audit(_("Starting %(topic)s node (version %(vcs_string)s)"), + logging.audit(_('Starting %(topic)s node (version %(vcs_string)s)'), {'topic': self.topic, 'vcs_string': vcs_string}) self.manager.init_host() self.model_disconnected = False @@ -140,29 +136,24 @@ class Service(object): return getattr(manager, key) @classmethod - def create(cls, - host=None, - binary=None, - topic=None, - manager=None, - report_interval=None, - periodic_interval=None): + def create(cls, host=None, binary=None, topic=None, manager=None, + report_interval=None, periodic_interval=None): """Instantiates class and passes back application object. 
- Args: - host, defaults to FLAGS.host - binary, defaults to basename of executable - topic, defaults to bin_name - "nova-" part - manager, defaults to FLAGS._manager - report_interval, defaults to FLAGS.report_interval - periodic_interval, defaults to FLAGS.periodic_interval + :param host: defaults to FLAGS.host + :param binary: defaults to basename of executable + :param topic: defaults to bin_name - 'nova-' part + :param manager: defaults to FLAGS._manager + :param report_interval: defaults to FLAGS.report_interval + :param periodic_interval: defaults to FLAGS.periodic_interval + """ if not host: host = FLAGS.host if not binary: binary = os.path.basename(inspect.stack()[-1][1]) if not topic: - topic = binary.rpartition("nova-")[2] + topic = binary.rpartition('nova-')[2] if not manager: manager = FLAGS.get('%s_manager' % topic, None) if not report_interval: @@ -175,12 +166,12 @@ class Service(object): return service_obj def kill(self): - """Destroy the service object in the datastore""" + """Destroy the service object in the datastore.""" self.stop() try: db.service_destroy(context.get_admin_context(), self.service_id) except exception.NotFound: - logging.warn(_("Service killed that has no database entry")) + logging.warn(_('Service killed that has no database entry')) def stop(self): for x in self.timers: @@ -198,7 +189,7 @@ class Service(object): pass def periodic_tasks(self): - """Tasks to be run at a periodic interval""" + """Tasks to be run at a periodic interval.""" self.manager.periodic_tasks(context.get_admin_context()) def report_state(self): @@ -208,8 +199,8 @@ class Service(object): try: service_ref = db.service_get(ctxt, self.service_id) except exception.NotFound: - logging.debug(_("The service database object disappeared, " - "Recreating it.")) + logging.debug(_('The service database object disappeared, ' + 'Recreating it.')) self._create_service_ref(ctxt) service_ref = db.service_get(ctxt, self.service_id) @@ -218,23 +209,24 @@ class Service(object): {'report_count': service_ref['report_count'] + 1}) # TODO(termie): make this pattern be more elegant. - if getattr(self, "model_disconnected", False): + if getattr(self, 'model_disconnected', False): self.model_disconnected = False - logging.error(_("Recovered model server connection!")) + logging.error(_('Recovered model server connection!')) # TODO(vish): this should probably only catch connection errors except Exception: # pylint: disable=W0702 - if not getattr(self, "model_disconnected", False): + if not getattr(self, 'model_disconnected', False): self.model_disconnected = True - logging.exception(_("model server went away")) + logging.exception(_('model server went away')) class WsgiService(object): """Base class for WSGI based services. 
For each api you define, you must also define these flags: - :_listen: The address on which to listen - :_listen_port: The port on which to listen + :_listen: The address on which to listen + :_listen_port: The port on which to listen + """ def __init__(self, conf, apis): @@ -250,13 +242,14 @@ class WsgiService(object): class ApiService(WsgiService): - """Class for our nova-api service""" + """Class for our nova-api service.""" + @classmethod def create(cls, conf=None): if not conf: conf = wsgi.paste_config_file(FLAGS.api_paste_config) if not conf: - message = (_("No paste configuration found for: %s"), + message = (_('No paste configuration found for: %s'), FLAGS.api_paste_config) raise exception.Error(message) api_endpoints = ['ec2', 'osapi'] @@ -280,11 +273,11 @@ def serve(*services): FLAGS.ParseNewFlags() name = '_'.join(x.binary for x in services) - logging.debug(_("Serving %s"), name) - logging.debug(_("Full set of FLAGS:")) + logging.debug(_('Serving %s'), name) + logging.debug(_('Full set of FLAGS:')) for flag in FLAGS: flag_get = FLAGS.get(flag, None) - logging.debug("%(flag)s : %(flag_get)s" % locals()) + logging.debug('%(flag)s : %(flag_get)s' % locals()) for x in services: x.start() @@ -315,20 +308,20 @@ def serve_wsgi(cls, conf=None): def _run_wsgi(paste_config_file, apis): - logging.debug(_("Using paste.deploy config at: %s"), paste_config_file) + logging.debug(_('Using paste.deploy config at: %s'), paste_config_file) apps = [] for api in apis: config = wsgi.load_paste_configuration(paste_config_file, api) if config is None: - logging.debug(_("No paste configuration for app: %s"), api) + logging.debug(_('No paste configuration for app: %s'), api) continue - logging.debug(_("App Config: %(api)s\n%(config)r") % locals()) - logging.info(_("Running %s API"), api) + logging.debug(_('App Config: %(api)s\n%(config)r') % locals()) + logging.info(_('Running %s API'), api) app = wsgi.load_paste_app(paste_config_file, api) - apps.append((app, getattr(FLAGS, "%s_listen_port" % api), - getattr(FLAGS, "%s_listen" % api))) + apps.append((app, getattr(FLAGS, '%s_listen_port' % api), + getattr(FLAGS, '%s_listen' % api))) if len(apps) == 0: - logging.error(_("No known API applications configured in %s."), + logging.error(_('No known API applications configured in %s.'), paste_config_file) return diff --git a/nova/test.py b/nova/test.py index 3b608520a499..4deb2a17570d 100644 --- a/nova/test.py +++ b/nova/test.py @@ -16,12 +16,12 @@ # License for the specific language governing permissions and limitations # under the License. -""" -Base classes for our unit tests. -Allows overriding of flags for use of fakes, -and some black magic for inline callbacks. -""" +"""Base classes for our unit tests. +Allows overriding of flags for use of fakes, and some black magic for +inline callbacks. 
+ +""" import datetime import functools @@ -52,9 +52,9 @@ flags.DEFINE_bool('fake_tests', True, def skip_if_fake(func): - """Decorator that skips a test if running in fake mode""" + """Decorator that skips a test if running in fake mode.""" def _skipper(*args, **kw): - """Wrapped skipper function""" + """Wrapped skipper function.""" if FLAGS.fake_tests: raise unittest.SkipTest('Test cannot be run in fake mode') else: @@ -63,9 +63,10 @@ def skip_if_fake(func): class TestCase(unittest.TestCase): - """Test case base class for all unit tests""" + """Test case base class for all unit tests.""" + def setUp(self): - """Run before each test method to initialize test environment""" + """Run before each test method to initialize test environment.""" super(TestCase, self).setUp() # NOTE(vish): We need a better method for creating fixtures for tests # now that we have some required db setup for the system @@ -86,8 +87,7 @@ class TestCase(unittest.TestCase): self._original_flags = FLAGS.FlagValuesDict() def tearDown(self): - """Runs after each test method to finalize/tear down test - environment.""" + """Runs after each test method to tear down test environment.""" try: self.mox.UnsetStubs() self.stubs.UnsetAll() @@ -121,7 +121,7 @@ class TestCase(unittest.TestCase): pass def flags(self, **kw): - """Override flag variables for a test""" + """Override flag variables for a test.""" for k, v in kw.iteritems(): if k in self.flag_overrides: self.reset_flags() @@ -131,7 +131,11 @@ class TestCase(unittest.TestCase): setattr(FLAGS, k, v) def reset_flags(self): - """Resets all flag variables for the test. Runs after each test""" + """Resets all flag variables for the test. + + Runs after each test. + + """ FLAGS.Reset() for k, v in self._original_flags.iteritems(): setattr(FLAGS, k, v) @@ -158,7 +162,6 @@ class TestCase(unittest.TestCase): def _monkey_patch_wsgi(self): """Allow us to kill servers spawned by wsgi.Server.""" - # TODO(termie): change these patterns to use functools self.original_start = wsgi.Server.start @functools.wraps(self.original_start) @@ -189,12 +192,13 @@ class TestCase(unittest.TestCase): If you don't care (or don't know) a given value, you can specify the string DONTCARE as the value. This will cause that dict-item to be skipped. + """ def raise_assertion(msg): d1str = str(d1) d2str = str(d2) - base_msg = ("Dictionaries do not match. %(msg)s d1: %(d1str)s " - "d2: %(d2str)s" % locals()) + base_msg = ('Dictionaries do not match. %(msg)s d1: %(d1str)s ' + 'd2: %(d2str)s' % locals()) raise AssertionError(base_msg) d1keys = set(d1.keys()) @@ -202,8 +206,8 @@ class TestCase(unittest.TestCase): if d1keys != d2keys: d1only = d1keys - d2keys d2only = d2keys - d1keys - raise_assertion("Keys in d1 and not d2: %(d1only)s. " - "Keys in d2 and not d1: %(d2only)s" % locals()) + raise_assertion('Keys in d1 and not d2: %(d1only)s. 
' + 'Keys in d2 and not d1: %(d2only)s' % locals()) for key in d1keys: d1value = d1[key] @@ -217,19 +221,19 @@ class TestCase(unittest.TestCase): "d2['%(key)s']=%(d2value)s" % locals()) def assertDictListMatch(self, L1, L2): - """Assert a list of dicts are equivalent""" + """Assert a list of dicts are equivalent.""" def raise_assertion(msg): L1str = str(L1) L2str = str(L2) - base_msg = ("List of dictionaries do not match: %(msg)s " - "L1: %(L1str)s L2: %(L2str)s" % locals()) + base_msg = ('List of dictionaries do not match: %(msg)s ' + 'L1: %(L1str)s L2: %(L2str)s' % locals()) raise AssertionError(base_msg) L1count = len(L1) L2count = len(L2) if L1count != L2count: - raise_assertion("Length mismatch: len(L1)=%(L1count)d != " - "len(L2)=%(L2count)d" % locals()) + raise_assertion('Length mismatch: len(L1)=%(L1count)d != ' + 'len(L2)=%(L2count)d' % locals()) for d1, d2 in zip(L1, L2): self.assertDictMatch(d1, d2) diff --git a/nova/utils.py b/nova/utils.py index 76cba1a08cf8..59f694196c13 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -17,9 +17,7 @@ # License for the specific language governing permissions and limitations # under the License. -""" -System-level utilities and helper functions. -""" +"""Utilities and helper functions.""" import base64 import datetime @@ -43,9 +41,8 @@ from eventlet import event from eventlet import greenthread from eventlet import semaphore from eventlet.green import subprocess -None + from nova import exception -from nova.exception import ProcessExecutionError from nova import flags from nova import log as logging @@ -56,7 +53,7 @@ FLAGS = flags.FLAGS def import_class(import_str): - """Returns a class from a string including module and class""" + """Returns a class from a string including module and class.""" mod_str, _sep, class_str = import_str.rpartition('.') try: __import__(mod_str) @@ -67,7 +64,7 @@ def import_class(import_str): def import_object(import_str): - """Returns an object including a module or module and class""" + """Returns an object including a module or module and class.""" try: __import__(import_str) return sys.modules[import_str] @@ -99,11 +96,12 @@ def vpn_ping(address, port, timeout=0.05, session_id=None): cli_id = 64 bit identifier ? 
= unknown, probably flags/padding bit 9 was 1 and the rest were 0 in testing + """ if session_id is None: session_id = random.randint(0, 0xffffffffffffffff) sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - data = struct.pack("!BQxxxxxx", 0x38, session_id) + data = struct.pack('!BQxxxxxx', 0x38, session_id) sock.sendto(data, (address, port)) sock.settimeout(timeout) try: @@ -112,7 +110,7 @@ def vpn_ping(address, port, timeout=0.05, session_id=None): return False finally: sock.close() - fmt = "!BQxxxxxQxxxx" + fmt = '!BQxxxxxQxxxx' if len(received) != struct.calcsize(fmt): print struct.calcsize(fmt) return False @@ -122,15 +120,8 @@ def vpn_ping(address, port, timeout=0.05, session_id=None): def fetchfile(url, target): - LOG.debug(_("Fetching %s") % url) -# c = pycurl.Curl() -# fp = open(target, "wb") -# c.setopt(c.URL, url) -# c.setopt(c.WRITEDATA, fp) -# c.perform() -# c.close() -# fp.close() - execute("curl", "--fail", url, "-o", target) + LOG.debug(_('Fetching %s') % url) + execute('curl', '--fail', url, '-o', target) def execute(*cmd, **kwargs): @@ -147,7 +138,7 @@ def execute(*cmd, **kwargs): while attempts > 0: attempts -= 1 try: - LOG.debug(_("Running cmd (subprocess): %s"), ' '.join(cmd)) + LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd)) env = os.environ.copy() if addl_env: env.update(addl_env) @@ -163,20 +154,21 @@ def execute(*cmd, **kwargs): result = obj.communicate() obj.stdin.close() if obj.returncode: - LOG.debug(_("Result was %s") % obj.returncode) + LOG.debug(_('Result was %s') % obj.returncode) if type(check_exit_code) == types.IntType \ and obj.returncode != check_exit_code: (stdout, stderr) = result - raise ProcessExecutionError(exit_code=obj.returncode, - stdout=stdout, - stderr=stderr, - cmd=' '.join(cmd)) + raise exception.ProcessExecutionError( + exit_code=obj.returncode, + stdout=stdout, + stderr=stderr, + cmd=' '.join(cmd)) return result - except ProcessExecutionError: + except exception.ProcessExecutionError: if not attempts: raise else: - LOG.debug(_("%r failed. Retrying."), cmd) + LOG.debug(_('%r failed. Retrying.'), cmd) if delay_on_retry: greenthread.sleep(random.randint(20, 200) / 100.0) finally: @@ -188,13 +180,13 @@ def execute(*cmd, **kwargs): def ssh_execute(ssh, cmd, process_input=None, addl_env=None, check_exit_code=True): - LOG.debug(_("Running cmd (SSH): %s"), ' '.join(cmd)) + LOG.debug(_('Running cmd (SSH): %s'), ' '.join(cmd)) if addl_env: - raise exception.Error("Environment not supported over SSH") + raise exception.Error('Environment not supported over SSH') if process_input: # This is (probably) fixable if we need it... 
- raise exception.Error("process_input not supported over SSH") + raise exception.Error('process_input not supported over SSH') stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd) channel = stdout_stream.channel @@ -212,7 +204,7 @@ def ssh_execute(ssh, cmd, process_input=None, # exit_status == -1 if no exit code was returned if exit_status != -1: - LOG.debug(_("Result was %s") % exit_status) + LOG.debug(_('Result was %s') % exit_status) if check_exit_code and exit_status != 0: raise exception.ProcessExecutionError(exit_code=exit_status, stdout=stdout, @@ -251,7 +243,7 @@ def debug(arg): def runthis(prompt, *cmd, **kwargs): - LOG.debug(_("Running %s"), (" ".join(cmd))) + LOG.debug(_('Running %s'), (' '.join(cmd))) rv, err = execute(*cmd, **kwargs) @@ -266,48 +258,49 @@ def generate_mac(): random.randint(0x00, 0x7f), random.randint(0x00, 0xff), random.randint(0x00, 0xff)] - return ':'.join(map(lambda x: "%02x" % x, mac)) + return ':'.join(map(lambda x: '%02x' % x, mac)) # Default symbols to use for passwords. Avoids visually confusing characters. # ~6 bits per symbol -DEFAULT_PASSWORD_SYMBOLS = ("23456789" # Removed: 0,1 - "ABCDEFGHJKLMNPQRSTUVWXYZ" # Removed: I, O - "abcdefghijkmnopqrstuvwxyz") # Removed: l +DEFAULT_PASSWORD_SYMBOLS = ('23456789' # Removed: 0,1 + 'ABCDEFGHJKLMNPQRSTUVWXYZ' # Removed: I, O + 'abcdefghijkmnopqrstuvwxyz') # Removed: l # ~5 bits per symbol -EASIER_PASSWORD_SYMBOLS = ("23456789" # Removed: 0, 1 - "ABCDEFGHJKLMNPQRSTUVWXYZ") # Removed: I, O +EASIER_PASSWORD_SYMBOLS = ('23456789' # Removed: 0, 1 + 'ABCDEFGHJKLMNPQRSTUVWXYZ') # Removed: I, O def generate_password(length=20, symbols=DEFAULT_PASSWORD_SYMBOLS): """Generate a random password from the supplied symbols. Believed to be reasonably secure (with a reasonable password length!) 
+ """ r = random.SystemRandom() - return "".join([r.choice(symbols) for _i in xrange(length)]) + return ''.join([r.choice(symbols) for _i in xrange(length)]) def last_octet(address): - return int(address.split(".")[-1]) + return int(address.split('.')[-1]) def get_my_linklocal(interface): try: - if_str = execute("ip", "-f", "inet6", "-o", "addr", "show", interface) - condition = "\s+inet6\s+([0-9a-f:]+)/\d+\s+scope\s+link" + if_str = execute('ip', '-f', 'inet6', '-o', 'addr', 'show', interface) + condition = '\s+inet6\s+([0-9a-f:]+)/\d+\s+scope\s+link' links = [re.search(condition, x) for x in if_str[0].split('\n')] address = [w.group(1) for w in links if w is not None] if address[0] is not None: return address[0] else: - raise exception.Error(_("Link Local address is not found.:%s") + raise exception.Error(_('Link Local address is not found.:%s') % if_str) except Exception as ex: raise exception.Error(_("Couldn't get Link Local IP of %(interface)s" - " :%(ex)s") % locals()) + " :%(ex)s") % locals()) def to_global_ipv6(prefix, mac): @@ -319,15 +312,15 @@ def to_global_ipv6(prefix, mac): return (mac64_addr ^ netaddr.IPAddress('::0200:0:0:0') | maskIP).\ format() except TypeError: - raise TypeError(_("Bad mac for to_global_ipv6: %s") % mac) + raise TypeError(_('Bad mac for to_global_ipv6: %s') % mac) def to_mac(ipv6_address): address = netaddr.IPAddress(ipv6_address) - mask1 = netaddr.IPAddress("::ffff:ffff:ffff:ffff") - mask2 = netaddr.IPAddress("::0200:0:0:0") + mask1 = netaddr.IPAddress('::ffff:ffff:ffff:ffff') + mask2 = netaddr.IPAddress('::0200:0:0:0') mac64 = netaddr.EUI(int(address & mask1 ^ mask2)).words - return ":".join(["%02x" % i for i in mac64[0:3] + mac64[5:8]]) + return ':'.join(['%02x' % i for i in mac64[0:3] + mac64[5:8]]) def utcnow(): @@ -341,7 +334,7 @@ utcnow.override_time = None def is_older_than(before, seconds): - """Return True if before is older than seconds""" + """Return True if before is older than seconds.""" return utcnow() - before > datetime.timedelta(seconds=seconds) @@ -379,7 +372,7 @@ def isotime(at=None): def parse_isotime(timestr): - """Turn an iso formatted time back into a datetime""" + """Turn an iso formatted time back into a datetime.""" return datetime.datetime.strptime(timestr, TIME_FORMAT) @@ -433,16 +426,19 @@ class LazyPluggable(object): class LoopingCallDone(Exception): - """The poll-function passed to LoopingCall can raise this exception to + """Exception to break out and stop a LoopingCall. + + The poll-function passed to LoopingCall can raise this exception to break out of the loop normally. This is somewhat analogous to StopIteration. An optional return-value can be included as the argument to the exception; this return-value will be returned by LoopingCall.wait() + """ def __init__(self, retvalue=True): - """:param retvalue: Value that LoopingCall.wait() should return""" + """:param retvalue: Value that LoopingCall.wait() should return.""" self.retvalue = retvalue @@ -493,7 +489,7 @@ def xhtml_escape(value): http://github.com/facebook/tornado/blob/master/tornado/escape.py """ - return saxutils.escape(value, {'"': """}) + return saxutils.escape(value, {'"': '"'}) def utf8(value): @@ -504,7 +500,7 @@ def utf8(value): """ if isinstance(value, unicode): - return value.encode("utf-8") + return value.encode('utf-8') assert isinstance(value, str) return value @@ -554,7 +550,7 @@ class _NoopContextManager(object): def synchronized(name, external=False): - """Synchronization decorator + """Synchronization decorator. 
Decorating a method like so: @synchronized('mylock') @@ -578,6 +574,7 @@ def synchronized(name, external=False): multiple processes. This means that if two different workers both run a a method decorated with @synchronized('mylock', external=True), only one of them will execute at a time. + """ def wrap(f): @@ -590,13 +587,13 @@ def synchronized(name, external=False): _semaphores[name] = semaphore.Semaphore() sem = _semaphores[name] LOG.debug(_('Attempting to grab semaphore "%(lock)s" for method ' - '"%(method)s"...' % {"lock": name, - "method": f.__name__})) + '"%(method)s"...' % {'lock': name, + 'method': f.__name__})) with sem: if external: LOG.debug(_('Attempting to grab file lock "%(lock)s" for ' 'method "%(method)s"...' % - {"lock": name, "method": f.__name__})) + {'lock': name, 'method': f.__name__})) lock_file_path = os.path.join(FLAGS.lock_path, 'nova-%s.lock' % name) lock = lockfile.FileLock(lock_file_path) @@ -617,21 +614,23 @@ def synchronized(name, external=False): def get_from_path(items, path): - """ Returns a list of items matching the specified path. Takes an - XPath-like expression e.g. prop1/prop2/prop3, and for each item in items, - looks up items[prop1][prop2][prop3]. Like XPath, if any of the + """Returns a list of items matching the specified path. + + Takes an XPath-like expression e.g. prop1/prop2/prop3, and for each item + in items, looks up items[prop1][prop2][prop3]. Like XPath, if any of the intermediate results are lists it will treat each list item individually. A 'None' in items or any child expressions will be ignored, this function will not throw because of None (anywhere) in items. The returned list - will contain no None values.""" + will contain no None values. + """ if path is None: - raise exception.Error("Invalid mini_xpath") + raise exception.Error('Invalid mini_xpath') - (first_token, sep, remainder) = path.partition("/") + (first_token, sep, remainder) = path.partition('/') - if first_token == "": - raise exception.Error("Invalid mini_xpath") + if first_token == '': + raise exception.Error('Invalid mini_xpath') results = [] @@ -645,7 +644,7 @@ def get_from_path(items, path): for item in items: if item is None: continue - get_method = getattr(item, "get", None) + get_method = getattr(item, 'get', None) if get_method is None: continue child = get_method(first_token) @@ -666,7 +665,7 @@ def get_from_path(items, path): def flatten_dict(dict_, flattened=None): - """Recursively flatten a nested dictionary""" + """Recursively flatten a nested dictionary.""" flattened = flattened or {} for key, value in dict_.iteritems(): if hasattr(value, 'iteritems'): @@ -677,9 +676,7 @@ def flatten_dict(dict_, flattened=None): def partition_dict(dict_, keys): - """Return two dicts, one containing only `keys` the other containing - everything but `keys` - """ + """Return two dicts, one with `keys` the other with everything else.""" intersection = {} difference = {} for key, value in dict_.iteritems(): @@ -691,9 +688,7 @@ def partition_dict(dict_, keys): def map_dict_keys(dict_, key_map): - """Return a dictionary in which the dictionaries keys are mapped to - new keys. 
- """ + """Return a dict in which the dictionaries keys are mapped to new keys.""" mapped = {} for key, value in dict_.iteritems(): mapped_key = key_map[key] if key in key_map else key @@ -702,15 +697,15 @@ def map_dict_keys(dict_, key_map): def subset_dict(dict_, keys): - """Return a dict that only contains a subset of keys""" + """Return a dict that only contains a subset of keys.""" subset = partition_dict(dict_, keys)[0] return subset def check_isinstance(obj, cls): - """Checks that obj is of type cls, and lets PyLint infer types""" + """Checks that obj is of type cls, and lets PyLint infer types.""" if isinstance(obj, cls): return obj - raise Exception(_("Expected object of type: %s") % (str(cls))) + raise Exception(_('Expected object of type: %s') % (str(cls))) # TODO(justinsb): Can we make this better?? return cls() # Ugly PyLint hack diff --git a/nova/version.py b/nova/version.py index c43d12cf86cb..1f8d08e8c244 100644 --- a/nova/version.py +++ b/nova/version.py @@ -21,9 +21,9 @@ except ImportError: 'revision_id': 'LOCALREVISION', 'revno': 0} + NOVA_VERSION = ['2011', '3'] YEAR, COUNT = NOVA_VERSION - FINAL = False # This becomes true at Release Candidate time @@ -39,8 +39,8 @@ def version_string(): def vcs_version_string(): - return "%s:%s" % (version_info['branch_nick'], version_info['revision_id']) + return '%s:%s' % (version_info['branch_nick'], version_info['revision_id']) def version_string_with_vcs(): - return "%s-%s" % (canonical_version_string(), vcs_version_string()) + return '%s-%s' % (canonical_version_string(), vcs_version_string()) diff --git a/nova/wsgi.py b/nova/wsgi.py index de2e0749fbdf..119fcbe4c3b2 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -17,9 +17,7 @@ # License for the specific language governing permissions and limitations # under the License. -""" -Utility methods for working with WSGI servers -""" +"""Utility methods for working with WSGI servers.""" import os import sys @@ -33,7 +31,6 @@ import routes.middleware import webob import webob.dec import webob.exc - from paste import deploy from nova import exception @@ -66,7 +63,7 @@ class Server(object): def start(self, application, port, host='0.0.0.0', backlog=128): """Run a WSGI server with the given application.""" arg0 = sys.argv[0] - logging.audit(_("Starting %(arg0)s on %(host)s:%(port)s") % locals()) + logging.audit(_('Starting %(arg0)s on %(host)s:%(port)s') % locals()) socket = eventlet.listen((host, port), backlog=backlog) self.pool.spawn_n(self._run, application, socket) @@ -87,30 +84,31 @@ class Server(object): class Request(webob.Request): def best_match_content_type(self): - """ - Determine the most acceptable content-type based on the - query extension then the Accept header + """Determine the most acceptable content-type. + + Based on the query extension then the Accept header. 
+ """ - parts = self.path.rsplit(".", 1) + parts = self.path.rsplit('.', 1) if len(parts) > 1: format = parts[1] - if format in ["json", "xml"]: - return "application/{0}".format(parts[1]) + if format in ['json', 'xml']: + return 'application/{0}'.format(parts[1]) - ctypes = ["application/json", "application/xml"] + ctypes = ['application/json', 'application/xml'] bm = self.accept.best_match(ctypes) - return bm or "application/json" + return bm or 'application/json' def get_content_type(self): try: - ct = self.headers["Content-Type"] - assert ct in ("application/xml", "application/json") + ct = self.headers['Content-Type'] + assert ct in ('application/xml', 'application/json') return ct except Exception: - raise webob.exc.HTTPBadRequest("Invalid content type") + raise webob.exc.HTTPBadRequest('Invalid content type') class Application(object): @@ -118,7 +116,7 @@ class Application(object): @classmethod def factory(cls, global_config, **local_config): - """Used for paste app factories in paste.deploy config fles. + """Used for paste app factories in paste.deploy config files. Any local configuration (that is, values under the [app:APPNAME] section of the paste config) will be passed into the `__init__` method @@ -173,8 +171,9 @@ class Application(object): See the end of http://pythonpaste.org/webob/modules/dec.html for more info. + """ - raise NotImplementedError(_("You must implement __call__")) + raise NotImplementedError(_('You must implement __call__')) class Middleware(Application): @@ -184,11 +183,12 @@ class Middleware(Application): initialized that will be called next. By default the middleware will simply call its wrapped app, or you can override __call__ to customize its behavior. + """ @classmethod def factory(cls, global_config, **local_config): - """Used for paste app factories in paste.deploy config fles. + """Used for paste app factories in paste.deploy config files. Any local configuration (that is, values under the [filter:APPNAME] section of the paste config) will be passed into the `__init__` method @@ -240,20 +240,24 @@ class Middleware(Application): class Debug(Middleware): - """Helper class that can be inserted into any WSGI application chain - to get information about the request and response.""" + """Helper class for debugging a WSGI application. + + Can be inserted into any WSGI application chain to get information + about the request and response. + + """ @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): - print ("*" * 40) + " REQUEST ENVIRON" + print ('*' * 40) + ' REQUEST ENVIRON' for key, value in req.environ.items(): - print key, "=", value + print key, '=', value print resp = req.get_response(self.application) - print ("*" * 40) + " RESPONSE HEADERS" + print ('*' * 40) + ' RESPONSE HEADERS' for (key, value) in resp.headers.iteritems(): - print key, "=", value + print key, '=', value print resp.app_iter = self.print_generator(resp.app_iter) @@ -262,11 +266,8 @@ class Debug(Middleware): @staticmethod def print_generator(app_iter): - """ - Iterator that prints the contents of a wrapper string iterator - when iterated. - """ - print ("*" * 40) + " BODY" + """Iterator that prints the contents of a wrapper string.""" + print ('*' * 40) + ' BODY' for part in app_iter: sys.stdout.write(part) sys.stdout.flush() @@ -275,13 +276,10 @@ class Debug(Middleware): class Router(object): - """ - WSGI middleware that maps incoming requests to WSGI apps. 
- """ + """WSGI middleware that maps incoming requests to WSGI apps.""" def __init__(self, mapper): - """ - Create a router for the given routes.Mapper. + """Create a router for the given routes.Mapper. Each route in `mapper` must specify a 'controller', which is a WSGI app to call. You'll probably want to specify an 'action' as @@ -293,15 +291,16 @@ class Router(object): sc = ServerController() # Explicit mapping of one route to a controller+action - mapper.connect(None, "/svrlist", controller=sc, action="list") + mapper.connect(None, '/svrlist', controller=sc, action='list') # Actions are all implicitly defined - mapper.resource("server", "servers", controller=sc) + mapper.resource('server', 'servers', controller=sc) # Pointing to an arbitrary WSGI app. You can specify the # {path_info:.*} parameter so the target app can be handed just that # section of the URL. - mapper.connect(None, "/v1.0/{path_info:.*}", controller=BlogApp()) + mapper.connect(None, '/v1.0/{path_info:.*}', controller=BlogApp()) + """ self.map = mapper self._router = routes.middleware.RoutesMiddleware(self._dispatch, @@ -309,19 +308,22 @@ class Router(object): @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): - """ - Route the incoming request to a controller based on self.map. + """Route the incoming request to a controller based on self.map. + If no match, return a 404. + """ return self._router @staticmethod @webob.dec.wsgify(RequestClass=Request) def _dispatch(req): - """ + """Dispatch the request to the appropriate controller. + Called by self._router after matching the incoming request to a route and putting the information into req.environ. Either returns 404 or the routed WSGI app's response. + """ match = req.environ['wsgiorg.routing_args'][1] if not match: @@ -331,19 +333,19 @@ class Router(object): class Controller(object): - """ + """WSGI app that dispatched to methods. + WSGI app that reads routing information supplied by RoutesMiddleware and calls the requested action method upon itself. All action methods must, in addition to their normal parameters, accept a 'req' argument which is the incoming wsgi.Request. They raise a webob.exc exception, or return a dict which will be serialized by requested content type. + """ @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): - """ - Call the method specified in req.environ by RoutesMiddleware. - """ + """Call the method specified in req.environ by RoutesMiddleware.""" arg_dict = req.environ['wsgiorg.routing_args'][1] action = arg_dict['action'] method = getattr(self, action) @@ -361,7 +363,7 @@ class Controller(object): body = self._serialize(result, content_type, default_xmlns) response = webob.Response() - response.headers["Content-Type"] = content_type + response.headers['Content-Type'] = content_type response.body = body msg_dict = dict(url=req.url, status=response.status_int) msg = _("%(url)s returned with HTTP %(status)d") % msg_dict @@ -371,12 +373,13 @@ class Controller(object): return result def _serialize(self, data, content_type, default_xmlns): - """ - Serialize the given dict to the provided content_type. + """Serialize the given dict to the provided content_type. + Uses self._serialization_metadata if it exists, which is a dict mapping MIME types to information needed to serialize to that type. 
+ """ - _metadata = getattr(type(self), "_serialization_metadata", {}) + _metadata = getattr(type(self), '_serialization_metadata', {}) serializer = Serializer(_metadata, default_xmlns) try: @@ -385,12 +388,13 @@ class Controller(object): raise webob.exc.HTTPNotAcceptable() def _deserialize(self, data, content_type): - """ - Deserialize the request body to the specefied content type. + """Deserialize the request body to the specefied content type. + Uses self._serialization_metadata if it exists, which is a dict mapping MIME types to information needed to serialize to that type. + """ - _metadata = getattr(type(self), "_serialization_metadata", {}) + _metadata = getattr(type(self), '_serialization_metadata', {}) serializer = Serializer(_metadata) return serializer.deserialize(data, content_type) @@ -400,23 +404,22 @@ class Controller(object): class Serializer(object): - """ - Serializes and deserializes dictionaries to certain MIME types. - """ + """Serializes and deserializes dictionaries to certain MIME types.""" def __init__(self, metadata=None, default_xmlns=None): - """ - Create a serializer based on the given WSGI environment. + """Create a serializer based on the given WSGI environment. + 'metadata' is an optional dict mapping MIME types to information needed to serialize a dictionary to that type. + """ self.metadata = metadata or {} self.default_xmlns = default_xmlns def _get_serialize_handler(self, content_type): handlers = { - "application/json": self._to_json, - "application/xml": self._to_xml, + 'application/json': self._to_json, + 'application/xml': self._to_xml, } try: @@ -425,29 +428,27 @@ class Serializer(object): raise exception.InvalidContentType() def serialize(self, data, content_type): - """ - Serialize a dictionary into a string of the specified content type. - """ + """Serialize a dictionary into the specified content type.""" return self._get_serialize_handler(content_type)(data) def deserialize(self, datastring, content_type): - """ - Deserialize a string to a dictionary. + """Deserialize a string to a dictionary. The string must be in the format of a supported MIME type. + """ return self.get_deserialize_handler(content_type)(datastring) def get_deserialize_handler(self, content_type): handlers = { - "application/json": self._from_json, - "application/xml": self._from_xml, + 'application/json': self._from_json, + 'application/xml': self._from_xml, } try: return handlers[content_type] except Exception: - raise exception.InvalidContentType(_("Invalid content type %s" + raise exception.InvalidContentType(_('Invalid content type %s' % content_type)) def _from_json(self, datastring): @@ -460,11 +461,11 @@ class Serializer(object): return {node.nodeName: self._from_xml_node(node, plurals)} def _from_xml_node(self, node, listnames): - """ - Convert a minidom node to a simple Python type. + """Convert a minidom node to a simple Python type. listnames is a collection of names of XML nodes whose subnodes should be considered list items. 
+ """ if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3: return node.childNodes[0].nodeValue @@ -571,7 +572,6 @@ def paste_config_file(basename): * /etc/nova, which may not be diffrerent from state_path on your distro """ - configfiles = [basename, os.path.join(FLAGS.state_path, 'etc', 'nova', basename), os.path.join(FLAGS.state_path, 'etc', basename), @@ -587,7 +587,7 @@ def load_paste_configuration(filename, appname): filename = os.path.abspath(filename) config = None try: - config = deploy.appconfig("config:%s" % filename, name=appname) + config = deploy.appconfig('config:%s' % filename, name=appname) except LookupError: pass return config @@ -598,7 +598,7 @@ def load_paste_app(filename, appname): filename = os.path.abspath(filename) app = None try: - app = deploy.loadapp("config:%s" % filename, name=appname) + app = deploy.loadapp('config:%s' % filename, name=appname) except LookupError: pass return app From 6eacc130af49ced7a1e5ce511c7096dd7563b4b2 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 20 Apr 2011 12:08:24 -0700 Subject: [PATCH 53/66] cleanups per code review --- nova/utils.py | 4 ++-- nova/wsgi.py | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/nova/utils.py b/nova/utils.py index 59f694196c13..b783f6c14592 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -182,11 +182,11 @@ def ssh_execute(ssh, cmd, process_input=None, addl_env=None, check_exit_code=True): LOG.debug(_('Running cmd (SSH): %s'), ' '.join(cmd)) if addl_env: - raise exception.Error('Environment not supported over SSH') + raise exception.Error(_('Environment not supported over SSH')) if process_input: # This is (probably) fixable if we need it... - raise exception.Error('process_input not supported over SSH') + raise exception.Error(_('process_input not supported over SSH')) stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd) channel = stdout_stream.channel diff --git a/nova/wsgi.py b/nova/wsgi.py index 119fcbe4c3b2..418087641faa 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -89,7 +89,6 @@ class Request(webob.Request): Based on the query extension then the Accept header. """ - parts = self.path.rsplit('.', 1) if len(parts) > 1: From 8bf11973b0de6a57f18ac48452e3f8b36adac565 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 20 Apr 2011 12:26:15 -0700 Subject: [PATCH 54/66] docstring cleanup, nova/image dir --- nova/image/fake.py | 9 +++--- nova/image/glance.py | 70 +++++++++++++++++-------------------------- nova/image/local.py | 8 +++-- nova/image/s3.py | 43 +++++++++++++------------- nova/image/service.py | 63 +++++++++++++++++++------------------- 5 files changed, 92 insertions(+), 101 deletions(-) diff --git a/nova/image/fake.py b/nova/image/fake.py index e02b4127eb17..3bc2a828716c 100644 --- a/nova/image/fake.py +++ b/nova/image/fake.py @@ -14,6 +14,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. + """Implementation of an fake image service""" import copy @@ -69,14 +70,14 @@ class FakeImageService(service.BaseImageService): image = self.images.get(image_id) if image: return copy.deepcopy(image) - LOG.warn("Unable to find image id %s. Have images: %s", + LOG.warn('Unable to find image id %s. Have images: %s', image_id, self.images) raise exception.NotFound def create(self, context, data): """Store the image data and return the new image id. - :raises Duplicate if the image already exist. 
+ :raises: Duplicate if the image already exist. """ image_id = int(data['id']) @@ -88,7 +89,7 @@ class FakeImageService(service.BaseImageService): def update(self, context, image_id, data): """Replace the contents of the given image with the new data. - :raises NotFound if the image does not exist. + :raises: NotFound if the image does not exist. """ image_id = int(image_id) @@ -99,7 +100,7 @@ class FakeImageService(service.BaseImageService): def delete(self, context, image_id): """Delete the given image. - :raises NotFound if the image does not exist. + :raises: NotFound if the image does not exist. """ image_id = int(image_id) diff --git a/nova/image/glance.py b/nova/image/glance.py index 1a80bb2af996..81661b3b0e0b 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -14,6 +14,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. + """Implementation of an image service that uses Glance as the backend""" from __future__ import absolute_import @@ -31,16 +32,18 @@ from nova.image import service LOG = logging.getLogger('nova.image.glance') + FLAGS = flags.FLAGS + GlanceClient = utils.import_class('glance.client.Client') class GlanceImageService(service.BaseImageService): """Provides storage and retrieval of disk image objects within Glance.""" - GLANCE_ONLY_ATTRS = ["size", "location", "disk_format", - "container_format"] + GLANCE_ONLY_ATTRS = ['size', 'location', 'disk_format', + 'container_format'] # NOTE(sirp): Overriding to use _translate_to_service provided by # BaseImageService @@ -56,9 +59,7 @@ class GlanceImageService(service.BaseImageService): self.client = client def index(self, context): - """ - Calls out to Glance for a list of images available - """ + """Calls out to Glance for a list of images available.""" # NOTE(sirp): We need to use `get_images_detailed` and not # `get_images` here because we need `is_public` and `properties` # included so we can filter by user @@ -71,9 +72,7 @@ class GlanceImageService(service.BaseImageService): return filtered def detail(self, context): - """ - Calls out to Glance for a list of detailed image information - """ + """Calls out to Glance for a list of detailed image information.""" filtered = [] image_metas = self.client.get_images_detailed() for image_meta in image_metas: @@ -83,9 +82,7 @@ class GlanceImageService(service.BaseImageService): return filtered def show(self, context, image_id): - """ - Returns a dict containing image data for the given opaque image id. - """ + """Returns a dict with image data for the given opaque image id.""" try: image_meta = self.client.get_image_meta(image_id) except glance_exception.NotFound: @@ -98,9 +95,7 @@ class GlanceImageService(service.BaseImageService): return base_image_meta def show_by_name(self, context, name): - """ - Returns a dict containing image data for the given name. - """ + """Returns a dict containing image data for the given name.""" # TODO(vish): replace this with more efficient call when glance # supports it. image_metas = self.detail(context) @@ -110,9 +105,7 @@ class GlanceImageService(service.BaseImageService): raise exception.NotFound def get(self, context, image_id, data): - """ - Calls out to Glance for metadata and data and writes data. 
- """ + """Calls out to Glance for metadata and data and writes data.""" try: image_meta, image_chunks = self.client.get_image(image_id) except glance_exception.NotFound: @@ -125,16 +118,16 @@ class GlanceImageService(service.BaseImageService): return base_image_meta def create(self, context, image_meta, data=None): - """ - Store the image data and return the new image id. + """Store the image data and return the new image id. + + :raises: AlreadyExists if the image already exist. - :raises AlreadyExists if the image already exist. """ # Translate Base -> Service - LOG.debug(_("Creating image in Glance. Metadata passed in %s"), + LOG.debug(_('Creating image in Glance. Metadata passed in %s'), image_meta) sent_service_image_meta = self._translate_to_service(image_meta) - LOG.debug(_("Metadata after formatting for Glance %s"), + LOG.debug(_('Metadata after formatting for Glance %s'), sent_service_image_meta) recv_service_image_meta = self.client.add_image( @@ -142,14 +135,15 @@ class GlanceImageService(service.BaseImageService): # Translate Service -> Base base_image_meta = self._translate_to_base(recv_service_image_meta) - LOG.debug(_("Metadata returned from Glance formatted for Base %s"), + LOG.debug(_('Metadata returned from Glance formatted for Base %s'), base_image_meta) return base_image_meta def update(self, context, image_id, image_meta, data=None): """Replace the contents of the given image with the new data. - :raises NotFound if the image does not exist. + :raises: NotFound if the image does not exist. + """ # NOTE(vish): show is to check if image is available self.show(context, image_id) @@ -162,10 +156,10 @@ class GlanceImageService(service.BaseImageService): return base_image_meta def delete(self, context, image_id): - """ - Delete the given image. + """Delete the given image. + + :raises: NotFound if the image does not exist. - :raises NotFound if the image does not exist. 
""" # NOTE(vish): show is to check if image is available self.show(context, image_id) @@ -176,16 +170,12 @@ class GlanceImageService(service.BaseImageService): return result def delete_all(self): - """ - Clears out all images - """ + """Clears out all images.""" pass @classmethod def _translate_to_base(cls, image_meta): - """Overriding the base translation to handle conversion to datetime - objects - """ + """Override translation to handle conversion to datetime objects.""" image_meta = service.BaseImageService._propertify_metadata( image_meta, cls.SERVICE_IMAGE_ATTRS) image_meta = _convert_timestamps_to_datetimes(image_meta) @@ -194,9 +184,7 @@ class GlanceImageService(service.BaseImageService): # utility functions def _convert_timestamps_to_datetimes(image_meta): - """ - Returns image with known timestamp fields converted to datetime objects - """ + """Returns image with timestamp fields converted to datetime objects.""" for attr in ['created_at', 'updated_at', 'deleted_at']: if image_meta.get(attr): image_meta[attr] = _parse_glance_iso8601_timestamp( @@ -205,10 +193,8 @@ def _convert_timestamps_to_datetimes(image_meta): def _parse_glance_iso8601_timestamp(timestamp): - """ - Parse a subset of iso8601 timestamps into datetime objects - """ - iso_formats = ["%Y-%m-%dT%H:%M:%S.%f", "%Y-%m-%dT%H:%M:%S"] + """Parse a subset of iso8601 timestamps into datetime objects.""" + iso_formats = ['%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M:%S'] for iso_format in iso_formats: try: @@ -216,5 +202,5 @@ def _parse_glance_iso8601_timestamp(timestamp): except ValueError: pass - raise ValueError(_("%(timestamp)s does not follow any of the " - "signatures: %(ISO_FORMATS)s") % locals()) + raise ValueError(_('%(timestamp)s does not follow any of the ' + 'signatures: %(ISO_FORMATS)s') % locals()) diff --git a/nova/image/local.py b/nova/image/local.py index fa5e93346a0a..d59c3970f321 100644 --- a/nova/image/local.py +++ b/nova/image/local.py @@ -23,14 +23,15 @@ import shutil from nova import exception from nova import flags from nova import log as logging -from nova.image import service from nova import utils +from nova.image import service FLAGS = flags.FLAGS flags.DEFINE_string('images_path', '$state_path/images', 'path to decrypted images') + LOG = logging.getLogger('nova.image.local') @@ -57,7 +58,7 @@ class LocalImageService(service.BaseImageService): unhexed_image_id = int(image_dir, 16) except ValueError: LOG.error( - _("%s is not in correct directory naming format"\ + _('%s is not in correct directory naming format'\ % image_dir)) else: images.append(unhexed_image_id) @@ -148,7 +149,8 @@ class LocalImageService(service.BaseImageService): def delete(self, context, image_id): """Delete the given image. - Raises NotFound if the image does not exist. + + :raises: NotFound if the image does not exist. """ # NOTE(vish): show is to check if image is available diff --git a/nova/image/s3.py b/nova/image/s3.py index 2a02d4674599..4a4ee44d822c 100644 --- a/nova/image/s3.py +++ b/nova/image/s3.py @@ -16,19 +16,16 @@ # License for the specific language governing permissions and limitations # under the License. -""" -Proxy AMI-related calls from the cloud controller, to the running -objectstore service. 
-""" +"""Proxy AMI-related calls from cloud controller to objectstore service.""" import binascii -import eventlet import os import shutil import tarfile import tempfile from xml.etree import ElementTree +import eventlet import boto.s3.connection from nova import crypto @@ -46,7 +43,7 @@ flags.DEFINE_string('image_decryption_dir', '/tmp', class S3ImageService(service.BaseImageService): - """Wraps an existing image service to support s3 based register""" + """Wraps an existing image service to support s3 based register.""" def __init__(self, service=None, *args, **kwargs): if service is None: service = utils.import_object(FLAGS.image_service) @@ -54,7 +51,11 @@ class S3ImageService(service.BaseImageService): self.service.__init__(*args, **kwargs) def create(self, context, metadata, data=None): - """metadata['properties'] should contain image_location""" + """Create an image. + + metadata['properties'] should contain image_location. + + """ image = self._s3_create(context, metadata) return image @@ -100,12 +101,12 @@ class S3ImageService(service.BaseImageService): return local_filename def _s3_create(self, context, metadata): - """Gets a manifext from s3 and makes an image""" + """Gets a manifext from s3 and makes an image.""" image_path = tempfile.mkdtemp(dir=FLAGS.image_decryption_dir) image_location = metadata['properties']['image_location'] - bucket_name = image_location.split("/")[0] + bucket_name = image_location.split('/')[0] manifest_path = image_location[len(bucket_name) + 1:] bucket = self._conn(context).get_bucket(bucket_name) key = bucket.get_key(manifest_path) @@ -116,7 +117,7 @@ class S3ImageService(service.BaseImageService): image_type = 'machine' try: - kernel_id = manifest.find("machine_configuration/kernel_id").text + kernel_id = manifest.find('machine_configuration/kernel_id').text if kernel_id == 'true': image_format = 'aki' image_type = 'kernel' @@ -125,7 +126,7 @@ class S3ImageService(service.BaseImageService): kernel_id = None try: - ramdisk_id = manifest.find("machine_configuration/ramdisk_id").text + ramdisk_id = manifest.find('machine_configuration/ramdisk_id').text if ramdisk_id == 'true': image_format = 'ari' image_type = 'ramdisk' @@ -134,7 +135,7 @@ class S3ImageService(service.BaseImageService): ramdisk_id = None try: - arch = manifest.find("machine_configuration/architecture").text + arch = manifest.find('machine_configuration/architecture').text except Exception: arch = 'x86_64' @@ -160,7 +161,7 @@ class S3ImageService(service.BaseImageService): def delayed_create(): """This handles the fetching and decrypting of the part files.""" parts = [] - for fn_element in manifest.find("image").getiterator("filename"): + for fn_element in manifest.find('image').getiterator('filename'): part = self._download_file(bucket, fn_element.text, image_path) parts.append(part) @@ -174,9 +175,9 @@ class S3ImageService(service.BaseImageService): metadata['properties']['image_state'] = 'decrypting' self.service.update(context, image_id, metadata) - hex_key = manifest.find("image/ec2_encrypted_key").text + hex_key = manifest.find('image/ec2_encrypted_key').text encrypted_key = binascii.a2b_hex(hex_key) - hex_iv = manifest.find("image/ec2_encrypted_iv").text + hex_iv = manifest.find('image/ec2_encrypted_iv').text encrypted_iv = binascii.a2b_hex(hex_iv) # FIXME(vish): grab key from common service so this can run on @@ -214,7 +215,7 @@ class S3ImageService(service.BaseImageService): process_input=encrypted_key, check_exit_code=False) if err: - raise exception.Error(_("Failed to decrypt 
private key: %s") + raise exception.Error(_('Failed to decrypt private key: %s') % err) iv, err = utils.execute('openssl', 'rsautl', @@ -223,8 +224,8 @@ class S3ImageService(service.BaseImageService): process_input=encrypted_iv, check_exit_code=False) if err: - raise exception.Error(_("Failed to decrypt initialization " - "vector: %s") % err) + raise exception.Error(_('Failed to decrypt initialization ' + 'vector: %s') % err) _out, err = utils.execute('openssl', 'enc', '-d', '-aes-128-cbc', @@ -234,14 +235,14 @@ class S3ImageService(service.BaseImageService): '-out', '%s' % (decrypted_filename,), check_exit_code=False) if err: - raise exception.Error(_("Failed to decrypt image file " - "%(image_file)s: %(err)s") % + raise exception.Error(_('Failed to decrypt image file ' + '%(image_file)s: %(err)s') % {'image_file': encrypted_filename, 'err': err}) @staticmethod def _untarzip_image(path, filename): - tar_file = tarfile.open(filename, "r|gz") + tar_file = tarfile.open(filename, 'r|gz') tar_file.extractall(path) image_file = tar_file.getnames()[0] tar_file.close() diff --git a/nova/image/service.py b/nova/image/service.py index fddc72409b3a..3e2357df8a56 100644 --- a/nova/image/service.py +++ b/nova/image/service.py @@ -20,7 +20,7 @@ from nova import utils class BaseImageService(object): - """Base class for providing image search and retrieval services + """Base class for providing image search and retrieval services. ImageService exposes two concepts of metadata: @@ -35,7 +35,9 @@ class BaseImageService(object): This means that ImageServices will return BASE_IMAGE_ATTRS as keys in the metadata dict, all other attributes will be returned as keys in the nested 'properties' dict. + """ + BASE_IMAGE_ATTRS = ['id', 'name', 'created_at', 'updated_at', 'deleted_at', 'deleted', 'status', 'is_public'] @@ -45,23 +47,18 @@ class BaseImageService(object): SERVICE_IMAGE_ATTRS = [] def index(self, context): - """ - Returns a sequence of mappings of id and name information about - images. + """List images. - :rtype: array - :retval: a sequence of mappings with the following signature + :returnsl: a sequence of mappings with the following signature {'id': opaque id of image, 'name': name of image} """ raise NotImplementedError def detail(self, context): - """ - Returns a sequence of mappings of detailed information about images. + """Detailed information about an images. - :rtype: array - :retval: a sequence of mappings with the following signature + :returns: a sequence of mappings with the following signature {'id': opaque id of image, 'name': name of image, 'created_at': creation datetime object, @@ -77,15 +74,14 @@ class BaseImageService(object): NotImplementedError, in which case Nova will emulate this method with repeated calls to show() for each image received from the index() method. + """ raise NotImplementedError def show(self, context, image_id): - """ - Returns a dict containing image metadata for the given opaque image id. - - :retval a mapping with the following signature: + """Detailed information about an image. + :returns: a mapping with the following signature: {'id': opaque id of image, 'name': name of image, 'created_at': creation datetime object, @@ -96,31 +92,32 @@ class BaseImageService(object): 'is_public': boolean indicating if image is public }, ... 
- :raises NotFound if the image does not exist + :raises: NotFound if the image does not exist + """ raise NotImplementedError def get(self, context, data): - """ - Returns a dict containing image metadata and writes image data to data. + """Get an image. :param data: a file-like object to hold binary image data - + :returns: a dict containing image metadata, writes image data to data. :raises NotFound if the image does not exist + """ raise NotImplementedError def create(self, context, metadata, data=None): - """ - Store the image metadata and data and return the new image metadata. + """Store the image metadata and data. - :raises AlreadyExists if the image already exist. + :returns: the new image metadata. + :raises: AlreadyExists if the image already exist. """ raise NotImplementedError def update(self, context, image_id, metadata, data=None): - """Update the given image metadata and data and return the metadata + """Update the given image metadata and data and return the metadata. :raises NotFound if the image does not exist. @@ -128,8 +125,7 @@ class BaseImageService(object): raise NotImplementedError def delete(self, context, image_id): - """ - Delete the given image. + """Delete the given image. :raises NotFound if the image does not exist. @@ -138,12 +134,14 @@ class BaseImageService(object): @staticmethod def _is_image_available(context, image_meta): - """ + """Check image availability. + Images are always available if they are public or if the user is an admin. Otherwise, we filter by project_id (if present) and then fall-back to images owned by user. + """ # FIXME(sirp): We should be filtering by user_id on the Glance side # for security; however, we can't do that until we get authn/authz @@ -169,29 +167,32 @@ class BaseImageService(object): This is used by subclasses to expose only a metadata dictionary that is the same across ImageService implementations. + """ return cls._propertify_metadata(metadata, cls.BASE_IMAGE_ATTRS) @classmethod def _translate_to_service(cls, metadata): - """Return a metadata dictionary that is usable by the ImageService - subclass. + """Return a metadata dict that is usable by the ImageService subclass. As an example, Glance has additional attributes (like 'location'); the BaseImageService considers these properties, but we need to translate these back to first-class attrs for sending to Glance. This method handles this by allowing you to specify the attributes an ImageService considers first-class. + """ if not cls.SERVICE_IMAGE_ATTRS: - raise NotImplementedError(_("Cannot use this without specifying " - "SERVICE_IMAGE_ATTRS for subclass")) + raise NotImplementedError(_('Cannot use this without specifying ' + 'SERVICE_IMAGE_ATTRS for subclass')) return cls._propertify_metadata(metadata, cls.SERVICE_IMAGE_ATTRS) @staticmethod def _propertify_metadata(metadata, keys): - """Return a dict with any unrecognized keys placed in the nested - 'properties' dict. + """Move unknown keys to a nested 'properties' dict. + + :returns: a new dict with the keys moved. 
+ """ flattened = utils.flatten_dict(metadata) attributes, properties = utils.partition_dict(flattened, keys) From 42b139f740c08cce04d898c4ce7c85030733927f Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 20 Apr 2011 12:26:17 -0700 Subject: [PATCH 55/66] fixes per review --- nova/image/s3.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/image/s3.py b/nova/image/s3.py index 4a4ee44d822c..c38c58d9513a 100644 --- a/nova/image/s3.py +++ b/nova/image/s3.py @@ -25,8 +25,8 @@ import tarfile import tempfile from xml.etree import ElementTree -import eventlet import boto.s3.connection +import eventlet from nova import crypto from nova import exception @@ -44,6 +44,7 @@ flags.DEFINE_string('image_decryption_dir', '/tmp', class S3ImageService(service.BaseImageService): """Wraps an existing image service to support s3 based register.""" + def __init__(self, service=None, *args, **kwargs): if service is None: service = utils.import_object(FLAGS.image_service) From ce8fd6b5e3ef0c757c76bbe9db37c696a1d2c11c Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 20 Apr 2011 12:26:17 -0700 Subject: [PATCH 56/66] more changes per review --- nova/image/local.py | 5 ++--- nova/image/service.py | 10 +++++----- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/nova/image/local.py b/nova/image/local.py index d59c3970f321..50f00bee1739 100644 --- a/nova/image/local.py +++ b/nova/image/local.py @@ -57,9 +57,8 @@ class LocalImageService(service.BaseImageService): try: unhexed_image_id = int(image_dir, 16) except ValueError: - LOG.error( - _('%s is not in correct directory naming format'\ - % image_dir)) + LOG.error(_('%s is not in correct directory naming format') + % image_dir) else: images.append(unhexed_image_id) return images diff --git a/nova/image/service.py b/nova/image/service.py index 3e2357df8a56..ab6749049a12 100644 --- a/nova/image/service.py +++ b/nova/image/service.py @@ -49,8 +49,8 @@ class BaseImageService(object): def index(self, context): """List images. - :returnsl: a sequence of mappings with the following signature - {'id': opaque id of image, 'name': name of image} + :returns: a sequence of mappings with the following signature + {'id': opaque id of image, 'name': name of image} """ raise NotImplementedError @@ -102,7 +102,7 @@ class BaseImageService(object): :param data: a file-like object to hold binary image data :returns: a dict containing image metadata, writes image data to data. - :raises NotFound if the image does not exist + :raises: NotFound if the image does not exist """ raise NotImplementedError @@ -119,7 +119,7 @@ class BaseImageService(object): def update(self, context, image_id, metadata, data=None): """Update the given image metadata and data and return the metadata. - :raises NotFound if the image does not exist. + :raises: NotFound if the image does not exist. """ raise NotImplementedError @@ -127,7 +127,7 @@ class BaseImageService(object): def delete(self, context, image_id): """Delete the given image. - :raises NotFound if the image does not exist. + :raises: NotFound if the image does not exist. """ raise NotImplementedError From 2ea651dad0265807119716046767b85cf769ca05 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Wed, 20 Apr 2011 16:27:33 -0400 Subject: [PATCH 57/66] Exit early if tests fail, before pep8 is run. 
--- run_tests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/run_tests.sh b/run_tests.sh index 4f85dfe6dbd0..610cf1f27065 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -96,7 +96,7 @@ then fi fi -run_tests +run_tests || exit # Also run pep8 if no options were provided. if [ -z "$noseargs" ]; then From 7d8698ad551b756a9dfc7058e6d836de65a64945 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 20 Apr 2011 16:21:37 -0700 Subject: [PATCH 58/66] in doesn't work properly on instance_ref --- nova/api/ec2/cloud.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index bd4c9dcd4a90..4785d812ab71 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -187,7 +187,7 @@ class CloudController(object): 'mpi': mpi}} for image_type in ['kernel', 'ramdisk']: - if '%s_id' % image_type in instance_ref: + if instance_ref.get('%s_id' % image_type): ec2_id = self._image_ec2_id(instance_ref['%s_id' % image_type], self._image_type(image_type)) data['meta-data']['%s-id' % image_type] = ec2_id From 783cea4dc4497176b57b7a718a29bde102fb92bc Mon Sep 17 00:00:00 2001 From: Eldar Nugaev Date: Thu, 21 Apr 2011 04:31:17 +0400 Subject: [PATCH 59/66] style fix --- nova/virt/libvirt_conn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index d5a88ebedcba..a42433fed222 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -168,7 +168,7 @@ def _get_network_info(instance): networks = db.network_get_all_by_instance(admin_context, instance['id']) flavor = db.instance_type_get_by_id(admin_context, - instance['instance_type_id']) + instance['instance_type_id']) network_info = [] for network in networks: From 2217872ff5e8e5b53af0b38064a3cdbc2c783ebb Mon Sep 17 00:00:00 2001 From: Eldar Nugaev Date: Thu, 21 Apr 2011 05:22:09 +0400 Subject: [PATCH 60/66] pep8 cleaning --- nova/compute/manager.py | 47 ++++++++++--------- .../015_add_auto_assign_to_floating_ips.py | 1 + nova/network/api.py | 6 +-- 3 files changed, 28 insertions(+), 26 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index c1dc06557259..13a082b441ab 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -37,8 +37,6 @@ terminating it. import datetime import os -import random -import string import socket import sys import tempfile @@ -54,7 +52,7 @@ from nova import rpc from nova import utils from nova.compute import power_state from nova.virt import driver -from nova.network import api as network_api +from nova import network FLAGS = flags.FLAGS flags.DEFINE_string('instances_path', '$state_path/instances', @@ -74,8 +72,8 @@ flags.DEFINE_integer('live_migration_retry_count', 30, flags.DEFINE_integer("rescue_timeout", 0, "Automatically unrescue an instance after N seconds." 
" Set to 0 to disable.") -flags.DEFINE_bool('auto_assign_floating_ip', False, 'Autoassigning floating' - ' ip to VM') +flags.DEFINE_bool('auto_assign_floating_ip', False, + 'Autoassigning floating ip to VM') LOG = logging.getLogger('nova.compute.manager') @@ -135,7 +133,7 @@ class ComputeManager(manager.SchedulerDependentManager): self.network_manager = utils.import_object(FLAGS.network_manager) self.volume_manager = utils.import_object(FLAGS.volume_manager) - self.network_api = network_api.API() + self.network_api = network.API() super(ComputeManager, self).__init__(service_name="compute", *args, **kwargs) @@ -248,18 +246,18 @@ class ComputeManager(manager.SchedulerDependentManager): instance_id, power_state.SHUTDOWN) - if not FLAGS.stub_network: - if FLAGS.auto_assign_floating_ip: - public_ip = self.network_api.allocate_floating_ip(context) + if not FLAGS.stub_network and FLAGS.auto_assign_floating_ip: + public_ip = self.network_api.allocate_floating_ip(context) - self.db.floating_ip_set_auto_assigned(context, public_ip) - fixed_ip = self.db.fixed_ip_get_by_address(context, address) - floating_ip = self.db.floating_ip_get_by_address(context, - public_ip) + self.db.floating_ip_set_auto_assigned(context, public_ip) + fixed_ip = self.db.fixed_ip_get_by_address(context, address) + floating_ip = self.db.floating_ip_get_by_address(context, + public_ip) - self.network_api.associate_floating_ip(context, floating_ip, - fixed_ip, - affect_auto_assigned=True) + self.network_api.associate_floating_ip(context, + floating_ip, + fixed_ip, + affect_auto_assigned=True) self._update_state(context, instance_id) @exception.wrap_exception @@ -280,14 +278,17 @@ class ComputeManager(manager.SchedulerDependentManager): # NOTE(vish): Right now we don't really care if the ip is # disassociated. We may need to worry about # checking this later. 
- self.network_api.disassociate_floating_ip(context, address, - affect_auto_assigned=True) - if FLAGS.auto_assign_floating_ip \ - and floating_ip.get('auto_assigned'): + self.network_api.disassociate_floating_ip(context, + address, + True) + if (FLAGS.auto_assign_floating_ip + and floating_ip.get('auto_assigned')): LOG.debug(_("Deallocating floating ip %s"), - floating_ip['address'], context=context) - self.network_api.release_floating_ip(context, address, - affect_auto_assigned=True) + floating_ip['address'], + context=context) + self.network_api.release_floating_ip(context, + address, + True) address = fixed_ip['address'] if address: diff --git a/nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py b/nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py index f3767b29ffc9..64d6df77f708 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py @@ -22,6 +22,7 @@ from migrate import * meta = MetaData() + c_auto_assigned = Column('auto_assigned', Boolean, default=False) diff --git a/nova/network/api.py b/nova/network/api.py index 61db646ae142..1d8193b287da 100644 --- a/nova/network/api.py +++ b/nova/network/api.py @@ -52,7 +52,7 @@ class API(base.Base): "args": {"project_id": context.project_id}}) def release_floating_ip(self, context, address, - affect_auto_assigned = False): + affect_auto_assigned=False): floating_ip = self.db.floating_ip_get_by_address(context, address) if not affect_auto_assigned and floating_ip.get('auto_assigned'): return @@ -66,7 +66,7 @@ class API(base.Base): "args": {"floating_address": floating_ip['address']}}) def associate_floating_ip(self, context, floating_ip, fixed_ip, - affect_auto_assigned = False): + affect_auto_assigned=False): if isinstance(fixed_ip, str) or isinstance(fixed_ip, unicode): fixed_ip = self.db.fixed_ip_get_by_address(context, fixed_ip) floating_ip = self.db.floating_ip_get_by_address(context, floating_ip) @@ -97,7 +97,7 @@ class API(base.Base): "fixed_address": fixed_ip['address']}}) def disassociate_floating_ip(self, context, address, - affect_auto_assigned = False): + affect_auto_assigned=False): floating_ip = self.db.floating_ip_get_by_address(context, address) if not affect_auto_assigned and floating_ip.get('auto_assigned'): return From ba9edf8d6d93290d1f1e85bb3a51e3a69e3f0822 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 20 Apr 2011 21:06:56 -0700 Subject: [PATCH 61/66] put up and down in the right dir --- doc/source/{ => devref}/down.sh | 0 doc/source/{ => devref}/up.sh | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename doc/source/{ => devref}/down.sh (100%) rename doc/source/{ => devref}/up.sh (100%) diff --git a/doc/source/down.sh b/doc/source/devref/down.sh similarity index 100% rename from doc/source/down.sh rename to doc/source/devref/down.sh diff --git a/doc/source/up.sh b/doc/source/devref/up.sh similarity index 100% rename from doc/source/up.sh rename to doc/source/devref/up.sh From e1f37b81e805c087947c87a9bc341dd60e7e481c Mon Sep 17 00:00:00 2001 From: Eldar Nugaev Date: Thu, 21 Apr 2011 15:49:47 +0400 Subject: [PATCH 62/66] style fixing --- nova/compute/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 13a082b441ab..864b3a816872 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -48,11 +48,11 @@ from nova import exception from nova 
import flags from nova import log as logging from nova import manager +from nova import network from nova import rpc from nova import utils from nova.compute import power_state from nova.virt import driver -from nova import network FLAGS = flags.FLAGS flags.DEFINE_string('instances_path', '$state_path/instances', From ea11033935192ee26ea6d0d0dad47a0a624b17a0 Mon Sep 17 00:00:00 2001 From: Jimmy Bergman Date: Thu, 21 Apr 2011 15:23:36 +0200 Subject: [PATCH 63/66] Add privateIpAddress and ipAddress to EC2 API DescribeInstances response. --- nova/api/ec2/cloud.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 4785d812ab71..4aa9732909ac 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -726,7 +726,9 @@ class CloudController(object): instance['mac_address']) i['privateDnsName'] = fixed_addr + i['privateIpAddress'] = fixed_addr i['publicDnsName'] = floating_addr + i['ipAddress'] = floating_addr or fixed_addr i['dnsName'] = i['publicDnsName'] or i['privateDnsName'] i['keyName'] = instance['key_name'] From e6b76ce6886a1404739a972d106248a67df4f02a Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 21 Apr 2011 07:35:30 -0700 Subject: [PATCH 64/66] use simpler interfaces --- doc/source/devref/interfaces | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/doc/source/devref/interfaces b/doc/source/devref/interfaces index 2aae39558ea9..b7116aeb72b6 100644 --- a/doc/source/devref/interfaces +++ b/doc/source/devref/interfaces @@ -1,18 +1,17 @@ +# This file describes the network interfaces available on your system +# and how to activate them. For more information, see interfaces(5). + # The loopback network interface auto lo iface lo inet loopback # The primary network interface +auto eth0 +iface eth0 inet manual + up ifconfig $IFACE 0.0.0.0 up + down ifconfig $IFACE down + auto br0 iface br0 inet dhcp bridge_ports eth0 - bridge_fd 9 ## from the libvirt docs (forward delay time) - bridge_hello 2 ## from the libvirt docs (hello time) - bridge_maxage 12 ## from the libvirt docs (maximum message age) - bridge_stp off ## from the libvirt docs (spanning tree protocol) -iface eth0 inet manual - up ifconfig $IFACE 0.0.0.0 up - up ip link set $IFACE promisc on - down ip link set $IFACE promisc off - down ifconfig $IFACE down From 2d82195d59240ea53d4726879d2a28a5872e58f7 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 21 Apr 2011 07:39:49 -0700 Subject: [PATCH 65/66] use vpn filter in basic filtering so cloudpipe works with iptables driver --- nova/virt/libvirt_conn.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 9c8d64446e75..3dcb8ae42dda 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -1734,11 +1734,16 @@ class NWFilterFirewall(FirewallDriver): logging.info('ensuring static filters') self._ensure_static_filters() + if instance['image_id'] == str(FLAGS.vpn_image_id): + base_filter = 'nova-vpn' + else: + base_filter = 'nova-base' + for (network, mapping) in network_info: nic_id = mapping['mac'].replace(':', '') instance_filter_name = self._instance_filter_name(instance, nic_id) self._define_filter(self._filter_container(instance_filter_name, - ['nova-base'])) + [base_filter])) def _ensure_static_filters(self): if self.static_filters_configured: @@ -1749,11 +1754,12 @@ class NWFilterFirewall(FirewallDriver): 'no-ip-spoofing', 'no-arp-spoofing', 
                                                      'allow-dhcp-server']))
+        self._define_filter(self._filter_container('nova-vpn',
+                                                    ['allow-dhcp-server']))
         self._define_filter(self.nova_base_ipv4_filter)
         self._define_filter(self.nova_base_ipv6_filter)
         self._define_filter(self.nova_dhcp_filter)
         self._define_filter(self.nova_ra_filter)
-        self._define_filter(self.nova_vpn_filter)
         if FLAGS.allow_project_net_traffic:
             self._define_filter(self.nova_project_filter)
         if FLAGS.use_ipv6:
@@ -1767,14 +1773,6 @@ class NWFilterFirewall(FirewallDriver):
                  ''.join(["<filterref filter='%s'/>" % (f,) for f in filters]))
         return xml
 
-    nova_vpn_filter = '''<filter name='nova-vpn' chain='root'>
-                           <uuid>2086015e-cf03-11df-8c5d-080027c27973</uuid>
-                           <filterref filter='allow-dhcp-server'/>
-                           <filterref filter='nova-base-ipv4'/>
-                           <filterref filter='nova-base-ipv6'/>
-                         </filter>'''
-
     def nova_base_ipv4_filter(self):
         retval = "<filter name='nova-base-ipv4' chain='ipv4'>"
         for protocol in ['tcp', 'udp', 'icmp']:

From 5904cba617038600f3d8e7f65c71485abb163927 Mon Sep 17 00:00:00 2001
From: Eldar Nugaev
Date: Thu, 21 Apr 2011 22:23:40 +0400
Subject: [PATCH 66/66] style cleaning

---
 .../versions/015_add_auto_assign_to_floating_ips.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py b/nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py
index 64d6df77f708..29b26b3dd9ae 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py
@@ -31,7 +31,9 @@ def upgrade(migrate_engine):
     # bind migrate_engine to your metadata
     meta.bind = migrate_engine
 
-    floating_ips = Table('floating_ips', meta, autoload=True,
-                         autoload_with=migrate_engine)
+    floating_ips = Table('floating_ips',
+                         meta,
+                         autoload=True,
+                         autoload_with=migrate_engine)
     floating_ips.create_column(c_auto_assigned)
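
The final hunk above only re-wraps the Table(...) call inside upgrade() of migration 015. For readers new to sqlalchemy-migrate, here is a rough sketch of how a matching downgrade() for such a script is usually written. This is an illustration only, not part of the patch series and not taken from the Nova tree; it assumes the changeset extension pulled in by "from migrate import *", which adds create_column()/drop_column() to Table.

    from sqlalchemy import MetaData, Table
    from migrate import *  # changeset extension adds drop_column() to Table

    meta = MetaData()


    def downgrade(migrate_engine):
        # Bind the engine, mirroring upgrade() in the hunk above.
        meta.bind = migrate_engine

        floating_ips = Table('floating_ips',
                             meta,
                             autoload=True,
                             autoload_with=migrate_engine)
        # Reverse of create_column(): remove the auto_assigned column so the
        # schema can roll back from version 015 to 014.
        floating_ips.drop_column('auto_assigned')

Dropping the column is the conventional inverse of the create_column() call shown in the patch; the actual downgrade in the Nova repository may differ.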