From 5326d7f76b48e93bd74d9539febe1f41bbf3f286 Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya <vishvananda@gmail.com>
Date: Wed, 28 Jul 2010 22:41:49 -0700
Subject: [PATCH 01/36] Fix deprecation warning in AuthManager. __new__ isn't
 allowed to take args.

---
 nova/auth/manager.py | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index 2da53a7366da..2360c1a5c26a 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -24,7 +24,6 @@ import logging
 import os
 import shutil
 import string
-import sys
 import tempfile
 import uuid
 import zipfile
@@ -322,11 +321,10 @@ class AuthManager(object):
     need to be more accessible, such as vpn ips and ports.
     """
     _instance=None
-    def __new__(cls, *args, **kwargs):
+    def __new__(cls):
         """Returns the AuthManager singleton"""
         if not cls._instance:
-            cls._instance = super(AuthManager, cls).__new__(
-                    cls, *args, **kwargs)
+            cls._instance = super(AuthManager, cls).__new__(cls)
         return cls._instance
 
     def __init__(self, driver=None, *args, **kwargs):

From f8e7f79833b545a2812d0161f769271621fdf33c Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya <vishvananda@gmail.com>
Date: Wed, 28 Jul 2010 23:19:07 -0700
Subject: [PATCH 02/36] Restore __new__ args for the singleton and add an extra
 exception check

---
 nova/auth/manager.py | 2 +-
 nova/utils.py        | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index 2360c1a5c26a..b690176bbab1 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -321,7 +321,7 @@ class AuthManager(object):
     need to be more accessible, such as vpn ips and ports.
     """
     _instance=None
-    def __new__(cls):
+    def __new__(cls, *args, **kwargs):
         """Returns the AuthManager singleton"""
         if not cls._instance:
             cls._instance = super(AuthManager, cls).__new__(cls)
diff --git a/nova/utils.py b/nova/utils.py
index 0016b656e3e5..0b23de7cd3cb 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -41,7 +41,7 @@ def import_class(import_str):
     try:
         __import__(mod_str)
         return getattr(sys.modules[mod_str], class_str)
-    except (ImportError, AttributeError):
+    except (ImportError, ValueError, AttributeError):
         raise exception.NotFound('Class %s cannot be found' % class_str)
 
 def fetchfile(url, target):

From 16e89bad15f5665a5f46c0bdcdfab1b7f3df4039 Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya <vishvananda@gmail.com>
Date: Thu, 29 Jul 2010 17:58:44 -0700
Subject: [PATCH 03/36] flag for libvirt type

---
 nova/compute/libvirt.xml.template |  2 +-
 nova/virt/libvirt_conn.py         | 13 +++++++++----
 2 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/nova/compute/libvirt.xml.template b/nova/compute/libvirt.xml.template
index a763e8a4d7a4..a17cd8fae923 100644
--- a/nova/compute/libvirt.xml.template
+++ b/nova/compute/libvirt.xml.template
@@ -1,7 +1,7 @@
 <domain type='kvm'>
     <name>%(name)s</name>
     <os>
-    <type>hvm</type>
+    <type>%(type)s</type>
         <kernel>%(basepath)s/kernel</kernel>
         <initrd>%(basepath)s/ramdisk</initrd>
         <cmdline>root=/dev/vda1 console=ttyS0</cmdline>
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index c545e4190de8..e37444f63eca 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -48,6 +48,10 @@ flags.DEFINE_string('libvirt_xml_template',
                     utils.abspath('compute/libvirt.xml.template'),
                     'Libvirt XML Template')
 
+flags.DEFINE_string('libvirt_type',
+                    'hvm',
+                    'Libvirt virtualization type (hvm, qemu, etc)')
+
 def get_connection(read_only):
     # These are loaded late so that there's no need to install these
     # libraries when not using libvirt.
@@ -235,6 +239,7 @@ class LibvirtConnection(object):
 
         # TODO(termie): lazy lazy hack because xml is annoying
         xml_info['nova'] = json.dumps(instance.datamodel.copy())
+        xml_info['type'] = FLAGS.libvirt_type
         libvirt_xml = libvirt_xml % xml_info
         logging.debug("Finished the toXML method")
 
@@ -255,7 +260,7 @@ class LibvirtConnection(object):
         """
         Note that this function takes an instance ID, not an Instance, so
         that it can be called by monitor.
-        
+
         Returns a list of all block devices for this domain.
         """
         domain = self._conn.lookupByName(instance_id)
@@ -298,7 +303,7 @@ class LibvirtConnection(object):
         """
         Note that this function takes an instance ID, not an Instance, so
         that it can be called by monitor.
-        
+
         Returns a list of all network interfaces for this instance.
         """
         domain = self._conn.lookupByName(instance_id)
@@ -341,7 +346,7 @@ class LibvirtConnection(object):
         """
         Note that this function takes an instance ID, not an Instance, so
         that it can be called by monitor.
-        """        
+        """
         domain = self._conn.lookupByName(instance_id)
         return domain.blockStats(disk)
 
@@ -350,6 +355,6 @@ class LibvirtConnection(object):
         """
         Note that this function takes an instance ID, not an Instance, so
         that it can be called by monitor.
-        """        
+        """
         domain = self._conn.lookupByName(instance_id)
         return domain.interfaceStats(interface)

From 80b79a923bc6fb331daaf6960e6353c700b89c41 Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya <vishvananda@gmail.com>
Date: Fri, 30 Jul 2010 11:19:03 -0700
Subject: [PATCH 04/36] use the right tag

---
 nova/compute/libvirt.xml.template | 5 ++---
 nova/virt/libvirt_conn.py         | 5 ++---
 2 files changed, 4 insertions(+), 6 deletions(-)

diff --git a/nova/compute/libvirt.xml.template b/nova/compute/libvirt.xml.template
index a17cd8fae923..307f9d03adb5 100644
--- a/nova/compute/libvirt.xml.template
+++ b/nova/compute/libvirt.xml.template
@@ -1,7 +1,7 @@
-<domain type='kvm'>
+<domain type='%(type)s'>
     <name>%(name)s</name>
     <os>
-    <type>%(type)s</type>
+    <type>hvm</type>
         <kernel>%(basepath)s/kernel</kernel>
         <initrd>%(basepath)s/ramdisk</initrd>
         <cmdline>root=/dev/vda1 console=ttyS0</cmdline>
@@ -12,7 +12,6 @@
     <memory>%(memory_kb)s</memory>
     <vcpu>%(vcpus)s</vcpu>
     <devices>
-        <emulator>/usr/bin/kvm</emulator>
         <disk type='file'>
             <source file='%(basepath)s/disk'/>
             <target dev='vda' bus='virtio'/>
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index e37444f63eca..74ab1f8951f9 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -25,7 +25,6 @@ import json
 import logging
 import os.path
 import shutil
-import sys
 
 from twisted.internet import defer
 from twisted.internet import task
@@ -49,8 +48,8 @@ flags.DEFINE_string('libvirt_xml_template',
                     'Libvirt XML Template')
 
 flags.DEFINE_string('libvirt_type',
-                    'hvm',
-                    'Libvirt virtualization type (hvm, qemu, etc)')
+                    'kvm',
+                    'Libvirt domain type (kvm, qemu, etc)')
 
 def get_connection(read_only):
     # These are loaded late so that there's no need to install these

From 1934cbb0413f074213b1aeeda605d9b49055c581 Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya <vishvananda@gmail.com>
Date: Fri, 30 Jul 2010 15:19:41 -0700
Subject: [PATCH 05/36] Fixes access key passing in curl statement.

---
 nova/auth/manager.py      |  4 ++++
 nova/endpoint/images.py   | 18 +++++++++++-------
 nova/virt/images.py       | 14 +++++++++-----
 nova/virt/libvirt_conn.py | 16 ++++++++--------
 4 files changed, 32 insertions(+), 20 deletions(-)

diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index 2da53a7366da..ca9f4fc86caa 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -419,6 +419,10 @@ class AuthManager(object):
                 raise exception.NotAuthorized('Signature does not match')
         return (user, project)
 
+    def get_access_key(self, user, project):
+        """Get an access key that includes user and project"""
+        return "%s:%s" % (User.safe_id(user), Project.safe_id(project))
+
     def is_superuser(self, user):
         """Checks for superuser status, allowing user to bypass rbac
 
diff --git a/nova/endpoint/images.py b/nova/endpoint/images.py
index 32f7cc228609..fe7cb5d113ac 100644
--- a/nova/endpoint/images.py
+++ b/nova/endpoint/images.py
@@ -27,6 +27,7 @@ import urllib
 
 from nova import flags
 from nova import utils
+from nova.auth import manager
 
 
 FLAGS = flags.FLAGS
@@ -75,13 +76,16 @@ def deregister(context, image_id):
             query_args=qs({'image_id': image_id}))
 
 def conn(context):
-    return boto.s3.connection.S3Connection (
-        aws_access_key_id=str('%s:%s' % (context.user.access, context.project.name)),
-        aws_secret_access_key=str(context.user.secret),
-        is_secure=False,
-        calling_format=boto.s3.connection.OrdinaryCallingFormat(),
-        port=FLAGS.s3_port,
-        host=FLAGS.s3_host)
+    access = manager.AuthManager().get_access_key(context.user,
+                                                  context.project)
+    secret = str(context.user.secret)
+    calling = boto.s3.connection.OrdinaryCallingFormat()
+    return boto.s3.connection.S3Connection(aws_access_key_id=access,
+                                           aws_secret_access_key=secret,
+                                           is_secure=False,
+                                           calling_format=calling,
+                                           port=FLAGS.s3_port,
+                                           host=FLAGS.s3_host)
 
 
 def qs(params):
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 92210e242d34..872eb6d6a91e 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -27,6 +27,7 @@ import time
 from nova import flags
 from nova import process
 from nova.auth import signer
+from nova.auth import manager
 
 FLAGS = flags.FLAGS
 
@@ -34,14 +35,14 @@ flags.DEFINE_bool('use_s3', True,
                   'whether to get images from s3 or use local copy')
 
 
-def fetch(image, path, user):
+def fetch(image, path, user, project):
     if FLAGS.use_s3:
         f = _fetch_s3_image
     else:
         f = _fetch_local_image
-    return f(image, path, user)
+    return f(image, path, user, project)
 
-def _fetch_s3_image(image, path, user):
+def _fetch_s3_image(image, path, user, project):
     url = _image_url('%s/image' % image)
 
     # This should probably move somewhere else, like e.g. a download_as
@@ -51,8 +52,11 @@ def _fetch_s3_image(image, path, user):
     headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
 
     uri = '/' + url.partition('/')[2]
-    auth = signer.Signer(user.secret.encode()).s3_authorization(headers, 'GET', uri)
-    headers['Authorization'] = 'AWS %s:%s' % (user.access, auth)
+    access = manager.AuthManager().get_access_key(user, project)
+    signature = signer.Signer(user.secret.encode()).s3_authorization(headers,
+                                                                     'GET',
+                                                                     uri)
+    headers['Authorization'] = 'AWS %s:%s' % (access, signature)
 
     cmd = ['/usr/bin/curl', '--silent', url]
     for (k,v) in headers.iteritems():
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index c545e4190de8..b3d514add71d 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -25,7 +25,6 @@ import json
 import logging
 import os.path
 import shutil
-import sys
 
 from twisted.internet import defer
 from twisted.internet import task
@@ -187,12 +186,13 @@ class LibvirtConnection(object):
         f.close()
 
         user = manager.AuthManager().get_user(data['user_id'])
+        project = manager.AuthManager().get_project(data['project_id'])
         if not os.path.exists(basepath('disk')):
-           yield images.fetch(data['image_id'], basepath('disk-raw'), user)
+           yield images.fetch(data['image_id'], basepath('disk-raw'), user, project)
         if not os.path.exists(basepath('kernel')):
-           yield images.fetch(data['kernel_id'], basepath('kernel'), user)
+           yield images.fetch(data['kernel_id'], basepath('kernel'), user, project)
         if not os.path.exists(basepath('ramdisk')):
-           yield images.fetch(data['ramdisk_id'], basepath('ramdisk'), user)
+           yield images.fetch(data['ramdisk_id'], basepath('ramdisk'), user, project)
 
         execute = lambda cmd, input=None: \
                   process.simple_execute(cmd=cmd,
@@ -255,7 +255,7 @@ class LibvirtConnection(object):
         """
         Note that this function takes an instance ID, not an Instance, so
         that it can be called by monitor.
-        
+
         Returns a list of all block devices for this domain.
         """
         domain = self._conn.lookupByName(instance_id)
@@ -298,7 +298,7 @@ class LibvirtConnection(object):
         """
         Note that this function takes an instance ID, not an Instance, so
         that it can be called by monitor.
-        
+
         Returns a list of all network interfaces for this instance.
         """
         domain = self._conn.lookupByName(instance_id)
@@ -341,7 +341,7 @@ class LibvirtConnection(object):
         """
         Note that this function takes an instance ID, not an Instance, so
         that it can be called by monitor.
-        """        
+        """
         domain = self._conn.lookupByName(instance_id)
         return domain.blockStats(disk)
 
@@ -350,6 +350,6 @@ class LibvirtConnection(object):
         """
         Note that this function takes an instance ID, not an Instance, so
         that it can be called by monitor.
-        """        
+        """
         domain = self._conn.lookupByName(instance_id)
         return domain.interfaceStats(interface)

From 490a97783b97c5753692099c4d7f609e29a8f74e Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya <vishvananda@gmail.com>
Date: Fri, 30 Jul 2010 15:36:11 -0700
Subject: [PATCH 06/36] use user.access instead of user.id

---
 nova/auth/manager.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index ca9f4fc86caa..bf3a3556dd64 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -421,7 +421,9 @@ class AuthManager(object):
 
     def get_access_key(self, user, project):
         """Get an access key that includes user and project"""
-        return "%s:%s" % (User.safe_id(user), Project.safe_id(project))
+        if not isinstance(user, User):
+            user = self.get_user(user)
+        return "%s:%s" % (user.access, Project.safe_id(project))
 
     def is_superuser(self, user):
         """Checks for superuser status, allowing user to bypass rbac

From 04d6595d9b4c77f1fcaf01a7763caf11046ab164 Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya <vishvananda@gmail.com>
Date: Fri, 30 Jul 2010 16:15:09 -0700
Subject: [PATCH 07/36] Another attempt at fixing the boto import compatibility

---
 nova/auth/signer.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/nova/auth/signer.py b/nova/auth/signer.py
index 3b9bc8f2c823..634f22f0d783 100644
--- a/nova/auth/signer.py
+++ b/nova/auth/signer.py
@@ -48,7 +48,8 @@ import hashlib
 import hmac
 import logging
 import urllib
-import boto
+import boto       # NOTE(vish): for new boto
+import boto.utils # NOTE(vish): for old boto
 
 from nova.exception import Error
 

From ed76ee9e823071c1c94db10907cc6a2bd725a999 Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya <vishvananda@gmail.com>
Date: Fri, 30 Jul 2010 18:32:12 -0700
Subject: [PATCH 08/36] Fixes nova volumes.  The async commands yield properly.
  Simplified the call to create volume in cloud.  Added some notes

---
 nova/endpoint/cloud.py | 11 +++++------
 nova/volume/service.py | 43 +++++++++++++++++++++++-------------------
 2 files changed, 29 insertions(+), 25 deletions(-)

diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py
index 67fc04502eed..0ee278f84065 100644
--- a/nova/endpoint/cloud.py
+++ b/nova/endpoint/cloud.py
@@ -295,17 +295,16 @@ class CloudController(object):
         return v
 
     @rbac.allow('projectmanager', 'sysadmin')
+    @defer.inlineCallbacks
     def create_volume(self, context, size, **kwargs):
         # TODO(vish): refactor this to create the volume object here and tell service to create it
-        res = rpc.call(FLAGS.volume_topic, {"method": "create_volume",
+        result = yield rpc.call(FLAGS.volume_topic, {"method": "create_volume",
                                  "args" : {"size": size,
                                            "user_id": context.user.id,
                                            "project_id": context.project.id}})
-        def _format_result(result):
-            volume = self._get_volume(context, result['result'])
-            return {'volumeSet': [self.format_volume(context, volume)]}
-        res.addCallback(_format_result)
-        return res
+        # NOTE(vish): rpc returned value is in the result key in the dictionary
+        volume = self._get_volume(context, result['result'])
+        defer.returnValue({'volumeSet': [self.format_volume(context, volume)]})
 
     def _get_address(self, context, public_ip):
         # FIXME(vish) this should move into network.py
diff --git a/nova/volume/service.py b/nova/volume/service.py
index 87a47f40abe7..54496ad8dd1b 100644
--- a/nova/volume/service.py
+++ b/nova/volume/service.py
@@ -104,6 +104,7 @@ class VolumeService(service.Service):
                 pass
 
     @validate.rangetest(size=(0, 1000))
+    @defer.inlineCallbacks
     def create_volume(self, size, user_id, project_id):
         """
         Creates an exported volume (fake or real),
@@ -111,11 +112,12 @@ class VolumeService(service.Service):
         Volume at this point has size, owner, and zone.
         """
         logging.debug("Creating volume of size: %s" % (size))
-        vol = self.volume_class.create(size, user_id, project_id)
+        vol = yield self.volume_class.create(size, user_id, project_id)
         datastore.Redis.instance().sadd('volumes', vol['volume_id'])
         datastore.Redis.instance().sadd('volumes:%s' % (FLAGS.storage_name), vol['volume_id'])
-        self._restart_exports()
-        return vol['volume_id']
+        logging.debug("restarting exports")
+        yield self._restart_exports()
+        defer.returnValue(vol['volume_id'])
 
     def by_node(self, node_id):
         """ returns a list of volumes for a node """
@@ -128,6 +130,7 @@ class VolumeService(service.Service):
         for volume_id in datastore.Redis.instance().smembers('volumes'):
             yield self.volume_class(volume_id=volume_id)
 
+    @defer.inlineCallbacks
     def delete_volume(self, volume_id):
         logging.debug("Deleting volume with id of: %s" % (volume_id))
         vol = get_volume(volume_id)
@@ -135,19 +138,18 @@ class VolumeService(service.Service):
             raise exception.Error("Volume is still attached")
         if vol['node_name'] != FLAGS.storage_name:
             raise exception.Error("Volume is not local to this node")
-        vol.destroy()
+        yield vol.destroy()
         datastore.Redis.instance().srem('volumes', vol['volume_id'])
         datastore.Redis.instance().srem('volumes:%s' % (FLAGS.storage_name), vol['volume_id'])
-        return True
+        defer.returnValue(True)
 
     @defer.inlineCallbacks
     def _restart_exports(self):
         if FLAGS.fake_storage:
             return
-        yield process.simple_execute(
-                "sudo vblade-persist auto all")
-        yield process.simple_execute(
-                "sudo vblade-persist start all")
+        yield process.simple_execute("sudo vblade-persist auto all")
+        # NOTE(vish): this command sometimes sends output to stderr for warnings
+        yield process.simple_execute("sudo vblade-persist start all", error_ok=1)
 
     @defer.inlineCallbacks
     def _init_volume_group(self):
@@ -173,6 +175,7 @@ class Volume(datastore.BasicModel):
         return {"volume_id": self.volume_id}
 
     @classmethod
+    @defer.inlineCallbacks
     def create(cls, size, user_id, project_id):
         volume_id = utils.generate_uid('vol')
         vol = cls(volume_id)
@@ -188,13 +191,12 @@ class Volume(datastore.BasicModel):
         vol['attach_status'] = "detached"  # attaching | attached | detaching | detached
         vol['delete_on_termination'] = 'False'
         vol.save()
-        vol.create_lv()
-        vol._setup_export()
+        yield vol._create_lv()
+        yield vol._setup_export()
         # TODO(joshua) - We need to trigger a fanout message for aoe-discover on all the nodes
-        # TODO(joshua
         vol['status'] = "available"
         vol.save()
-        return vol
+        defer.returnValue(vol)
 
     def start_attach(self, instance_id, mountpoint):
         """ """
@@ -223,16 +225,18 @@ class Volume(datastore.BasicModel):
         self['attach_status'] = "detached"
         self.save()
 
+    @defer.inlineCallbacks
     def destroy(self):
         try:
-            self._remove_export()
-        except:
+            yield self._remove_export()
+        except Exception as ex:
+            logging.debug("Ingnoring failure to remove export %s" % ex)
             pass
-        self._delete_lv()
+        yield self._delete_lv()
         super(Volume, self).destroy()
 
     @defer.inlineCallbacks
-    def create_lv(self):
+    def _create_lv(self):
         if str(self['size']) == '0':
             sizestr = '100M'
         else:
@@ -248,13 +252,14 @@ class Volume(datastore.BasicModel):
                 "sudo lvremove -f %s/%s" % (FLAGS.volume_group,
                                             self['volume_id']))
 
+    @defer.inlineCallbacks
     def _setup_export(self):
         (shelf_id, blade_id) = get_next_aoe_numbers()
         self['aoe_device'] = "e%s.%s" % (shelf_id, blade_id)
         self['shelf_id'] = shelf_id
         self['blade_id'] = blade_id
         self.save()
-        self._exec_export()
+        yield self._exec_export()
 
     @defer.inlineCallbacks
     def _exec_export(self):
@@ -277,7 +282,7 @@ class Volume(datastore.BasicModel):
 
 
 class FakeVolume(Volume):
-    def create_lv(self):
+    def _create_lv(self):
         pass
 
     def _exec_export(self):

From a3cc377f9dbe57195ef5f49f3f02a2178dc50cb1 Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya <vishvananda@gmail.com>
Date: Fri, 30 Jul 2010 19:33:07 -0700
Subject: [PATCH 09/36] Fix Tests

---
 nova/tests/volume_unittest.py | 20 ++++++++++----------
 nova/volume/service.py        |  2 +-
 2 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/nova/tests/volume_unittest.py b/nova/tests/volume_unittest.py
index b536ac383eea..0f4f0e34d268 100644
--- a/nova/tests/volume_unittest.py
+++ b/nova/tests/volume_unittest.py
@@ -42,15 +42,14 @@ class VolumeTestCase(test.TrialTestCase):
         vol_size = '0'
         user_id = 'fake'
         project_id = 'fake'
-        volume_id = self.volume.create_volume(vol_size, user_id, project_id)
+        volume_id = yield self.volume.create_volume(vol_size, user_id, project_id)
         # TODO(termie): get_volume returns differently than create_volume
         self.assertEqual(volume_id,
                          volume_service.get_volume(volume_id)['volume_id'])
 
         rv = self.volume.delete_volume(volume_id)
-        self.assertRaises(exception.Error,
-                          volume_service.get_volume,
-                          volume_id)
+        self.assertFailure(volume_service.get_volume(volume_id),
+                           exception.Error)
 
     def test_too_big_volume(self):
         vol_size = '1001'
@@ -68,13 +67,14 @@ class VolumeTestCase(test.TrialTestCase):
         total_slots = FLAGS.slots_per_shelf * num_shelves
         vols = []
         for i in xrange(total_slots):
-            vid = self.volume.create_volume(vol_size, user_id, project_id)
+            vid = yield self.volume.create_volume(vol_size, user_id, project_id)
             vols.append(vid)
-        self.assertRaises(volume_service.NoMoreVolumes,
-                          self.volume.create_volume,
-                          vol_size, user_id, project_id)
+        self.assertFailure(self.volume.create_volume(vol_size,
+                                                     user_id,
+                                                     project_id),
+                           volume_service.NoMoreVolumes)
         for id in vols:
-            self.volume.delete_volume(id)
+            yield self.volume.delete_volume(id)
 
     def test_run_attach_detach_volume(self):
         # Create one volume and one compute to test with
@@ -83,7 +83,7 @@ class VolumeTestCase(test.TrialTestCase):
         user_id = "fake"
         project_id = 'fake'
         mountpoint = "/dev/sdf"
-        volume_id = self.volume.create_volume(vol_size, user_id, project_id)
+        volume_id = yield self.volume.create_volume(vol_size, user_id, project_id)
 
         volume_obj = volume_service.get_volume(volume_id)
         volume_obj.start_attach(instance_id, mountpoint)
diff --git a/nova/volume/service.py b/nova/volume/service.py
index 54496ad8dd1b..e12f675a772c 100644
--- a/nova/volume/service.py
+++ b/nova/volume/service.py
@@ -103,8 +103,8 @@ class VolumeService(service.Service):
             except Exception, err:
                 pass
 
-    @validate.rangetest(size=(0, 1000))
     @defer.inlineCallbacks
+    @validate.rangetest(size=(0, 1000))
     def create_volume(self, size, user_id, project_id):
         """
         Creates an exported volume (fake or real),

From 83c4a429d29b7d69128d90504f6febc2efe1d3a3 Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya <vishvananda@gmail.com>
Date: Tue, 3 Aug 2010 01:29:31 -0700
Subject: [PATCH 10/36] Fixed instance model associations to host (node) and
 added association to ip

---
 nova/compute/model.py        |  44 ++++++------
 nova/datastore.py            |  12 ++--
 nova/tests/model_unittest.py | 135 +++++++++++++++++------------------
 3 files changed, 95 insertions(+), 96 deletions(-)

diff --git a/nova/compute/model.py b/nova/compute/model.py
index 212830d3c589..7dd130ca381f 100644
--- a/nova/compute/model.py
+++ b/nova/compute/model.py
@@ -41,9 +41,6 @@ True
 """
 
 import datetime
-import logging
-import time
-import redis
 import uuid
 
 from nova import datastore
@@ -72,19 +69,22 @@ class InstanceDirectory(object):
         for instance_id in datastore.Redis.instance().smembers('project:%s:instances' % project):
             yield Instance(instance_id)
 
-    def by_node(self, node_id):
+    @datastore.absorb_connection_error
+    def by_node(self, node):
         """returns a list of instances for a node"""
+        for instance_id in datastore.Redis.instance().smembers('node:%s:instances' % node):
+            yield Instance(instance_id)
 
-        for instance in self.all:
-            if instance['node_name'] == node_id:
-                yield instance
-
-    def by_ip(self, ip_address):
+    def by_ip(self, ip):
         """returns an instance object that is using the IP"""
-        for instance in self.all:
-            if instance['private_dns_name'] == ip_address:
-                return instance
-        return None
+        # NOTE(vish): The ip association should be just a single value, but
+        #             to maintain consistency it is using the standard
+        #             association and the ugly method for retrieving
+        #             the first item in the set below.
+        result = datastore.Redis.instance().smembers('ip:%s:instances' % ip)
+        if not result:
+            return None
+        return Instance(list(result)[0])
 
     def by_volume(self, volume_id):
         """returns the instance a volume is attached to"""
@@ -122,7 +122,8 @@ class Instance(datastore.BasicModel):
                 'instance_id': self.instance_id,
                 'node_name': 'unassigned',
                 'project_id': 'unassigned',
-                'user_id': 'unassigned'}
+                'user_id': 'unassigned',
+                'private_dns_name': 'unassigned'}
 
     @property
     def identifier(self):
@@ -148,19 +149,22 @@ class Instance(datastore.BasicModel):
         """Call into superclass to save object, then save associations"""
         # NOTE(todd): doesn't track migration between projects/nodes,
         #             it just adds the first one
-        should_update_project = self.is_new_record()
-        should_update_node = self.is_new_record()
+        is_new = self.is_new_record()
+        node_set = (self.state['node_name'] != 'unassigned' and
+                    self.initial_state['node_name'] == 'unassigned')
         success = super(Instance, self).save()
-        if success and should_update_project:
+        if success and is_new:
             self.associate_with("project", self.project)
-        if success and should_update_node:
-            self.associate_with("node", self['node_name'])
+            self.associate_with("ip", self.state['private_dns_name'])
+        if success and node_set:
+            self.associate_with("node", self.state['node_name'])
         return True
 
     def destroy(self):
         """Destroy associations, then destroy the object"""
         self.unassociate_with("project", self.project)
-        self.unassociate_with("node", self['node_name'])
+        self.unassociate_with("node", self.state['node_name'])
+        self.unassociate_with("ip", self.state['private_dns_name'])
         return super(Instance, self).destroy()
 
 class Host(datastore.BasicModel):
diff --git a/nova/datastore.py b/nova/datastore.py
index 9c259233470d..51ef7a758197 100644
--- a/nova/datastore.py
+++ b/nova/datastore.py
@@ -90,13 +90,15 @@ class BasicModel(object):
 
     @absorb_connection_error
     def __init__(self):
-        self.initial_state = {}
-        self.state = Redis.instance().hgetall(self.__redis_key)
-        if self.state:
-            self.initial_state = self.state
+        state = Redis.instance().hgetall(self.__redis_key)
+        if state:
+            self.initial_state = state
+            self.state = dict(self.initial_state)
         else:
+            self.initial_state = {}
             self.state = self.default_state()
 
+
     def default_state(self):
         """You probably want to define this in your subclass"""
         return {}
@@ -239,7 +241,7 @@ class BasicModel(object):
         for key, val in self.state.iteritems():
             Redis.instance().hset(self.__redis_key, key, val)
         self.add_to_index()
-        self.initial_state = self.state
+        self.initial_state = dict(self.state)
         return True
 
     @absorb_connection_error
diff --git a/nova/tests/model_unittest.py b/nova/tests/model_unittest.py
index 6825cfe2a3cc..dc2441c24c6d 100644
--- a/nova/tests/model_unittest.py
+++ b/nova/tests/model_unittest.py
@@ -19,9 +19,7 @@
 from datetime import datetime, timedelta
 import logging
 import time
-from twisted.internet import defer
 
-from nova import exception
 from nova import flags
 from nova import test
 from nova import utils
@@ -49,9 +47,9 @@ class ModelTestCase(test.TrialTestCase):
         inst['user_id'] = 'fake'
         inst['project_id'] = 'fake'
         inst['instance_type'] = 'm1.tiny'
-        inst['node_name'] = FLAGS.node_name
         inst['mac_address'] = utils.generate_mac()
         inst['ami_launch_index'] = 0
+        inst['private_dns_name'] = '10.0.0.1'
         inst.save()
         return inst
 
@@ -71,118 +69,126 @@ class ModelTestCase(test.TrialTestCase):
         session_token.save()
         return session_token
 
-    @defer.inlineCallbacks
     def test_create_instance(self):
         """store with create_instace, then test that a load finds it"""
-        instance = yield self.create_instance()
-        old = yield model.Instance(instance.identifier)
+        instance = self.create_instance()
+        old = model.Instance(instance.identifier)
         self.assertFalse(old.is_new_record())
 
-    @defer.inlineCallbacks
     def test_delete_instance(self):
         """create, then destroy, then make sure loads a new record"""
-        instance = yield self.create_instance()
-        yield instance.destroy()
-        newinst = yield model.Instance('i-test')
+        instance = self.create_instance()
+        instance.destroy()
+        newinst = model.Instance('i-test')
         self.assertTrue(newinst.is_new_record())
 
-    @defer.inlineCallbacks
     def test_instance_added_to_set(self):
-        """create, then check that it is listed for the project"""
-        instance = yield self.create_instance()
+        """create, then check that it is listed in global set"""
+        instance = self.create_instance()
         found = False
         for x in model.InstanceDirectory().all:
             if x.identifier == 'i-test':
                 found = True
         self.assert_(found)
 
-    @defer.inlineCallbacks
     def test_instance_associates_project(self):
         """create, then check that it is listed for the project"""
-        instance = yield self.create_instance()
+        instance = self.create_instance()
         found = False
         for x in model.InstanceDirectory().by_project(instance.project):
             if x.identifier == 'i-test':
                 found = True
         self.assert_(found)
 
-    @defer.inlineCallbacks
+    def test_instance_associates_ip(self):
+        """create, then check that it is listed for the ip"""
+        instance = self.create_instance()
+        found = False
+        x = model.InstanceDirectory().by_ip(instance['private_dns_name'])
+        self.assertEqual(x.identifier, 'i-test')
+
+    def test_instance_associates_node(self):
+        """create, then check that it is listed for the node_name"""
+        instance = self.create_instance()
+        found = False
+        for x in model.InstanceDirectory().by_node(FLAGS.node_name):
+            if x.identifier == 'i-test':
+                found = True
+        self.assertFalse(found)
+        instance['node_name'] = 'test_node'
+        instance.save()
+        for x in model.InstanceDirectory().by_node('test_node'):
+            if x.identifier == 'i-test':
+                found = True
+        self.assert_(found)
+
+
     def test_host_class_finds_hosts(self):
-        host = yield self.create_host()
+        host = self.create_host()
         self.assertEqual('testhost', model.Host.lookup('testhost').identifier)
 
-    @defer.inlineCallbacks
     def test_host_class_doesnt_find_missing_hosts(self):
-        rv = yield model.Host.lookup('woahnelly')
+        rv = model.Host.lookup('woahnelly')
         self.assertEqual(None, rv)
 
-    @defer.inlineCallbacks
     def test_create_host(self):
         """store with create_host, then test that a load finds it"""
-        host = yield self.create_host()
-        old = yield model.Host(host.identifier)
+        host = self.create_host()
+        old = model.Host(host.identifier)
         self.assertFalse(old.is_new_record())
 
-    @defer.inlineCallbacks
     def test_delete_host(self):
         """create, then destroy, then make sure loads a new record"""
-        instance = yield self.create_host()
-        yield instance.destroy()
-        newinst = yield model.Host('testhost')
+        instance = self.create_host()
+        instance.destroy()
+        newinst = model.Host('testhost')
         self.assertTrue(newinst.is_new_record())
 
-    @defer.inlineCallbacks
     def test_host_added_to_set(self):
         """create, then check that it is included in list"""
-        instance = yield self.create_host()
+        instance = self.create_host()
         found = False
         for x in model.Host.all():
             if x.identifier == 'testhost':
                 found = True
         self.assert_(found)
 
-    @defer.inlineCallbacks
     def test_create_daemon_two_args(self):
         """create a daemon with two arguments"""
-        d = yield self.create_daemon()
+        d = self.create_daemon()
         d = model.Daemon('testhost', 'nova-testdaemon')
         self.assertFalse(d.is_new_record())
 
-    @defer.inlineCallbacks
     def test_create_daemon_single_arg(self):
         """Create a daemon using the combined host:bin format"""
-        d = yield model.Daemon("testhost:nova-testdaemon")
+        d = model.Daemon("testhost:nova-testdaemon")
         d.save()
         d = model.Daemon('testhost:nova-testdaemon')
         self.assertFalse(d.is_new_record())
 
-    @defer.inlineCallbacks
     def test_equality_of_daemon_single_and_double_args(self):
         """Create a daemon using the combined host:bin arg, find with 2"""
-        d = yield model.Daemon("testhost:nova-testdaemon")
+        d = model.Daemon("testhost:nova-testdaemon")
         d.save()
         d = model.Daemon('testhost', 'nova-testdaemon')
         self.assertFalse(d.is_new_record())
 
-    @defer.inlineCallbacks
     def test_equality_daemon_of_double_and_single_args(self):
         """Create a daemon using the combined host:bin arg, find with 2"""
-        d = yield self.create_daemon()
+        d = self.create_daemon()
         d = model.Daemon('testhost:nova-testdaemon')
         self.assertFalse(d.is_new_record())
 
-    @defer.inlineCallbacks
     def test_delete_daemon(self):
         """create, then destroy, then make sure loads a new record"""
-        instance = yield self.create_daemon()
-        yield instance.destroy()
-        newinst = yield model.Daemon('testhost', 'nova-testdaemon')
+        instance = self.create_daemon()
+        instance.destroy()
+        newinst = model.Daemon('testhost', 'nova-testdaemon')
         self.assertTrue(newinst.is_new_record())
 
-    @defer.inlineCallbacks
     def test_daemon_heartbeat(self):
         """Create a daemon, sleep, heartbeat, check for update"""
-        d = yield self.create_daemon()
+        d = self.create_daemon()
         ts = d['updated_at']
         time.sleep(2)
         d.heartbeat()
@@ -190,70 +196,62 @@ class ModelTestCase(test.TrialTestCase):
         ts2 = d2['updated_at']
         self.assert_(ts2 > ts)
 
-    @defer.inlineCallbacks
     def test_daemon_added_to_set(self):
         """create, then check that it is included in list"""
-        instance = yield self.create_daemon()
+        instance = self.create_daemon()
         found = False
         for x in model.Daemon.all():
             if x.identifier == 'testhost:nova-testdaemon':
                 found = True
         self.assert_(found)
 
-    @defer.inlineCallbacks
     def test_daemon_associates_host(self):
         """create, then check that it is listed for the host"""
-        instance = yield self.create_daemon()
+        instance = self.create_daemon()
         found = False
         for x in model.Daemon.by_host('testhost'):
             if x.identifier == 'testhost:nova-testdaemon':
                 found = True
         self.assertTrue(found)
 
-    @defer.inlineCallbacks
     def test_create_session_token(self):
         """create"""
-        d = yield self.create_session_token()
+        d = self.create_session_token()
         d = model.SessionToken(d.token)
         self.assertFalse(d.is_new_record())
 
-    @defer.inlineCallbacks
     def test_delete_session_token(self):
         """create, then destroy, then make sure loads a new record"""
-        instance = yield self.create_session_token()
-        yield instance.destroy()
-        newinst = yield model.SessionToken(instance.token)
+        instance = self.create_session_token()
+        instance.destroy()
+        newinst = model.SessionToken(instance.token)
         self.assertTrue(newinst.is_new_record())
 
-    @defer.inlineCallbacks
     def test_session_token_added_to_set(self):
         """create, then check that it is included in list"""
-        instance = yield self.create_session_token()
+        instance = self.create_session_token()
         found = False
         for x in model.SessionToken.all():
             if x.identifier == instance.token:
                 found = True
         self.assert_(found)
 
-    @defer.inlineCallbacks
     def test_session_token_associates_user(self):
         """create, then check that it is listed for the user"""
-        instance = yield self.create_session_token()
+        instance = self.create_session_token()
         found = False
         for x in model.SessionToken.associated_to('user', 'testuser'):
             if x.identifier == instance.identifier:
                 found = True
         self.assertTrue(found)
 
-    @defer.inlineCallbacks
     def test_session_token_generation(self):
-        instance = yield model.SessionToken.generate('username', 'TokenType')
+        instance = model.SessionToken.generate('username', 'TokenType')
         self.assertFalse(instance.is_new_record())
 
-    @defer.inlineCallbacks
     def test_find_generated_session_token(self):
-        instance = yield model.SessionToken.generate('username', 'TokenType')
-        found = yield model.SessionToken.lookup(instance.identifier)
+        instance = model.SessionToken.generate('username', 'TokenType')
+        found = model.SessionToken.lookup(instance.identifier)
         self.assert_(found)
 
     def test_update_session_token_expiry(self):
@@ -264,34 +262,29 @@ class ModelTestCase(test.TrialTestCase):
         expiry = utils.parse_isotime(instance['expiry'])
         self.assert_(expiry > datetime.utcnow())
 
-    @defer.inlineCallbacks
     def test_session_token_lookup_when_expired(self):
-        instance = yield model.SessionToken.generate("testuser")
+        instance = model.SessionToken.generate("testuser")
         instance['expiry'] = datetime.utcnow().strftime(utils.TIME_FORMAT)
         instance.save()
         inst = model.SessionToken.lookup(instance.identifier)
         self.assertFalse(inst)
 
-    @defer.inlineCallbacks
     def test_session_token_lookup_when_not_expired(self):
-        instance = yield model.SessionToken.generate("testuser")
+        instance = model.SessionToken.generate("testuser")
         inst = model.SessionToken.lookup(instance.identifier)
         self.assert_(inst)
 
-    @defer.inlineCallbacks
     def test_session_token_is_expired_when_expired(self):
-        instance = yield model.SessionToken.generate("testuser")
+        instance = model.SessionToken.generate("testuser")
         instance['expiry'] = datetime.utcnow().strftime(utils.TIME_FORMAT)
         self.assert_(instance.is_expired())
 
-    @defer.inlineCallbacks
     def test_session_token_is_expired_when_not_expired(self):
-        instance = yield model.SessionToken.generate("testuser")
+        instance = model.SessionToken.generate("testuser")
         self.assertFalse(instance.is_expired())
 
-    @defer.inlineCallbacks
     def test_session_token_ttl(self):
-        instance = yield model.SessionToken.generate("testuser")
+        instance = model.SessionToken.generate("testuser")
         now = datetime.utcnow()
         delta = timedelta(hours=1)
         instance['expiry'] = (now + delta).strftime(utils.TIME_FORMAT)

From ecf8608a84960496c6c8e350f99d53537e4581c8 Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya <vishvananda@gmail.com>
Date: Tue, 3 Aug 2010 14:31:47 -0700
Subject: [PATCH 11/36] Huge network refactor, Round I

Made network into its own binary
Made simple network a plugabble class
Fixed unittests
Moved various classes around
Moved mac generation into network class
---
 bin/nova-dhcpbridge                           |  22 +--
 bin/nova-network                              |  11 +-
 nova/compute/service.py                       |   6 +-
 nova/endpoint/cloud.py                        | 129 +++++++++--------
 nova/{compute => network}/exception.py        |   2 +-
 nova/{compute => network}/linux_net.py        |   0
 nova/{compute/network.py => network/model.py} | 118 +++++----------
 nova/network/service.py                       | 137 +++++++++++++++++-
 nova/tests/network_unittest.py                |  99 +++++++------
 9 files changed, 317 insertions(+), 207 deletions(-)
 rename nova/{compute => network}/exception.py (94%)
 rename nova/{compute => network}/linux_net.py (100%)
 rename nova/{compute/network.py => network/model.py} (83%)

diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge
index 0db241b5e4e7..b3e7d456aebb 100755
--- a/bin/nova-dhcpbridge
+++ b/bin/nova-dhcpbridge
@@ -35,32 +35,34 @@ sys.path.append(os.path.abspath(os.path.join(__file__, "../../")))
 from nova import flags
 from nova import rpc
 from nova import utils
-from nova.compute import linux_net
-from nova.compute import network
-
+from nova.network import linux_net
+from nova.network import model
+from nova.network import service
 
 FLAGS = flags.FLAGS
 
 
 def add_lease(mac, ip, hostname, interface):
     if FLAGS.fake_rabbit:
-        network.lease_ip(ip)
+        service.VlanNetworkService().lease_ip(ip)
     else:
-        rpc.cast(FLAGS.cloud_topic, {"method": "lease_ip",
-                "args" : {"address": ip}})
+        rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name),
+                 {"method": "lease_ip",
+                  "args" : {"fixed_ip": ip}})
 
 def old_lease(mac, ip, hostname, interface):
     logging.debug("Adopted old lease or got a change of mac/hostname")
 
 def del_lease(mac, ip, hostname, interface):
     if FLAGS.fake_rabbit:
-        network.release_ip(ip)
+        service.VlanNetworkService().release_ip(ip)
     else:
-        rpc.cast(FLAGS.cloud_topic, {"method": "release_ip",
-                "args" : {"address": ip}})
+        rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name),
+                 {"method": "release_ip",
+                  "args" : {"fixed_ip": ip}})
 
 def init_leases(interface):
-    net = network.get_network_by_interface(interface)
+    net = model.get_network_by_interface(interface)
     res = ""
     for host_name in net.hosts:
         res += "%s\n" % linux_net.hostDHCP(net, host_name, net.hosts[host_name])
diff --git a/bin/nova-network b/bin/nova-network
index 52d6cb70a590..b2e2cf173f5a 100755
--- a/bin/nova-network
+++ b/bin/nova-network
@@ -21,12 +21,19 @@
   Twistd daemon for the nova network nodes.
 """
 
+from nova import flags
 from nova import twistd
-from nova.network import service
+from nova import utils
 
 
+FLAGS = flags.FLAGS
+
+flags.DEFINE_string('network_service',
+                    'nova.network.service.VlanNetworkService',
+                    'Service Class for Networking')
+
 if __name__ == '__main__':
     twistd.serve(__file__)
 
 if __name__ == '__builtin__':
-    application = service.NetworkService.create()
+    application = utils.import_class(FLAGS.network_service).create()
diff --git a/nova/compute/service.py b/nova/compute/service.py
index 9b162edc7e5b..a22240b052e4 100644
--- a/nova/compute/service.py
+++ b/nova/compute/service.py
@@ -39,9 +39,9 @@ from nova import service
 from nova import utils
 from nova.compute import disk
 from nova.compute import model
-from nova.compute import network
 from nova.compute import power_state
 from nova.compute.instance_types import INSTANCE_TYPES
+from nova.network import model as net_model
 from nova.objectstore import image # for image_path flag
 from nova.virt import connection as virt_connection
 from nova.volume import service as volume_service
@@ -117,10 +117,10 @@ class ComputeService(service.Service):
         """ launch a new instance with specified options """
         logging.debug("Starting instance %s..." % (instance_id))
         inst = self.instdir.get(instance_id)
-        if not FLAGS.simple_network:
+        if inst.get('network_type', 'dhcp') == 'dhcp':
             # TODO: Get the real security group of launch in here
             security_group = "default"
-            net = network.BridgedNetwork.get_network_for_project(inst['user_id'],
+            net = net_model.BridgedNetwork.get_network_for_project(inst['user_id'],
                                                              inst['project_id'],
                                             security_group).express()
         inst['node_name'] = FLAGS.node_name
diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py
index 67fc04502eed..f605eec2c6a4 100644
--- a/nova/endpoint/cloud.py
+++ b/nova/endpoint/cloud.py
@@ -36,11 +36,9 @@ from nova import utils
 from nova.auth import rbac
 from nova.auth import manager
 from nova.compute import model
-from nova.compute import network
 from nova.compute.instance_types import INSTANCE_TYPES
-from nova.compute import service as compute_service
 from nova.endpoint import images
-from nova.volume import service as volume_service
+from nova.volume import service
 
 
 FLAGS = flags.FLAGS
@@ -64,7 +62,6 @@ class CloudController(object):
 """
     def __init__(self):
         self.instdir = model.InstanceDirectory()
-        self.network = network.PublicNetworkController()
         self.setup()
 
     @property
@@ -76,7 +73,7 @@ class CloudController(object):
     def volumes(self):
         """ returns a list of all volumes """
         for volume_id in datastore.Redis.instance().smembers("volumes"):
-            volume = volume_service.get_volume(volume_id)
+            volume = service.get_volume(volume_id)
             yield volume
 
     def __str__(self):
@@ -222,7 +219,7 @@ class CloudController(object):
                 callback=_complete)
             return d
 
-        except users.UserError, e:
+        except manager.UserError as e:
             raise
 
     @rbac.allow('all')
@@ -331,7 +328,7 @@ class CloudController(object):
         raise exception.NotFound('Instance %s could not be found' % instance_id)
 
     def _get_volume(self, context, volume_id):
-        volume = volume_service.get_volume(volume_id)
+        volume = service.get_volume(volume_id)
         if context.user.is_admin() or volume['project_id'] == context.project.id:
             return volume
         raise exception.NotFound('Volume %s could not be found' % volume_id)
@@ -472,29 +469,34 @@ class CloudController(object):
 
     @rbac.allow('netadmin')
     def allocate_address(self, context, **kwargs):
-        address = self.network.allocate_ip(
-                                context.user.id, context.project.id, 'public')
-        return defer.succeed({'addressSet': [{'publicIp' : address}]})
+        alloc_result = rpc.call(self._get_network_host(context),
+                         {"method": "allocate_elastic_ip"})
+        public_ip = alloc_result['result']
+        return defer.succeed({'addressSet': [{'publicIp' : public_ip}]})
 
     @rbac.allow('netadmin')
     def release_address(self, context, public_ip, **kwargs):
-        self.network.deallocate_ip(public_ip)
+        # NOTE(vish): Should we make sure this works?
+        rpc.cast(self._get_network_host(context),
+                         {"method": "deallocate_elastic_ip",
+                          "args": {"elastic_ip": public_ip}})
         return defer.succeed({'releaseResponse': ["Address released."]})
 
     @rbac.allow('netadmin')
-    def associate_address(self, context, instance_id, **kwargs):
+    def associate_address(self, context, instance_id, public_ip, **kwargs):
         instance = self._get_instance(context, instance_id)
-        self.network.associate_address(
-                            kwargs['public_ip'],
-                            instance['private_dns_name'],
-                            instance_id)
+        address = self._get_address(context, public_ip)
+        rpc.cast(self._get_network_host(context),
+                         {"method": "associate_elastic_ip",
+                          "args": {"elastic_ip": address['public_ip'],
+                                   "fixed_ip": instance['private_dns_name'],
+                                   "instance_id": instance['instance_id']}})
         return defer.succeed({'associateResponse': ["Address associated."]})
 
     @rbac.allow('netadmin')
     def disassociate_address(self, context, public_ip, **kwargs):
         address = self._get_address(context, public_ip)
         self.network.disassociate_address(public_ip)
-        # TODO - Strip the IP from the instance
         return defer.succeed({'disassociateResponse': ["Address disassociated."]})
 
     def release_ip(self, context, private_ip, **kwargs):
@@ -505,7 +507,13 @@ class CloudController(object):
         self.network.lease_ip(private_ip)
         return defer.succeed({'leaseResponse': ["Address leased."]})
 
+    def _get_network_host(self, context):
+        # FIXME(vish): this is temporary until we store net hosts for project
+        import socket
+        return socket.gethostname()
+
     @rbac.allow('projectmanager', 'sysadmin')
+    @defer.inlineCallbacks
     def run_instances(self, context, **kwargs):
         # make sure user can access the image
         # vpn image is private so it doesn't show up on lists
@@ -539,14 +547,25 @@ class CloudController(object):
             key_data = key_pair.public_key
         # TODO: Get the real security group of launch in here
         security_group = "default"
-        if FLAGS.simple_network:
-            bridge_name = FLAGS.simple_network_bridge
-        else:
-            net = network.BridgedNetwork.get_network_for_project(
-                    context.user.id, context.project.id, security_group)
-            bridge_name = net['bridge_name']
+        create_result = yield rpc.call(FLAGS.network_topic,
+                 {"method": "create_network",
+                  "args": {"user_id": context.user.id,
+                           "project_id": context.project.id,
+                           "security_group": security_group}})
+        bridge_name = create_result['result']
+        net_host = self._get_network_host(context)
         for num in range(int(kwargs['max_count'])):
+            vpn = False
+            if image_id == FLAGS.vpn_image_id:
+                vpn = True
+            allocate_result = yield rpc.call(net_host,
+                     {"method": "allocate_fixed_ip",
+                      "args": {"user_id": context.user.id,
+                               "project_id": context.project.id,
+                               "vpn": vpn}})
             inst = self.instdir.new()
+            inst['mac_address'] = allocate_result['result']['mac_address']
+            inst['private_dns_name'] = allocate_result['result']['ip_address']
             inst['image_id'] = image_id
             inst['kernel_id'] = kernel_id
             inst['ramdisk_id'] = ramdisk_id
@@ -558,24 +577,9 @@ class CloudController(object):
             inst['key_name'] = kwargs.get('key_name', '')
             inst['user_id'] = context.user.id
             inst['project_id'] = context.project.id
-            inst['mac_address'] = utils.generate_mac()
             inst['ami_launch_index'] = num
             inst['bridge_name'] = bridge_name
-            if FLAGS.simple_network:
-                address = network.allocate_simple_ip()
-            else:
-                if inst['image_id'] == FLAGS.vpn_image_id:
-                    address = network.allocate_vpn_ip(
-                            inst['user_id'],
-                            inst['project_id'],
-                            mac=inst['mac_address'])
-                else:
-                    address = network.allocate_ip(
-                            inst['user_id'],
-                            inst['project_id'],
-                            mac=inst['mac_address'])
-            inst['private_dns_name'] = str(address)
-            # TODO: allocate expresses on the router node
+
             inst.save()
             rpc.cast(FLAGS.compute_topic,
                  {"method": "run_instance",
@@ -583,8 +587,7 @@ class CloudController(object):
             logging.debug("Casting to node for %s's instance with IP of %s" %
                       (context.user.name, inst['private_dns_name']))
         # TODO: Make Network figure out the network name from ip.
-        return defer.succeed(self._format_instances(
-                                context, reservation_id))
+        defer.returnValue(self._format_instances(context, reservation_id))
 
     @rbac.allow('projectmanager', 'sysadmin')
     def terminate_instances(self, context, instance_id, **kwargs):
@@ -594,26 +597,34 @@ class CloudController(object):
             try:
                 instance = self._get_instance(context, i)
             except exception.NotFound:
-                logging.warning("Instance %s was not found during terminate" % i)
+                logging.warning("Instance %s was not found during terminate"
+                                % i)
                 continue
-            try:
-                self.network.disassociate_address(
-                    instance.get('public_dns_name', 'bork'))
-            except:
-                pass
-            if instance.get('private_dns_name', None):
-                logging.debug("Deallocating address %s" % instance.get('private_dns_name', None))
-                if FLAGS.simple_network:
-                    network.deallocate_simple_ip(instance.get('private_dns_name', None))
-                else:
-                    try:
-                        self.network.deallocate_ip(instance.get('private_dns_name', None))
-                    except Exception, _err:
-                        pass
-            if instance.get('node_name', 'unassigned') != 'unassigned':  #It's also internal default
+            elastic_ip = instance.get('public_dns_name', None)
+            if elastic_ip:
+                logging.debug("Deallocating address %s" % elastic_ip)
+                # NOTE(vish): Right now we don't really care if the ip is
+                #             disassociated.  We may need to worry about
+                #             checking this later.  Perhaps in the scheduler?
+                rpc.cast(self._get_network_host(context),
+                         {"method": "disassociate_elastic_ip",
+                          "args": {"elastic_ip": elastic_ip}})
+
+            fixed_ip = instance.get('private_dns_name', None)
+            if fixed_ip:
+                logging.debug("Deallocating address %s" % fixed_ip)
+                # NOTE(vish): Right now we don't really care if the ip is
+                #             actually removed.  We may need to worry about
+                #             checking this later.  Perhaps in the scheduler?
+                rpc.cast(self._get_network_host(context),
+                         {"method": "deallocate_fixed_ip",
+                          "args": {"fixed_ip": fixed_ip}})
+
+            if instance.get('node_name', 'unassigned') != 'unassigned':
+                # NOTE(joshua?): It's also internal default
                 rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']),
-                             {"method": "terminate_instance",
-                              "args" : {"instance_id": i}})
+                         {"method": "terminate_instance",
+                          "args": {"instance_id": i}})
             else:
                 instance.destroy()
         return defer.succeed(True)
diff --git a/nova/compute/exception.py b/nova/network/exception.py
similarity index 94%
rename from nova/compute/exception.py
rename to nova/network/exception.py
index 13e4f0a51670..5722e9672939 100644
--- a/nova/compute/exception.py
+++ b/nova/network/exception.py
@@ -17,7 +17,7 @@
 #    under the License.
 
 """
-Exceptions for Compute Node errors, mostly network addressing.
+Exceptions for network errors.
 """
 
 from nova.exception import Error
diff --git a/nova/compute/linux_net.py b/nova/network/linux_net.py
similarity index 100%
rename from nova/compute/linux_net.py
rename to nova/network/linux_net.py
diff --git a/nova/compute/network.py b/nova/network/model.py
similarity index 83%
rename from nova/compute/network.py
rename to nova/network/model.py
index 62d892e58273..5346549b88bb 100644
--- a/nova/compute/network.py
+++ b/nova/network/model.py
@@ -17,7 +17,7 @@
 #    under the License.
 
 """
-Classes for network control, including VLANs, DHCP, and IP allocation.
+Model Classes for network control, including VLANs, DHCP, and IP allocation.
 """
 
 import IPy
@@ -26,12 +26,12 @@ import os
 import time
 
 from nova import datastore
-from nova import exception
+from nova import exception as nova_exception
 from nova import flags
 from nova import utils
 from nova.auth import manager
-from nova.compute import exception as compute_exception
-from nova.compute import linux_net
+from nova.network import exception
+from nova.network import linux_net
 
 
 FLAGS = flags.FLAGS
@@ -53,26 +53,6 @@ flags.DEFINE_integer('cnt_vpn_clients', 5,
 flags.DEFINE_integer('cloudpipe_start_port', 12000,
                         'Starting port for mapped CloudPipe external ports')
 
-flags.DEFINE_boolean('simple_network', False,
-                       'Use simple networking instead of vlans')
-flags.DEFINE_string('simple_network_bridge', 'br100',
-                       'Bridge for simple network instances')
-flags.DEFINE_list('simple_network_ips', ['192.168.0.2'],
-                       'Available ips for simple network')
-flags.DEFINE_string('simple_network_template',
-                    utils.abspath('compute/interfaces.template'),
-                    'Template file for simple network')
-flags.DEFINE_string('simple_network_netmask', '255.255.255.0',
-                       'Netmask for simple network')
-flags.DEFINE_string('simple_network_network', '192.168.0.0',
-                       'Network for simple network')
-flags.DEFINE_string('simple_network_gateway', '192.168.0.1',
-                       'Broadcast for simple network')
-flags.DEFINE_string('simple_network_broadcast', '192.168.0.255',
-                       'Broadcast for simple network')
-flags.DEFINE_string('simple_network_dns', '8.8.4.4',
-                       'Dns for simple network')
-
 logging.getLogger().setLevel(logging.DEBUG)
 
 
@@ -156,7 +136,6 @@ class Vlan(datastore.BasicModel):
 
 # CLEANUP:
 # TODO(ja): Save the IPs at the top of each subnet for cloudpipe vpn clients
-# TODO(ja): use singleton for usermanager instead of self.manager in vlanpool et al
 # TODO(ja): does vlanpool "keeper" need to know the min/max - shouldn't FLAGS always win?
 # TODO(joshua): Save the IPs at the top of each subnet for cloudpipe vpn clients
 
@@ -241,7 +220,7 @@ class BaseNetwork(datastore.BasicModel):
         for idx in range(self.num_static_ips, len(self.network)-(1 + FLAGS.cnt_vpn_clients)):
             address = str(self.network[idx])
             if not address in self.hosts.keys():
-                yield str(address)
+                yield address
 
     @property
     def num_static_ips(self):
@@ -253,7 +232,7 @@ class BaseNetwork(datastore.BasicModel):
             self._add_host(user_id, project_id, address, mac)
             self.express(address=address)
             return address
-        raise compute_exception.NoMoreAddresses("Project %s with network %s" %
+        raise exception.NoMoreAddresses("Project %s with network %s" %
                                                 (project_id, str(self.network)))
 
     def lease_ip(self, ip_str):
@@ -261,7 +240,7 @@ class BaseNetwork(datastore.BasicModel):
 
     def release_ip(self, ip_str):
         if not ip_str in self.assigned:
-            raise compute_exception.AddressNotAllocated()
+            raise exception.AddressNotAllocated()
         self.deexpress(address=ip_str)
         self._rem_host(ip_str)
 
@@ -349,14 +328,14 @@ class DHCPNetwork(BridgedNetwork):
             logging.debug("Not launching dnsmasq: no hosts.")
         self.express_cloudpipe()
 
-    def allocate_vpn_ip(self, mac):
+    def allocate_vpn_ip(self, user_id, project_id, mac):
         address = str(self.network[2])
-        self._add_host(self['user_id'], self['project_id'], address, mac)
+        self._add_host(user_id, project_id, address, mac)
         self.express(address=address)
         return address
 
     def express_cloudpipe(self):
-        private_ip = self.network[2]
+        private_ip = str(self.network[2])
         linux_net.confirm_rule("FORWARD -d %s -p udp --dport 1194 -j ACCEPT"
                                % (private_ip, ))
         linux_net.confirm_rule("PREROUTING -t nat -d %s -p udp --dport %s -j DNAT --to %s:1194"
@@ -441,14 +420,14 @@ class PublicNetworkController(BaseNetwork):
 
     def associate_address(self, public_ip, private_ip, instance_id):
         if not public_ip in self.assigned:
-            raise compute_exception.AddressNotAllocated()
+            raise exception.AddressNotAllocated()
         # TODO(joshua): Keep an index going both ways
         for addr in self.host_objs:
             if addr.get('private_ip', None) == private_ip:
-                raise compute_exception.AddressAlreadyAssociated()
+                raise exception.AddressAlreadyAssociated()
         addr = self.get_host(public_ip)
         if addr.get('private_ip', 'available') != 'available':
-            raise compute_exception.AddressAlreadyAssociated()
+            raise exception.AddressAlreadyAssociated()
         addr['private_ip'] = private_ip
         addr['instance_id'] = instance_id
         addr.save()
@@ -456,10 +435,10 @@ class PublicNetworkController(BaseNetwork):
 
     def disassociate_address(self, public_ip):
         if not public_ip in self.assigned:
-            raise compute_exception.AddressNotAllocated()
+            raise exception.AddressNotAllocated()
         addr = self.get_host(public_ip)
         if addr.get('private_ip', 'available') == 'available':
-            raise compute_exception.AddressNotAssociated()
+            raise exception.AddressNotAssociated()
         self.deexpress(address=public_ip)
         addr['private_ip'] = 'available'
         addr['instance_id'] = 'available'
@@ -535,63 +514,36 @@ def get_vlan_for_project(project_id):
                 return vlan
             else:
                 return Vlan.create(project_id, vnum)
-    raise compute_exception.AddressNotAllocated("Out of VLANs")
+    raise exception.AddressNotAllocated("Out of VLANs")
+
+def get_project_network(project_id, security_group='default'):
+    """ get a project's private network, allocating one if needed """
+    project = manager.AuthManager().get_project(project_id)
+    if not project:
+        raise nova_exception.NotFound("Project %s doesn't exist." % project_id)
+    manager_id = project.project_manager_id
+    return DHCPNetwork.get_network_for_project(manager_id,
+                                               project.id,
+                                               security_group)
 
-def get_network_by_interface(iface, security_group='default'):
-    vlan = iface.rpartition("br")[2]
-    return get_project_network(Vlan.dict_by_vlan().get(vlan), security_group)
 
 def get_network_by_address(address):
+    # TODO(vish): This is completely the wrong way to do this, but
+    #             I'm getting the network binary working before I
+    #             tackle doing this the right way.
     logging.debug("Get Network By Address: %s" % address)
     for project in manager.AuthManager().get_projects():
         net = get_project_network(project.id)
         if address in net.assigned:
             logging.debug("Found %s in %s" % (address, project.id))
             return net
-    raise compute_exception.AddressNotAllocated()
-
-def allocate_simple_ip():
-    redis = datastore.Redis.instance()
-    if not redis.exists('ips') and not len(redis.keys('instances:*')):
-        for address in FLAGS.simple_network_ips:
-            redis.sadd('ips', address)
-    address = redis.spop('ips')
-    if not address:
-        raise exception.NoMoreAddresses()
-    return address
-
-def deallocate_simple_ip(address):
-    datastore.Redis.instance().sadd('ips', address)
+    raise exception.AddressNotAllocated()
 
 
-def allocate_vpn_ip(user_id, project_id, mac):
-    return get_project_network(project_id).allocate_vpn_ip(mac)
-
-def allocate_ip(user_id, project_id, mac):
-    return get_project_network(project_id).allocate_ip(user_id, project_id, mac)
-
-def deallocate_ip(address):
-    return get_network_by_address(address).deallocate_ip(address)
-
-def release_ip(address):
-    return get_network_by_address(address).release_ip(address)
-
-def lease_ip(address):
-    return get_network_by_address(address).lease_ip(address)
-
-def get_project_network(project_id, security_group='default'):
-    """ get a project's private network, allocating one if needed """
-    # TODO(todd): It looks goofy to get a project from a UserManager.
-    #             Refactor to still use the LDAP backend, but not User specific.
-    project = manager.AuthManager().get_project(project_id)
-    if not project:
-        raise exception.Error("Project %s doesn't exist, uhoh." %
-                                   project_id)
-    return DHCPNetwork.get_network_for_project(project.project_manager_id,
-                                               project.id, security_group)
+def get_network_by_interface(iface, security_group='default'):
+    vlan = iface.rpartition("br")[2]
+    project_id = Vlan.dict_by_vlan().get(vlan)
+    return get_project_network(project_id, security_group)
+
 
 
-def restart_nets():
-    """ Ensure the network for each user is enabled"""
-    for project in manager.AuthManager().get_projects():
-        get_project_network(project.id).express()
diff --git a/nova/network/service.py b/nova/network/service.py
index 9d87e05e6f12..97976a752fbc 100644
--- a/nova/network/service.py
+++ b/nova/network/service.py
@@ -22,14 +22,141 @@ Network Nodes are responsible for allocating ips and setting up network
 
 import logging
 
+from nova import datastore
+from nova import exception as nova_exception
 from nova import flags
 from nova import service
-
+from nova import utils
+from nova.auth import manager
+from nova.network import exception
+from nova.network import model
 
 FLAGS = flags.FLAGS
 
-class NetworkService(service.Service):
-    """Allocates ips and sets up networks"""
+flags.DEFINE_string('flat_network_bridge', 'br100',
+                    'Bridge for simple network instances')
+flags.DEFINE_list('flat_network_ips',
+                  ['192.168.0.2','192.168.0.3','192.168.0.4'],
+                  'Available ips for simple network')
+flags.DEFINE_string('flat_network_network', '192.168.0.0',
+                       'Network for simple network')
+flags.DEFINE_string('flat_network_netmask', '255.255.255.0',
+                       'Netmask for simple network')
+flags.DEFINE_string('flat_network_gateway', '192.168.0.1',
+                       'Gateway for simple network')
+flags.DEFINE_string('flat_network_broadcast', '192.168.0.255',
+                       'Broadcast for simple network')
+flags.DEFINE_string('flat_network_dns', '8.8.4.4',
+                       'Dns for simple network')
+
+
+class BaseNetworkService(service.Service):
+    """Implements common network service functionality
+
+    This class must be subclassed.
+    """
+    def __init__(self, *args, **kwargs):
+        self.network = model.PublicNetworkController()
+
+    def create_network(self, user_id, project_id, security_group='default',
+                       *args, **kwargs):
+        """Subclass implements creating network and returns network data"""
+        raise NotImplementedError()
+
+    def allocate_fixed_ip(self, user_id, project_id, *args, **kwargs):
+        """Subclass implements getting fixed ip from the pool"""
+        raise NotImplementedError()
+
+    def deallocate_fixed_ip(self, fixed_ip, *args, **kwargs):
+        """Subclass implements return of ip to the pool"""
+        raise NotImplementedError()
+
+    def allocate_elastic_ip(self):
+        """Gets a elastic ip from the pool"""
+        return self.network.allocate_ip()
+
+    def associate_elastic_ip(self, elastic_ip, fixed_ip, instance_id):
+        """Associates an elastic ip to a fixed ip"""
+        self.network.associate_address(elastic_ip, fixed_ip, instance_id)
+
+    def disassociate_elastic_ip(self, elastic_ip):
+        """Disassociates a elastic ip"""
+        self.network.disassociate_address(elastic_ip)
+
+    def deallocate_elastic_ip(self, elastic_ip):
+        """Returns a elastic ip to the pool"""
+        self.network.deallocate_ip(elastic_ip)
+
+
+class FlatNetworkService(BaseNetworkService):
+    def create_network(self, user_id, project_id, security_group='default',
+                       *args, **kwargs):
+        """Creates network and returns bridge
+
+        Flat network service simply returns a common bridge regardless of
+        project.
+        """
+        return {'network_type': 'injected',
+                'bridge_name': FLAGS.flat_network_bridge,
+                'network_network': FLAGS.flat_network_network,
+                'network_netmask': FLAGS.flat_network_netmask,
+                'network_gateway': FLAGS.flat_network_gateway,
+                'network_broadcast': FLAGS.flat_network_broadcast,
+                'network_dns': FLAGS.flat_network_dns}
+
+    def allocate_fixed_ip(self, user_id, project_id, *args, **kwargs):
+        """Gets a fixed ip from the pool
+
+        Flat network just grabs the next available ip from the pool
+        """
+        redis = datastore.Redis.instance()
+        if not redis.exists('ips') and not len(redis.keys('instances:*')):
+            for fixed_ip in FLAGS.flat_network_ips:
+                redis.sadd('ips', fixed_ip)
+        fixed_ip = redis.spop('ips')
+        if not fixed_ip:
+            raise exception.NoMoreAddresses()
+        return {'mac': utils.generate_mac(),
+                'ip' : str(fixed_ip)}
+
+    def deallocate_fixed_ip(self, fixed_ip, *args, **kwargs):
+        """Returns an ip to the pool"""
+        datastore.Redis.instance().sadd('ips', fixed_ip)
+
+class VlanNetworkService(BaseNetworkService):
+    """Allocates ips and sets up networks"""
+    def create_network(self, user_id, project_id, security_group='default',
+                       *args, **kwargs):
+        """Creates network and returns bridge"""
+        net = model.get_project_network(project_id, security_group)
+        return {'network_type': 'dhcp',
+                'bridge_name': net['bridge_name']}
+
+    def allocate_fixed_ip(self, user_id, project_id, vpn=False, *args, **kwargs):
+        """Gets a fixed ip from the pool """
+        mac = utils.generate_mac()
+        net = model.get_project_network(project_id)
+        if vpn:
+            fixed_ip = net.allocate_vpn_ip(user_id, project_id, mac)
+        else:
+            fixed_ip = net.allocate_ip(user_id, project_id, mac)
+        return {'mac': mac,
+                'ip' : fixed_ip}
+
+    def deallocate_fixed_ip(self, fixed_ip,
+                            *args, **kwargs):
+        """Returns an ip to the pool"""
+        model.get_network_by_address(fixed_ip).deallocate_ip(fixed_ip)
+
+    def lease_ip(self, address):
+        return model.get_network_by_address(address).lease_ip(address)
+
+    def release_ip(self, address):
+        return model.get_network_by_address(address).release_ip(address)
+
+    def restart_nets(self):
+        """ Ensure the network for each user is enabled"""
+        for project in manager.AuthManager().get_projects():
+            model.get_project_network(project.id).express()
+
 
-    def __init__(self):
-        logging.debug("Network node working")
diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py
index f24eefb0d7f5..4b2f6c649dcc 100644
--- a/nova/tests/network_unittest.py
+++ b/nova/tests/network_unittest.py
@@ -24,8 +24,9 @@ from nova import flags
 from nova import test
 from nova import utils
 from nova.auth import manager
-from nova.compute import network
-from nova.compute.exception import NoMoreAddresses
+from nova.network import model
+from nova.network import service
+from nova.network.exception import NoMoreAddresses
 
 FLAGS = flags.FLAGS
 
@@ -52,7 +53,8 @@ class NetworkTestCase(test.TrialTestCase):
             self.projects.append(self.manager.create_project(name,
                                                              'netuser',
                                                              name))
-        self.network = network.PublicNetworkController()
+        self.network = model.PublicNetworkController()
+        self.service = service.VlanNetworkService()
 
     def tearDown(self):
         super(NetworkTestCase, self).tearDown()
@@ -66,16 +68,17 @@ class NetworkTestCase(test.TrialTestCase):
         self.assertTrue(IPy.IP(address) in pubnet)
         self.assertTrue(IPy.IP(address) in self.network.network)
 
-    def test_allocate_deallocate_ip(self):
-        address = network.allocate_ip(
-                self.user.id, self.projects[0].id, utils.generate_mac())
+    def test_allocate_deallocate_fixed_ip(self):
+        result  = self.service.allocate_fixed_ip(
+                self.user.id, self.projects[0].id)
+        address = result['ip']
+        mac = result['mac']
         logging.debug("Was allocated %s" % (address))
-        net = network.get_project_network(self.projects[0].id, "default")
+        net = model.get_project_network(self.projects[0].id, "default")
         self.assertEqual(True, is_in_project(address, self.projects[0].id))
-        mac = utils.generate_mac()
         hostname = "test-host"
         self.dnsmasq.issue_ip(mac, address, hostname, net.bridge_name)
-        rv = network.deallocate_ip(address)
+        rv = self.service.deallocate_fixed_ip(address)
 
         # Doesn't go away until it's dhcp released
         self.assertEqual(True, is_in_project(address, self.projects[0].id))
@@ -84,15 +87,18 @@ class NetworkTestCase(test.TrialTestCase):
         self.assertEqual(False, is_in_project(address, self.projects[0].id))
 
     def test_range_allocation(self):
-        mac = utils.generate_mac()
-        secondmac = utils.generate_mac()
         hostname = "test-host"
-        address = network.allocate_ip(
-                    self.user.id, self.projects[0].id, mac)
-        secondaddress = network.allocate_ip(
-                self.user, self.projects[1].id, secondmac)
-        net = network.get_project_network(self.projects[0].id, "default")
-        secondnet = network.get_project_network(self.projects[1].id, "default")
+        result = self.service.allocate_fixed_ip(
+                    self.user.id, self.projects[0].id)
+        mac = result['mac']
+        address = result['ip']
+        result = self.service.allocate_fixed_ip(
+                self.user, self.projects[1].id)
+        secondmac = result['mac']
+        secondaddress = result['ip']
+
+        net = model.get_project_network(self.projects[0].id, "default")
+        secondnet = model.get_project_network(self.projects[1].id, "default")
 
         self.assertEqual(True, is_in_project(address, self.projects[0].id))
         self.assertEqual(True, is_in_project(secondaddress, self.projects[1].id))
@@ -103,46 +109,50 @@ class NetworkTestCase(test.TrialTestCase):
         self.dnsmasq.issue_ip(secondmac, secondaddress,
                                 hostname, secondnet.bridge_name)
 
-        rv = network.deallocate_ip(address)
+        rv = self.service.deallocate_fixed_ip(address)
         self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
         self.assertEqual(False, is_in_project(address, self.projects[0].id))
 
         # First address release shouldn't affect the second
         self.assertEqual(True, is_in_project(secondaddress, self.projects[1].id))
 
-        rv = network.deallocate_ip(secondaddress)
+        rv = self.service.deallocate_fixed_ip(secondaddress)
         self.dnsmasq.release_ip(secondmac, secondaddress,
                                 hostname, secondnet.bridge_name)
         self.assertEqual(False, is_in_project(secondaddress, self.projects[1].id))
 
     def test_subnet_edge(self):
-        secondaddress = network.allocate_ip(self.user.id, self.projects[0].id,
-                                utils.generate_mac())
+        result = self.service.allocate_fixed_ip(self.user.id,
+                                                       self.projects[0].id)
+        firstaddress = result['ip']
         hostname = "toomany-hosts"
         for i in range(1,5):
             project_id = self.projects[i].id
-            mac = utils.generate_mac()
-            mac2 = utils.generate_mac()
-            mac3 = utils.generate_mac()
-            address = network.allocate_ip(
-                    self.user, project_id, mac)
-            address2 = network.allocate_ip(
-                    self.user, project_id, mac2)
-            address3 = network.allocate_ip(
-                    self.user, project_id, mac3)
+            result = self.service.allocate_fixed_ip(
+                    self.user, project_id)
+            mac = result['mac']
+            address = result['ip']
+            result = self.service.allocate_fixed_ip(
+                    self.user, project_id)
+            mac2 = result['mac']
+            address2 = result['ip']
+            result = self.service.allocate_fixed_ip(
+                   self.user, project_id)
+            mac3 = result['mac']
+            address3 = result['ip']
             self.assertEqual(False, is_in_project(address, self.projects[0].id))
             self.assertEqual(False, is_in_project(address2, self.projects[0].id))
             self.assertEqual(False, is_in_project(address3, self.projects[0].id))
-            rv = network.deallocate_ip(address)
-            rv = network.deallocate_ip(address2)
-            rv = network.deallocate_ip(address3)
-            net = network.get_project_network(project_id, "default")
+            rv = self.service.deallocate_fixed_ip(address)
+            rv = self.service.deallocate_fixed_ip(address2)
+            rv = self.service.deallocate_fixed_ip(address3)
+            net = model.get_project_network(project_id, "default")
             self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
             self.dnsmasq.release_ip(mac2, address2, hostname, net.bridge_name)
             self.dnsmasq.release_ip(mac3, address3, hostname, net.bridge_name)
-        net = network.get_project_network(self.projects[0].id, "default")
-        rv = network.deallocate_ip(secondaddress)
-        self.dnsmasq.release_ip(mac, secondaddress, hostname, net.bridge_name)
+        net = model.get_project_network(self.projects[0].id, "default")
+        rv = self.service.deallocate_fixed_ip(firstaddress)
+        self.dnsmasq.release_ip(mac, firstaddress, hostname, net.bridge_name)
 
     def test_release_before_deallocate(self):
         pass
@@ -169,7 +179,7 @@ class NetworkTestCase(test.TrialTestCase):
                                                NUM_RESERVED_VPN_IPS)
         usable addresses
         """
-        net = network.get_project_network(self.projects[0].id, "default")
+        net = model.get_project_network(self.projects[0].id, "default")
 
         # Determine expected number of available IP addresses
         num_static_ips = net.num_static_ips
@@ -183,22 +193,23 @@ class NetworkTestCase(test.TrialTestCase):
         macs = {}
         addresses = {}
         for i in range(0, (num_available_ips - 1)):
-            macs[i] = utils.generate_mac()
-            addresses[i] = network.allocate_ip(self.user.id, self.projects[0].id, macs[i])
+            result = self.service.allocate_fixed_ip(self.user.id, self.projects[0].id)
+            macs[i] = result['mac']
+            addresses[i] = result['ip']
             self.dnsmasq.issue_ip(macs[i], addresses[i], hostname, net.bridge_name)
 
-        self.assertRaises(NoMoreAddresses, network.allocate_ip, self.user.id, self.projects[0].id, utils.generate_mac())
+        self.assertRaises(NoMoreAddresses, self.service.allocate_fixed_ip, self.user.id, self.projects[0].id)
 
         for i in range(0, (num_available_ips - 1)):
-            rv = network.deallocate_ip(addresses[i])
+            rv = self.service.deallocate_fixed_ip(addresses[i])
             self.dnsmasq.release_ip(macs[i], addresses[i], hostname, net.bridge_name)
 
 def is_in_project(address, project_id):
-    return address in network.get_project_network(project_id).list_addresses()
+    return address in model.get_project_network(project_id).list_addresses()
 
 def _get_project_addresses(project_id):
     project_addresses = []
-    for addr in network.get_project_network(project_id).list_addresses():
+    for addr in model.get_project_network(project_id).list_addresses():
         project_addresses.append(addr)
     return project_addresses
 

From 576dade1d53814416977522637bea9e3c32e5483 Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya <vishvananda@gmail.com>
Date: Tue, 3 Aug 2010 15:13:07 -0700
Subject: [PATCH 12/36] change network_service flag to network_type and don't
 take full class name

---
 bin/nova-network        | 10 ++++++----
 nova/network/service.py |  3 +++
 2 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/bin/nova-network b/bin/nova-network
index b2e2cf173f5a..1d620b5254e2 100755
--- a/bin/nova-network
+++ b/bin/nova-network
@@ -25,15 +25,17 @@ from nova import flags
 from nova import twistd
 from nova import utils
 
+from nova.network import service
 
 FLAGS = flags.FLAGS
 
-flags.DEFINE_string('network_service',
-                    'nova.network.service.VlanNetworkService',
-                    'Service Class for Networking')
 
 if __name__ == '__main__':
     twistd.serve(__file__)
 
 if __name__ == '__builtin__':
-    application = utils.import_class(FLAGS.network_service).create()
+    t = FLAGS.network_type
+    if t == 'flat':
+        application = service.FlatNetworkService.create()
+    elif t == 'vlan':
+        application = service.VlanNetworkService.create()
diff --git a/nova/network/service.py b/nova/network/service.py
index 97976a752fbc..72f7db126c39 100644
--- a/nova/network/service.py
+++ b/nova/network/service.py
@@ -33,6 +33,9 @@ from nova.network import model
 
 FLAGS = flags.FLAGS
 
+flags.DEFINE_string('network_type',
+                    'flat',
+                    'Service Class for Networking')
 flags.DEFINE_string('flat_network_bridge', 'br100',
                     'Bridge for simple network instances')
 flags.DEFINE_list('flat_network_ips',

From 8261e26f78f061de5f5e98f8066da33f9b4e3a23 Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya <vishvananda@gmail.com>
Date: Tue, 3 Aug 2010 15:48:49 -0700
Subject: [PATCH 13/36] use get to retrieve node_name from initial_state

---
 nova/compute/model.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/nova/compute/model.py b/nova/compute/model.py
index 7dd130ca381f..266a93b9a35a 100644
--- a/nova/compute/model.py
+++ b/nova/compute/model.py
@@ -151,7 +151,8 @@ class Instance(datastore.BasicModel):
         #             it just adds the first one
         is_new = self.is_new_record()
         node_set = (self.state['node_name'] != 'unassigned' and
-                    self.initial_state['node_name'] == 'unassigned')
+                    self.initial_state.get('node_name', 'unassigned')
+                    == 'unassigned')
         success = super(Instance, self).save()
         if success and is_new:
             self.associate_with("project", self.project)

From 13ec179c99012ed3d579e19094c0039ccb630796 Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya <vishvananda@gmail.com>
Date: Tue, 3 Aug 2010 17:27:29 -0700
Subject: [PATCH 14/36] created association between project and host, modified
 commands to get host async, simplified calls to network

---
 nova/network/service.py        | 72 ++++++++++++++++++----------------
 nova/tests/network_unittest.py | 30 +++++++-------
 2 files changed, 54 insertions(+), 48 deletions(-)

diff --git a/nova/network/service.py b/nova/network/service.py
index 72f7db126c39..45c2867ca234 100644
--- a/nova/network/service.py
+++ b/nova/network/service.py
@@ -23,7 +23,6 @@ Network Nodes are responsible for allocating ips and setting up network
 import logging
 
 from nova import datastore
-from nova import exception as nova_exception
 from nova import flags
 from nova import service
 from nova import utils
@@ -52,6 +51,13 @@ flags.DEFINE_string('flat_network_broadcast', '192.168.0.255',
 flags.DEFINE_string('flat_network_dns', '8.8.4.4',
                        'Dns for simple network')
 
+def get_host_for_project(project_id):
+    redis = datastore.Redis.instance()
+    return redis.get(__host_key(project_id))
+
+def __host_key(project_id):
+    return "network_host:%s" % project_id
+
 
 class BaseNetworkService(service.Service):
     """Implements common network service functionality
@@ -61,12 +67,18 @@ class BaseNetworkService(service.Service):
     def __init__(self, *args, **kwargs):
         self.network = model.PublicNetworkController()
 
-    def create_network(self, user_id, project_id, security_group='default',
-                       *args, **kwargs):
-        """Subclass implements creating network and returns network data"""
-        raise NotImplementedError()
+    def get_network_host(self, user_id, project_id, *args, **kwargs):
+        """Safely becomes the host of the projects network"""
+        redis = datastore.Redis.instance()
+        key = "network_host:%s" % project_id
+        if redis.setnx(key, FLAGS.node_name):
+            return FLAGS.node_name
+        else:
+            return redis.get(key)
 
-    def allocate_fixed_ip(self, user_id, project_id, *args, **kwargs):
+    def allocate_fixed_ip(self, user_id, project_id,
+                          security_group='default',
+                          *args, **kwargs):
         """Subclass implements getting fixed ip from the pool"""
         raise NotImplementedError()
 
@@ -92,22 +104,11 @@ class BaseNetworkService(service.Service):
 
 
 class FlatNetworkService(BaseNetworkService):
-    def create_network(self, user_id, project_id, security_group='default',
-                       *args, **kwargs):
-        """Creates network and returns bridge
+    """Basic network where no vlans are used"""
 
-        Flat network service simply returns a common bridge regardless of
-        project.
-        """
-        return {'network_type': 'injected',
-                'bridge_name': FLAGS.flat_network_bridge,
-                'network_network': FLAGS.flat_network_network,
-                'network_netmask': FLAGS.flat_network_netmask,
-                'network_gateway': FLAGS.flat_network_gateway,
-                'network_broadcast': FLAGS.flat_network_broadcast,
-                'network_dns': FLAGS.flat_network_dns}
-
-    def allocate_fixed_ip(self, user_id, project_id, *args, **kwargs):
+    def allocate_fixed_ip(self, user_id, project_id,
+                          security_group='default',
+                          *args, **kwargs):
         """Gets a fixed ip from the pool
 
         Flat network just grabs the next available ip from the pool
@@ -119,23 +120,26 @@ class FlatNetworkService(BaseNetworkService):
         fixed_ip = redis.spop('ips')
         if not fixed_ip:
             raise exception.NoMoreAddresses()
-        return {'mac': utils.generate_mac(),
-                'ip' : str(fixed_ip)}
+        return {'network_type': 'injected',
+                'mac_address': utils.generate_mac(),
+                'private_dns_name': str(fixed_ip),
+                'bridge_name': FLAGS.flat_network_bridge,
+                'network_network': FLAGS.flat_network_network,
+                'network_netmask': FLAGS.flat_network_netmask,
+                'network_gateway': FLAGS.flat_network_gateway,
+                'network_broadcast': FLAGS.flat_network_broadcast,
+                'network_dns': FLAGS.flat_network_dns}
 
     def deallocate_fixed_ip(self, fixed_ip, *args, **kwargs):
         """Returns an ip to the pool"""
         datastore.Redis.instance().sadd('ips', fixed_ip)
 
 class VlanNetworkService(BaseNetworkService):
-    """Allocates ips and sets up networks"""
-    def create_network(self, user_id, project_id, security_group='default',
-                       *args, **kwargs):
-        """Creates network and returns bridge"""
-        net = model.get_project_network(project_id, security_group)
-        return {'network_type': 'dhcp',
-                'bridge_name': net['bridge_name']}
+    """Vlan network with dhcp"""
 
-    def allocate_fixed_ip(self, user_id, project_id, vpn=False, *args, **kwargs):
+    def allocate_fixed_ip(self, user_id, project_id,
+                          security_group='default',
+                          vpn=False, *args, **kwargs):
         """Gets a fixed ip from the pool """
         mac = utils.generate_mac()
         net = model.get_project_network(project_id)
@@ -143,8 +147,10 @@ class VlanNetworkService(BaseNetworkService):
             fixed_ip = net.allocate_vpn_ip(user_id, project_id, mac)
         else:
             fixed_ip = net.allocate_ip(user_id, project_id, mac)
-        return {'mac': mac,
-                'ip' : fixed_ip}
+        return {'network_type': 'dhcp',
+                'bridge_name': net['bridge_name'],
+                'mac_address': mac,
+                'private_dns_name' : fixed_ip}
 
     def deallocate_fixed_ip(self, fixed_ip,
                             *args, **kwargs):
diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py
index 4b2f6c649dcc..a9695f818934 100644
--- a/nova/tests/network_unittest.py
+++ b/nova/tests/network_unittest.py
@@ -71,8 +71,8 @@ class NetworkTestCase(test.TrialTestCase):
     def test_allocate_deallocate_fixed_ip(self):
         result  = self.service.allocate_fixed_ip(
                 self.user.id, self.projects[0].id)
-        address = result['ip']
-        mac = result['mac']
+        address = result['private_dns_name']
+        mac = result['mac_address']
         logging.debug("Was allocated %s" % (address))
         net = model.get_project_network(self.projects[0].id, "default")
         self.assertEqual(True, is_in_project(address, self.projects[0].id))
@@ -90,12 +90,12 @@ class NetworkTestCase(test.TrialTestCase):
         hostname = "test-host"
         result = self.service.allocate_fixed_ip(
                     self.user.id, self.projects[0].id)
-        mac = result['mac']
-        address = result['ip']
+        mac = result['mac_address']
+        address = result['private_dns_name']
         result = self.service.allocate_fixed_ip(
                 self.user, self.projects[1].id)
-        secondmac = result['mac']
-        secondaddress = result['ip']
+        secondmac = result['mac_address']
+        secondaddress = result['private_dns_name']
 
         net = model.get_project_network(self.projects[0].id, "default")
         secondnet = model.get_project_network(self.projects[1].id, "default")
@@ -124,22 +124,22 @@ class NetworkTestCase(test.TrialTestCase):
     def test_subnet_edge(self):
         result = self.service.allocate_fixed_ip(self.user.id,
                                                        self.projects[0].id)
-        firstaddress = result['ip']
+        firstaddress = result['private_dns_name']
         hostname = "toomany-hosts"
         for i in range(1,5):
             project_id = self.projects[i].id
             result = self.service.allocate_fixed_ip(
                     self.user, project_id)
-            mac = result['mac']
-            address = result['ip']
+            mac = result['mac_address']
+            address = result['private_dns_name']
             result = self.service.allocate_fixed_ip(
                     self.user, project_id)
-            mac2 = result['mac']
-            address2 = result['ip']
+            mac2 = result['mac_address']
+            address2 = result['private_dns_name']
             result = self.service.allocate_fixed_ip(
                    self.user, project_id)
-            mac3 = result['mac']
-            address3 = result['ip']
+            mac3 = result['mac_address']
+            address3 = result['private_dns_name']
             self.assertEqual(False, is_in_project(address, self.projects[0].id))
             self.assertEqual(False, is_in_project(address2, self.projects[0].id))
             self.assertEqual(False, is_in_project(address3, self.projects[0].id))
@@ -194,8 +194,8 @@ class NetworkTestCase(test.TrialTestCase):
         addresses = {}
         for i in range(0, (num_available_ips - 1)):
             result = self.service.allocate_fixed_ip(self.user.id, self.projects[0].id)
-            macs[i] = result['mac']
-            addresses[i] = result['ip']
+            macs[i] = result['mac_address']
+            addresses[i] = result['private_dns_name']
             self.dnsmasq.issue_ip(macs[i], addresses[i], hostname, net.bridge_name)
 
         self.assertRaises(NoMoreAddresses, self.service.allocate_fixed_ip, self.user.id, self.projects[0].id)

From d0ca78ea900d71492212ac531ec75616b02300b0 Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya <vishvananda@gmail.com>
Date: Wed, 4 Aug 2010 11:12:58 -0700
Subject: [PATCH 15/36] it helps to save files BEFORE committing

---
 nova/endpoint/cloud.py    | 60 +++++++++++++++++++++------------------
 nova/virt/libvirt_conn.py | 25 +++++++++-------
 2 files changed, 47 insertions(+), 38 deletions(-)

diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py
index f605eec2c6a4..32cbfdc10def 100644
--- a/nova/endpoint/cloud.py
+++ b/nova/endpoint/cloud.py
@@ -38,6 +38,7 @@ from nova.auth import manager
 from nova.compute import model
 from nova.compute.instance_types import INSTANCE_TYPES
 from nova.endpoint import images
+from nova.network import service as network_service
 from nova.volume import service
 
 
@@ -468,25 +469,31 @@ class CloudController(object):
         return {'addressesSet': addresses}
 
     @rbac.allow('netadmin')
+    @defer.inlineCallbacks
     def allocate_address(self, context, **kwargs):
-        alloc_result = rpc.call(self._get_network_host(context),
+        network_host = yield self._get_network_host(context)
+        alloc_result = rpc.call(network_host,
                          {"method": "allocate_elastic_ip"})
         public_ip = alloc_result['result']
         return defer.succeed({'addressSet': [{'publicIp' : public_ip}]})
 
     @rbac.allow('netadmin')
+    @defer.inlineCallbacks
     def release_address(self, context, public_ip, **kwargs):
         # NOTE(vish): Should we make sure this works?
-        rpc.cast(self._get_network_host(context),
+        network_host = yield self._get_network_host(context)
+        rpc.cast(network_host,
                          {"method": "deallocate_elastic_ip",
                           "args": {"elastic_ip": public_ip}})
         return defer.succeed({'releaseResponse': ["Address released."]})
 
     @rbac.allow('netadmin')
+    @defer.inlineCallbacks
     def associate_address(self, context, instance_id, public_ip, **kwargs):
         instance = self._get_instance(context, instance_id)
         address = self._get_address(context, public_ip)
-        rpc.cast(self._get_network_host(context),
+        network_host = yield self._get_network_host(context)
+        rpc.cast(network_host,
                          {"method": "associate_elastic_ip",
                           "args": {"elastic_ip": address['public_ip'],
                                    "fixed_ip": instance['private_dns_name'],
@@ -494,23 +501,26 @@ class CloudController(object):
         return defer.succeed({'associateResponse': ["Address associated."]})
 
     @rbac.allow('netadmin')
+    @defer.inlineCallbacks
     def disassociate_address(self, context, public_ip, **kwargs):
         address = self._get_address(context, public_ip)
-        self.network.disassociate_address(public_ip)
+        network_host = yield self._get_network_host(context)
+        rpc.cast(network_host,
+                         {"method": "associate_elastic_ip",
+                          "args": {"elastic_ip": address['public_ip']}})
         return defer.succeed({'disassociateResponse': ["Address disassociated."]})
 
-    def release_ip(self, context, private_ip, **kwargs):
-        self.network.release_ip(private_ip)
-        return defer.succeed({'releaseResponse': ["Address released."]})
-
-    def lease_ip(self, context, private_ip, **kwargs):
-        self.network.lease_ip(private_ip)
-        return defer.succeed({'leaseResponse': ["Address leased."]})
-
-    def get_network_host(self, context):
-        # FIXME(vish): this is temporary until we store net hosts for project
-        import socket
-        return socket.gethostname()
+    @defer.inlineCallbacks
+    def _get_network_host(self, context):
+        """Retrieves the network host for a project"""
+        host = network_service.get_host_for_project(context.project.id)
+        if not host:
+            result = yield rpc.call(FLAGS.network_topic,
+                                    {"method": "get_network_host",
+                                     "args": {"user_id": context.user.id,
+                                              "project_id": context.project.id}})
+            host = result['result']
+        defer.returnValue(host)
 
     @rbac.allow('projectmanager', 'sysadmin')
     @defer.inlineCallbacks
@@ -545,27 +555,21 @@ class CloudController(object):
                 raise exception.ApiError('Key Pair %s not found' %
                                          kwargs['key_name'])
             key_data = key_pair.public_key
+        network_host = yield self._get_network_host(context)
         # TODO: Get the real security group of launch in here
         security_group = "default"
-        create_result = yield rpc.call(FLAGS.network_topic,
-                 {"method": "create_network",
-                  "args": {"user_id": context.user.id,
-                           "project_id": context.project.id,
-                           "security_group": security_group}})
-        bridge_name = create_result['result']
-        net_host = self._get_network_host(context)
         for num in range(int(kwargs['max_count'])):
             vpn = False
             if image_id  == FLAGS.vpn_image_id:
                 vpn = True
-            allocate_result = yield rpc.call(net_host,
+            allocate_result = yield rpc.call(network_host,
                      {"method": "allocate_fixed_ip",
                       "args": {"user_id": context.user.id,
                                "project_id": context.project.id,
+                               "security_group": security_group,
                                "vpn": vpn}})
+            allocate_data = allocate_result['result']
             inst = self.instdir.new()
-            inst['mac_address'] = allocate_result['result']['mac_address']
-            inst['private_dns_name'] = allocate_result['result']['ip_address']
             inst['image_id'] = image_id
             inst['kernel_id'] = kernel_id
             inst['ramdisk_id'] = ramdisk_id
@@ -578,7 +582,9 @@ class CloudController(object):
             inst['user_id'] = context.user.id
             inst['project_id'] = context.project.id
             inst['ami_launch_index'] = num
-            inst['bridge_name'] = bridge_name
+            inst['security_group'] = security_group
+            for (key, value) in allocate_data:
+                inst[key] = value
 
             inst.save()
             rpc.cast(FLAGS.compute_topic,
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index c545e4190de8..8133daf0b0e7 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -47,6 +47,9 @@ FLAGS = flags.FLAGS
 flags.DEFINE_string('libvirt_xml_template',
                     utils.abspath('compute/libvirt.xml.template'),
                     'Libvirt XML Template')
+flags.DEFINE_string('injected_network_template',
+                    utils.abspath('compute/interfaces.template'),
+                    'Template file for injected network')
 
 def get_connection(read_only):
     # These are loaded late so that there's no need to install these
@@ -201,14 +204,14 @@ class LibvirtConnection(object):
 
         key = data['key_data']
         net = None
-        if FLAGS.simple_network:
-            with open(FLAGS.simple_network_template) as f:
+        if data.get('network_type', None) == 'injected':
+            with open(FLAGS.injected_network_template) as f:
                 net = f.read() % {'address': data['private_dns_name'],
-                                  'network': FLAGS.simple_network_network,
-                                  'netmask': FLAGS.simple_network_netmask,
-                                  'gateway': FLAGS.simple_network_gateway,
-                                  'broadcast': FLAGS.simple_network_broadcast,
-                                  'dns': FLAGS.simple_network_dns}
+                                  'network': data['network_network'],
+                                  'netmask': data['network_netmask'],
+                                  'gateway': data['network_gateway'],
+                                  'broadcast': data['network_broadcast'],
+                                  'dns': data['network_dns']}
         if key or net:
             logging.info('Injecting data into image %s', data['image_id'])
             yield disk.inject_data(basepath('disk-raw'), key, net, execute=execute)
@@ -255,7 +258,7 @@ class LibvirtConnection(object):
         """
         Note that this function takes an instance ID, not an Instance, so
         that it can be called by monitor.
-        
+
         Returns a list of all block devices for this domain.
         """
         domain = self._conn.lookupByName(instance_id)
@@ -298,7 +301,7 @@ class LibvirtConnection(object):
         """
         Note that this function takes an instance ID, not an Instance, so
         that it can be called by monitor.
-        
+
         Returns a list of all network interfaces for this instance.
         """
         domain = self._conn.lookupByName(instance_id)
@@ -341,7 +344,7 @@ class LibvirtConnection(object):
         """
         Note that this function takes an instance ID, not an Instance, so
         that it can be called by monitor.
-        """        
+        """
         domain = self._conn.lookupByName(instance_id)
         return domain.blockStats(disk)
 
@@ -350,6 +353,6 @@ class LibvirtConnection(object):
         """
         Note that this function takes an instance ID, not an Instance, so
         that it can be called by monitor.
-        """        
+        """
         domain = self._conn.lookupByName(instance_id)
         return domain.interfaceStats(interface)

From b2e220c976b7689a2c5d924395c57012c6b99212 Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya <vishvananda@gmail.com>
Date: Wed, 4 Aug 2010 11:18:46 -0700
Subject: [PATCH 16/36] inline commands use returnValue

---
 nova/endpoint/cloud.py | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py
index 32cbfdc10def..3db999fbad12 100644
--- a/nova/endpoint/cloud.py
+++ b/nova/endpoint/cloud.py
@@ -475,7 +475,7 @@ class CloudController(object):
         alloc_result = rpc.call(network_host,
                          {"method": "allocate_elastic_ip"})
         public_ip = alloc_result['result']
-        return defer.succeed({'addressSet': [{'publicIp' : public_ip}]})
+        defer.returnValue({'addressSet': [{'publicIp' : public_ip}]})
 
     @rbac.allow('netadmin')
     @defer.inlineCallbacks
@@ -485,7 +485,7 @@ class CloudController(object):
         rpc.cast(network_host,
                          {"method": "deallocate_elastic_ip",
                           "args": {"elastic_ip": public_ip}})
-        return defer.succeed({'releaseResponse': ["Address released."]})
+        defer.returnValue({'releaseResponse': ["Address released."]})
 
     @rbac.allow('netadmin')
     @defer.inlineCallbacks
@@ -498,7 +498,7 @@ class CloudController(object):
                           "args": {"elastic_ip": address['public_ip'],
                                    "fixed_ip": instance['private_dns_name'],
                                    "instance_id": instance['instance_id']}})
-        return defer.succeed({'associateResponse': ["Address associated."]})
+        defer.returnValue({'associateResponse': ["Address associated."]})
 
     @rbac.allow('netadmin')
     @defer.inlineCallbacks
@@ -508,7 +508,7 @@ class CloudController(object):
         rpc.cast(network_host,
                          {"method": "associate_elastic_ip",
                           "args": {"elastic_ip": address['public_ip']}})
-        return defer.succeed({'disassociateResponse': ["Address disassociated."]})
+        defer.returnValue({'disassociateResponse': ["Address disassociated."]})
 
     @defer.inlineCallbacks
     def _get_network_host(self, context):
@@ -596,8 +596,10 @@ class CloudController(object):
         defer.returnValue(self._format_instances(context, reservation_id))
 
     @rbac.allow('projectmanager', 'sysadmin')
+    @defer.inlineCallbacks
     def terminate_instances(self, context, instance_id, **kwargs):
         logging.debug("Going to start terminating instances")
+        network_host = yield self._get_network_host(context)
         for i in instance_id:
             logging.debug("Going to try and terminate %s" % i)
             try:
@@ -612,7 +614,7 @@ class CloudController(object):
                 # NOTE(vish): Right now we don't really care if the ip is
                 #             disassociated.  We may need to worry about
                 #             checking this later.  Perhaps in the scheduler?
-                rpc.cast(self._get_network_host(context),
+                rpc.cast(network_host,
                          {"method": "disassociate_elastic_ip",
                           "args": {"elastic_ip": elastic_ip}})
 
@@ -622,7 +624,7 @@ class CloudController(object):
                 # NOTE(vish): Right now we don't really care if the ip is
                 #             actually removed.  We may need to worry about
                 #             checking this later.  Perhaps in the scheduler?
-                rpc.cast(self._get_network_host(context),
+                rpc.cast(network_host,
                          {"method": "deallocate_fixed_ip",
                           "args": {"elastic_ip": elastic_ip}})
 
@@ -633,7 +635,7 @@ class CloudController(object):
                           "args": {"instance_id": i}})
             else:
                 instance.destroy()
-        return defer.succeed(True)
+        defer.returnValue(True)
 
     @rbac.allow('projectmanager', 'sysadmin')
     def reboot_instances(self, context, instance_id, **kwargs):

From 311daf14758d8a04c5f73fa4e2911e469a716c1f Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya <vishvananda@gmail.com>
Date: Wed, 4 Aug 2010 11:44:25 -0700
Subject: [PATCH 17/36] don't __ module methods

---
 nova/network/service.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/nova/network/service.py b/nova/network/service.py
index 45c2867ca234..b3645d2a3ad3 100644
--- a/nova/network/service.py
+++ b/nova/network/service.py
@@ -53,9 +53,9 @@ flags.DEFINE_string('flat_network_dns', '8.8.4.4',
 
 def get_host_for_project(project_id):
     redis = datastore.Redis.instance()
-    host = redis.get(__host_key(project_id))
+    host = redis.get(_host_key(project_id))
 
-def __host_key(project_id):
+def _host_key(project_id):
     return "network_host:%s" % project_id
 
 
@@ -70,7 +70,7 @@ class BaseNetworkService(service.Service):
     def get_network_host(self, user_id, project_id, *args, **kwargs):
         """Safely becomes the host of the projects network"""
         redis = datastore.Redis.instance()
-        key = __host_key(project_id)
+        key = _host_key(project_id)
         if redis.setnx(key, FLAGS.node_name):
             return FLAGS.node_name
         else:

From e816e7923582d7ac11b7f7a554eec815ea61496e Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya <vishvananda@gmail.com>
Date: Wed, 4 Aug 2010 12:02:23 -0700
Subject: [PATCH 18/36] use deferreds in network

---
 nova/network/service.py | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/nova/network/service.py b/nova/network/service.py
index b3645d2a3ad3..e998c557516c 100644
--- a/nova/network/service.py
+++ b/nova/network/service.py
@@ -22,6 +22,8 @@ Network Nodes are responsible for allocating ips and setting up network
 
 import logging
 
+from twisted.internet import defer
+
 from nova import datastore
 from nova import flags
 from nova import service
@@ -72,9 +74,9 @@ class BaseNetworkService(service.Service):
         redis = datastore.Redis.instance()
         key = _host_key(project_id)
         if redis.setnx(key, FLAGS.node_name):
-            return FLAGS.node_name
+            return defer.succeed(FLAGS.node_name)
         else:
-            return redis.get(key)
+            return defer.succeed(redis.get(key))
 
     def allocate_fixed_ip(self, user_id, project_id,
                           security_group='default',
@@ -120,7 +122,7 @@ class FlatNetworkService(BaseNetworkService):
         fixed_ip = redis.spop('ips')
         if not fixed_ip:
             raise exception.NoMoreAddresses()
-        return {'network_type': 'injected',
+        return defer.succeed({'network_type': 'injected',
                 'mac_address': utils.generate_mac(),
                 'private_dns_name': str(fixed_ip),
                 'bridge_name': FLAGS.flat_network_bridge,
@@ -128,7 +130,7 @@ class FlatNetworkService(BaseNetworkService):
                 'network_netmask': FLAGS.flat_network_netmask,
                 'network_gateway': FLAGS.flat_network_gateway,
                 'network_broadcast': FLAGS.flat_network_broadcast,
-                'network_dns': FLAGS.flat_network_dns}
+                'network_dns': FLAGS.flat_network_dns})
 
     def deallocate_fixed_ip(self, fixed_ip, *args, **kwargs):
         """Returns an ip to the pool"""
@@ -147,18 +149,18 @@ class VlanNetworkService(BaseNetworkService):
             fixed_ip = net.allocate_vpn_ip(user_id, project_id, mac)
         else:
             fixed_ip = net.allocate_ip(user_id, project_id, mac)
-        return {'network_type': 'dhcp',
+        return defer.succeed({'network_type': 'dhcp',
                 'bridge_name': net['bridge_name'],
                 'mac_address': mac,
-                'private_dns_name' : fixed_ip}
+                'private_dns_name' : fixed_ip})
 
     def deallocate_fixed_ip(self, fixed_ip,
                             *args, **kwargs):
         """Returns an ip to the pool"""
-        model.get_network_by_address(fixed_ip).deallocate_ip(fixed_ip)
+        return model.get_network_by_address(fixed_ip).deallocate_ip(fixed_ip)
 
     def lease_ip(self, address):
-        return self. __get_network_by_address(address).lease_ip(address)
+        return model.get_network_by_address(address).lease_ip(address)
 
     def release_ip(self, address):
         return model.get_network_by_address(address).release_ip(address)

From c821709a48eb22db4db182f25f1e405039294d2c Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya <vishvananda@gmail.com>
Date: Wed, 4 Aug 2010 12:18:52 -0700
Subject: [PATCH 19/36] method should return network topic instead of network
 host

---
 nova/endpoint/cloud.py  | 32 ++++++++++++++++----------------
 nova/network/service.py |  2 +-
 2 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py
index 3db999fbad12..33be0a612a49 100644
--- a/nova/endpoint/cloud.py
+++ b/nova/endpoint/cloud.py
@@ -471,8 +471,8 @@ class CloudController(object):
     @rbac.allow('netadmin')
     @defer.inlineCallbacks
     def allocate_address(self, context, **kwargs):
-        network_host = yield self._get_network_host(context)
-        alloc_result = rpc.call(network_host,
+        network_topic = yield self._get_network_topic(context)
+        alloc_result = rpc.call(network_topic,
                          {"method": "allocate_elastic_ip"})
         public_ip = alloc_result['result']
         defer.returnValue({'addressSet': [{'publicIp' : public_ip}]})
@@ -481,8 +481,8 @@ class CloudController(object):
     @defer.inlineCallbacks
     def release_address(self, context, public_ip, **kwargs):
         # NOTE(vish): Should we make sure this works?
-        network_host = yield self._get_network_host(context)
-        rpc.cast(network_host,
+        network_topic = yield self._get_network_topic(context)
+        rpc.cast(network_topic,
                          {"method": "deallocate_elastic_ip",
                           "args": {"elastic_ip": public_ip}})
         defer.returnValue({'releaseResponse': ["Address released."]})
@@ -492,8 +492,8 @@ class CloudController(object):
     def associate_address(self, context, instance_id, public_ip, **kwargs):
         instance = self._get_instance(context, instance_id)
         address = self._get_address(context, public_ip)
-        network_host = yield self._get_network_host(context)
-        rpc.cast(network_host,
+        network_topic = yield self._get_network_topic(context)
+        rpc.cast(network_topic,
                          {"method": "associate_elastic_ip",
                           "args": {"elastic_ip": address['public_ip'],
                                    "fixed_ip": instance['private_dns_name'],
@@ -504,23 +504,23 @@ class CloudController(object):
     @defer.inlineCallbacks
     def disassociate_address(self, context, public_ip, **kwargs):
         address = self._get_address(context, public_ip)
-        network_host = yield self._get_network_host(context)
-        rpc.cast(network_host,
+        network_topic = yield self._get_network_topic(context)
+        rpc.cast(network_topic,
                          {"method": "associate_elastic_ip",
                           "args": {"elastic_ip": address['public_ip']}})
         defer.returnValue({'disassociateResponse': ["Address disassociated."]})
 
     @defer.inlineCallbacks
-    def _get_network_host(self, context):
+    def _get_network_topic(self, context):
         """Retrieves the network host for a project"""
         host = network_service.get_host_for_project(context.project.id)
         if not host:
             result = yield rpc.call(FLAGS.network_topic,
-                                    {"method": "get_network_host",
+                                    {"method": "get_network_topic",
                                      "args": {"user_id": context.user.id,
                                               "project_id": context.project.id}})
             host = result['result']
-        defer.returnValue(host)
+            defer.returnValue('%s.%s' %(FLAGS.network_topic, host))
 
     @rbac.allow('projectmanager', 'sysadmin')
     @defer.inlineCallbacks
@@ -555,14 +555,14 @@ class CloudController(object):
                 raise exception.ApiError('Key Pair %s not found' %
                                          kwargs['key_name'])
             key_data = key_pair.public_key
-        network_host = yield self._get_network_host(context)
+        network_topic = yield self._get_network_topic(context)
         # TODO: Get the real security group of launch in here
         security_group = "default"
         for num in range(int(kwargs['max_count'])):
             vpn = False
             if image_id  == FLAGS.vpn_image_id:
                 vpn = True
-            allocate_result = yield rpc.call(network_host,
+            allocate_result = yield rpc.call(network_topic,
                      {"method": "allocate_fixed_ip",
                       "args": {"user_id": context.user.id,
                                "project_id": context.project.id,
@@ -599,7 +599,7 @@ class CloudController(object):
     @defer.inlineCallbacks
     def terminate_instances(self, context, instance_id, **kwargs):
         logging.debug("Going to start terminating instances")
-        network_host = yield self._get_network_host(context)
+        network_topic = yield self._get_network_topic(context)
         for i in instance_id:
             logging.debug("Going to try and terminate %s" % i)
             try:
@@ -614,7 +614,7 @@ class CloudController(object):
                 # NOTE(vish): Right now we don't really care if the ip is
                 #             disassociated.  We may need to worry about
                 #             checking this later.  Perhaps in the scheduler?
-                rpc.cast(network_host,
+                rpc.cast(network_topic,
                          {"method": "disassociate_elastic_ip",
                           "args": {"elastic_ip": elastic_ip}})
 
@@ -624,7 +624,7 @@ class CloudController(object):
                 # NOTE(vish): Right now we don't really care if the ip is
                 #             actually removed.  We may need to worry about
                 #             checking this later.  Perhaps in the scheduler?
-                rpc.cast(network_host,
+                rpc.cast(network_topic,
                          {"method": "deallocate_fixed_ip",
                           "args": {"elastic_ip": elastic_ip}})
 
diff --git a/nova/network/service.py b/nova/network/service.py
index e998c557516c..1f36abc2bcfe 100644
--- a/nova/network/service.py
+++ b/nova/network/service.py
@@ -55,7 +55,7 @@ flags.DEFINE_string('flat_network_dns', '8.8.4.4',
 
 def get_host_for_project(project_id):
     redis = datastore.Redis.instance()
-    host = redis.get(_host_key(project_id))
+    return redis.get(_host_key(project_id))
 
 def _host_key(project_id):
     return "network_host:%s" % project_id

From 148f319759fc9f566e0e9020ceb8ea00081ff8c8 Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya <vishvananda@gmail.com>
Date: Wed, 4 Aug 2010 12:48:16 -0700
Subject: [PATCH 20/36] fixes in get public address and extra references to
 self.network

---
 nova/endpoint/cloud.py | 18 ++++++++++--------
 nova/network/model.py  | 12 ++++++------
 2 files changed, 16 insertions(+), 14 deletions(-)

diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py
index 33be0a612a49..957b25b9bad5 100644
--- a/nova/endpoint/cloud.py
+++ b/nova/endpoint/cloud.py
@@ -39,6 +39,7 @@ from nova.compute import model
 from nova.compute.instance_types import INSTANCE_TYPES
 from nova.endpoint import images
 from nova.network import service as network_service
+from nova.network import model as network_model
 from nova.volume import service
 
 
@@ -307,7 +308,7 @@ class CloudController(object):
 
     def _get_address(self, context, public_ip):
         # FIXME(vish) this should move into network.py
-        address = self.network.get_host(public_ip)
+        address = self.network_model.PublicAddress.lookup(public_ip)
         if address and (context.user.is_admin() or address['project_id'] == context.project.id):
             return address
         raise exception.NotFound("Address at ip %s not found" % public_ip)
@@ -416,7 +417,7 @@ class CloudController(object):
                 'code': instance.get('state', 0),
                 'name': instance.get('state_description', 'pending')
             }
-            i['public_dns_name'] = self.network.get_public_ip_for_instance(
+            i['public_dns_name'] = self.network_model.get_public_ip_for_instance(
                                                         i['instance_id'])
             i['private_dns_name'] = instance.get('private_dns_name', None)
             if not i['public_dns_name']:
@@ -451,7 +452,7 @@ class CloudController(object):
 
     def format_addresses(self, context):
         addresses = []
-        for address in self.network.host_objs:
+        for address in self.network_model.PublicAddress.all():
             # TODO(vish): implement a by_project iterator for addresses
             if (context.user.is_admin() or
                 address['project_id'] == self.project.id):
@@ -520,7 +521,7 @@ class CloudController(object):
                                      "args": {"user_id": context.user.id,
                                               "project_id": context.project.id}})
             host = result['result']
-            defer.returnValue('%s.%s' %(FLAGS.network_topic, host))
+        defer.returnValue('%s.%s' %(FLAGS.network_topic, host))
 
     @rbac.allow('projectmanager', 'sysadmin')
     @defer.inlineCallbacks
@@ -608,9 +609,10 @@ class CloudController(object):
                 logging.warning("Instance %s was not found during terminate"
                                 % i)
                 continue
-            elastic_ip = instance.get('public_dns_name', None)
-            if elastic_ip:
-                logging.debug("Deallocating address %s" % elastic_ip)
+            address = self.network_model.get_public_ip_for_instance(i)
+            if address:
+                elastic_ip = address['public_ip']
+                logging.debug("Disassociating address %s" % elastic_ip)
                 # NOTE(vish): Right now we don't really care if the ip is
                 #             disassociated.  We may need to worry about
                 #             checking this later.  Perhaps in the scheduler?
@@ -626,7 +628,7 @@ class CloudController(object):
                 #             checking this later.  Perhaps in the scheduler?
                 rpc.cast(network_topic,
                          {"method": "deallocate_fixed_ip",
-                          "args": {"elastic_ip": elastic_ip}})
+                          "args": {"fixed_ip": fixed_ip}})
 
             if instance.get('node_name', 'unassigned') != 'unassigned':
                 # NOTE(joshua?): It's also internal default
diff --git a/nova/network/model.py b/nova/network/model.py
index 5346549b88bb..7f0fded4c525 100644
--- a/nova/network/model.py
+++ b/nova/network/model.py
@@ -399,12 +399,6 @@ class PublicNetworkController(BaseNetwork):
         for address in self.assigned:
             yield PublicAddress(address)
 
-    def get_public_ip_for_instance(self, instance_id):
-        # FIXME: this should be a lookup - iteration won't scale
-        for address_record in self.host_objs:
-            if address_record.get('instance_id', 'available') == instance_id:
-                return address_record['address']
-
     def get_host(self, host):
         if host in self.assigned:
             return PublicAddress(host)
@@ -547,3 +541,9 @@ def get_network_by_interface(iface, security_group='default'):
 
 
 
+def get_public_ip_for_instance(self, instance_id):
+    # FIXME: this should be a lookup - iteration won't scale
+    for address_record in PublicAddress.all():
+        if address_record.get('instance_id', 'available') == instance_id:
+            return address_record['address']
+

From bfe90c9c26a0c477386f3143c1e9f0563b6a1a97 Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya <vishvananda@gmail.com>
Date: Wed, 4 Aug 2010 12:51:05 -0700
Subject: [PATCH 21/36] reference to self.project instead of context.project +
 self.network_model instead of network_model

---
 nova/endpoint/cloud.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py
index 957b25b9bad5..338a52214f24 100644
--- a/nova/endpoint/cloud.py
+++ b/nova/endpoint/cloud.py
@@ -308,7 +308,7 @@ class CloudController(object):
 
     def _get_address(self, context, public_ip):
         # FIXME(vish) this should move into network.py
-        address = self.network_model.PublicAddress.lookup(public_ip)
+        address = network_model.PublicAddress.lookup(public_ip)
         if address and (context.user.is_admin() or address['project_id'] == context.project.id):
             return address
         raise exception.NotFound("Address at ip %s not found" % public_ip)
@@ -417,7 +417,7 @@ class CloudController(object):
                 'code': instance.get('state', 0),
                 'name': instance.get('state_description', 'pending')
             }
-            i['public_dns_name'] = self.network_model.get_public_ip_for_instance(
+            i['public_dns_name'] = network_model.get_public_ip_for_instance(
                                                         i['instance_id'])
             i['private_dns_name'] = instance.get('private_dns_name', None)
             if not i['public_dns_name']:
@@ -452,10 +452,10 @@ class CloudController(object):
 
     def format_addresses(self, context):
         addresses = []
-        for address in self.network_model.PublicAddress.all():
+        for address in network_model.PublicAddress.all():
             # TODO(vish): implement a by_project iterator for addresses
             if (context.user.is_admin() or
-                address['project_id'] == self.project.id):
+                address['project_id'] == context.project.id):
                 address_rv = {
                     'public_ip': address['address'],
                     'instance_id' : address.get('instance_id', 'free')
@@ -609,7 +609,7 @@ class CloudController(object):
                 logging.warning("Instance %s was not found during terminate"
                                 % i)
                 continue
-            address = self.network_model.get_public_ip_for_instance(i)
+            address = network_model.get_public_ip_for_instance(i)
             if address:
                 elastic_ip = address['public_ip']
                 logging.debug("Disassociating address %s" % elastic_ip)

From 4f6d71411ca23a4f92654f000e24fe008f0a00da Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya <vishvananda@gmail.com>
Date: Wed, 4 Aug 2010 12:54:21 -0700
Subject: [PATCH 22/36] use iteritems

---
 nova/endpoint/cloud.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py
index 338a52214f24..789663899f1b 100644
--- a/nova/endpoint/cloud.py
+++ b/nova/endpoint/cloud.py
@@ -584,7 +584,7 @@ class CloudController(object):
             inst['project_id'] = context.project.id
             inst['ami_launch_index'] = num
             inst['security_group'] = security_group
-            for (key, value) in allocate_data:
+            for (key, value) in allocate_data.iteritems():
                 inst[key] = value
 
             inst.save()

From a0eb1b9cc2e33c1a90501daeb3776738689e328f Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya <vishvananda@gmail.com>
Date: Wed, 4 Aug 2010 14:08:23 -0700
Subject: [PATCH 23/36] fix extra reference, method passing to network, various
 errors in elastic_ips

---
 nova/endpoint/cloud.py  | 12 +++++++-----
 nova/network/model.py   |  6 +++++-
 nova/network/service.py |  7 +++++--
 3 files changed, 17 insertions(+), 8 deletions(-)

diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py
index 789663899f1b..1695e4b050c3 100644
--- a/nova/endpoint/cloud.py
+++ b/nova/endpoint/cloud.py
@@ -473,8 +473,10 @@ class CloudController(object):
     @defer.inlineCallbacks
     def allocate_address(self, context, **kwargs):
         network_topic = yield self._get_network_topic(context)
-        alloc_result = rpc.call(network_topic,
-                         {"method": "allocate_elastic_ip"})
+        alloc_result = yield rpc.call(network_topic,
+                         {"method": "allocate_elastic_ip",
+                          "args": {"user_id": context.user.id,
+                                   "project_id": context.project.id}})
         public_ip = alloc_result['result']
         defer.returnValue({'addressSet': [{'publicIp' : public_ip}]})
 
@@ -496,7 +498,7 @@ class CloudController(object):
         network_topic = yield self._get_network_topic(context)
         rpc.cast(network_topic,
                          {"method": "associate_elastic_ip",
-                          "args": {"elastic_ip": address['public_ip'],
+                          "args": {"elastic_ip": address['address'],
                                    "fixed_ip": instance['private_dns_name'],
                                    "instance_id": instance['instance_id']}})
         defer.returnValue({'associateResponse': ["Address associated."]})
@@ -507,8 +509,8 @@ class CloudController(object):
         address = self._get_address(context, public_ip)
         network_topic = yield self._get_network_topic(context)
         rpc.cast(network_topic,
-                         {"method": "associate_elastic_ip",
-                          "args": {"elastic_ip": address['public_ip']}})
+                         {"method": "disassociate_elastic_ip",
+                          "args": {"elastic_ip": address['address']}})
         defer.returnValue({'disassociateResponse': ["Address disassociated."]})
 
     @defer.inlineCallbacks
diff --git a/nova/network/model.py b/nova/network/model.py
index 7f0fded4c525..a1eaf575334f 100644
--- a/nova/network/model.py
+++ b/nova/network/model.py
@@ -412,6 +412,10 @@ class PublicNetworkController(BaseNetwork):
         PublicAddress(host).destroy()
         datastore.Redis.instance().hdel(self._hosts_key, host)
 
+    def deallocate_ip(self, ip_str):
+        # NOTE(vish): cleanup is now done on release by the parent class
+        self.release_ip(ip_str)
+
     def associate_address(self, public_ip, private_ip, instance_id):
         if not public_ip in self.assigned:
             raise exception.AddressNotAllocated()
@@ -541,7 +545,7 @@ def get_network_by_interface(iface, security_group='default'):
 
 
 
-def get_public_ip_for_instance(self, instance_id):
+def get_public_ip_for_instance(instance_id):
     # FIXME: this should be a lookup - iteration won't scale
     for address_record in PublicAddress.all():
         if address_record.get('instance_id', 'available') == instance_id:
diff --git a/nova/network/service.py b/nova/network/service.py
index 1f36abc2bcfe..bf4aa00738a8 100644
--- a/nova/network/service.py
+++ b/nova/network/service.py
@@ -88,9 +88,12 @@ class BaseNetworkService(service.Service):
         """Subclass implements return of ip to the pool"""
         raise NotImplementedError()
 
-    def allocate_elastic_ip(self):
+    def allocate_elastic_ip(self, user_id, project_id):
         """Gets a elastic ip from the pool"""
-        return self.network.allocate_ip()
+        # NOTE(vish): Replicating earlier decision to use 'public' as
+        #             mac address name, although this should probably
+        #             be done inside of the PublicNetworkController
+        return self.network.allocate_ip(user_id, project_id, 'public')
 
     def associate_elastic_ip(self, elastic_ip, fixed_ip, instance_id):
         """Associates an elastic ip to a fixed ip"""

From 9a038d2b81163d3e658e4fb3be4f8c14aa3b5fab Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya <vishvananda@gmail.com>
Date: Wed, 4 Aug 2010 15:44:23 -0700
Subject: [PATCH 24/36] fixed tests, moved compute network config call, added
 notes, made inject option into a boolean

---
 bin/nova-network               |  7 +---
 nova/compute/service.py        | 19 +++++----
 nova/network/service.py        | 75 ++++++++++++++++++++++++++--------
 nova/tests/network_unittest.py | 18 ++++----
 nova/virt/libvirt_conn.py      |  2 +-
 5 files changed, 81 insertions(+), 40 deletions(-)

diff --git a/bin/nova-network b/bin/nova-network
index 1d620b5254e2..ba9063f561fd 100755
--- a/bin/nova-network
+++ b/bin/nova-network
@@ -23,7 +23,6 @@
 
 from nova import flags
 from nova import twistd
-from nova import utils
 
 from nova.network import service
 
@@ -34,8 +33,4 @@ if __name__ == '__main__':
     twistd.serve(__file__)
 
 if __name__ == '__builtin__':
-    t = FLAGS.network_type
-    if t == 'flat':
-        application = service.FlatNetworkService.create()
-    elif t == 'vlan':
-        application = service.VlanNetworkService.create()
+    application = service.type_to_class(FLAGS.network_type).create()
diff --git a/nova/compute/service.py b/nova/compute/service.py
index a22240b052e4..820116453d43 100644
--- a/nova/compute/service.py
+++ b/nova/compute/service.py
@@ -41,7 +41,7 @@ from nova.compute import disk
 from nova.compute import model
 from nova.compute import power_state
 from nova.compute.instance_types import INSTANCE_TYPES
-from nova.network import model as net_model
+from nova.network import service as network_service
 from nova.objectstore import image # for image_path flag
 from nova.virt import connection as virt_connection
 from nova.volume import service as volume_service
@@ -117,12 +117,17 @@ class ComputeService(service.Service):
         """ launch a new instance with specified options """
         logging.debug("Starting instance %s..." % (instance_id))
         inst = self.instdir.get(instance_id)
-        if inst.get('network_type', 'dhcp') == 'dhcp':
-            # TODO: Get the real security group of launch in here
-            security_group = "default"
-            net = net_model.BridgedNetwork.get_network_for_project(inst['user_id'],
-                                                             inst['project_id'],
-                                            security_group).express()
+        # TODO: Get the real security group of launch in here
+        security_group = "default"
+        # NOTE(vish): passing network type allows us to express the
+        #             network without making a call to network to find
+        #             out which type of network to setup
+        network_service.setup_compute_network(
+                                           inst.get('network_type', 'vlan'),
+                                           inst['user_id'],
+                                           inst['project_id'],
+                                           security_group)
+
         inst['node_name'] = FLAGS.node_name
         inst.save()
         # TODO(vish) check to make sure the availability zone matches
diff --git a/nova/network/service.py b/nova/network/service.py
index bf4aa00738a8..57b8bbb78b11 100644
--- a/nova/network/service.py
+++ b/nova/network/service.py
@@ -20,8 +20,6 @@
 Network Nodes are responsible for allocating ips and setting up network
 """
 
-import logging
-
 from twisted.internet import defer
 
 from nova import datastore
@@ -29,6 +27,7 @@ from nova import flags
 from nova import service
 from nova import utils
 from nova.auth import manager
+from nova.exception import NotFound
 from nova.network import exception
 from nova.network import model
 
@@ -53,10 +52,24 @@ flags.DEFINE_string('flat_network_broadcast', '192.168.0.255',
 flags.DEFINE_string('flat_network_dns', '8.8.4.4',
                        'Dns for simple network')
 
+def type_to_class(network_type):
+    if network_type == 'flat':
+        return FlatNetworkService
+    elif network_type == 'vlan':
+        return VlanNetworkService
+    raise NotFound("Couldn't find %s network type" % network_type)
+
+
+def setup_compute_network(network_type, user_id, project_id, security_group):
+    srv = type_to_class(network_type)
+    srv.setup_compute_network(network_type, user_id, project_id, security_group)
+
+
 def get_host_for_project(project_id):
     redis = datastore.Redis.instance()
     return redis.get(_host_key(project_id))
 
+
 def _host_key(project_id):
     return "network_host:%s" % project_id
 
@@ -88,6 +101,12 @@ class BaseNetworkService(service.Service):
         """Subclass implements return of ip to the pool"""
         raise NotImplementedError()
 
+    @classmethod
+    def setup_compute_network(self, user_id, project_id, security_group,
+                              *args, **kwargs):
+        """Sets up matching network for compute hosts"""
+        raise NotImplementedError()
+
     def allocate_elastic_ip(self, user_id, project_id):
         """Gets a elastic ip from the pool"""
         # NOTE(vish): Replicating earlier decision to use 'public' as
@@ -111,6 +130,11 @@ class BaseNetworkService(service.Service):
 class FlatNetworkService(BaseNetworkService):
     """Basic network where no vlans are used"""
 
+    @classmethod
+    def setup_compute_network(self, fixed_ip, *args, **kwargs):
+        """Network is created manually"""
+        pass
+
     def allocate_fixed_ip(self, user_id, project_id,
                           security_group='default',
                           *args, **kwargs):
@@ -118,6 +142,9 @@ class FlatNetworkService(BaseNetworkService):
 
         Flat network just grabs the next available ip from the pool
         """
+        # NOTE(vish): Some automation could be done here.  For example,
+        #             creating the flat_network_bridge and setting up
+        #             a gateway.  This is all done manually atm
         redis = datastore.Redis.instance()
         if not redis.exists('ips') and not len(redis.keys('instances:*')):
             for fixed_ip in FLAGS.flat_network_ips:
@@ -125,15 +152,16 @@ class FlatNetworkService(BaseNetworkService):
         fixed_ip = redis.spop('ips')
         if not fixed_ip:
             raise exception.NoMoreAddresses()
-        return defer.succeed({'network_type': 'injected',
-                'mac_address': utils.generate_mac(),
-                'private_dns_name': str(fixed_ip),
-                'bridge_name': FLAGS.flat_network_bridge,
-                'network_network': FLAGS.flat_network_network,
-                'network_netmask': FLAGS.flat_network_netmask,
-                'network_gateway': FLAGS.flat_network_gateway,
-                'network_broadcast': FLAGS.flat_network_broadcast,
-                'network_dns': FLAGS.flat_network_dns})
+        return defer.succeed({'inject_network': True,
+                            'network_type': FLAGS.network_type,
+                            'mac_address': utils.generate_mac(),
+                            'private_dns_name': str(fixed_ip),
+                            'bridge_name': FLAGS.flat_network_bridge,
+                            'network_network': FLAGS.flat_network_network,
+                            'network_netmask': FLAGS.flat_network_netmask,
+                            'network_gateway': FLAGS.flat_network_gateway,
+                            'network_broadcast': FLAGS.flat_network_broadcast,
+                            'network_dns': FLAGS.flat_network_dns})
 
     def deallocate_fixed_ip(self, fixed_ip, *args, **kwargs):
         """Returns an ip to the pool"""
@@ -141,7 +169,10 @@ class FlatNetworkService(BaseNetworkService):
 
 class VlanNetworkService(BaseNetworkService):
     """Vlan network with dhcp"""
-
+    # NOTE(vish): A lot of the interactions with network/model.py can be
+    #             simplified and improved.  Also there it may be useful
+    #             to support vlans separately from dhcp, instead of having
+    #             both of them together in this class.
     def allocate_fixed_ip(self, user_id, project_id,
                           security_group='default',
                           vpn=False, *args, **kwargs):
@@ -152,10 +183,10 @@ class VlanNetworkService(BaseNetworkService):
             fixed_ip = net.allocate_vpn_ip(user_id, project_id, mac)
         else:
             fixed_ip = net.allocate_ip(user_id, project_id, mac)
-        return defer.succeed({'network_type': 'dhcp',
-                'bridge_name': net['bridge_name'],
-                'mac_address': mac,
-                'private_dns_name' : fixed_ip})
+        return defer.succeed({'network_type': FLAGS.network_type,
+                              'bridge_name': net['bridge_name'],
+                              'mac_address': mac,
+                              'private_dns_name' : fixed_ip})
 
     def deallocate_fixed_ip(self, fixed_ip,
                             *args, **kwargs):
@@ -173,4 +204,14 @@ class VlanNetworkService(BaseNetworkService):
         for project in manager.AuthManager().get_projects():
             model.get_project_network(project.id).express()
 
-
+    @classmethod
+    def setup_compute_network(self, user_id, project_id, security_group,
+                              *args, **kwargs):
+        """Sets up matching network for compute hosts"""
+        # NOTE(vish): Use BridgedNetwork instead of DHCPNetwork because
+        #             we don't want to run dnsmasq on the client machines
+        net = model.BridgedNetwork.get_network_for_project(
+                                            user_id,
+                                            project_id,
+                                            security_group)
+        net.express()
diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py
index a9695f818934..42cae327fbb2 100644
--- a/nova/tests/network_unittest.py
+++ b/nova/tests/network_unittest.py
@@ -69,7 +69,7 @@ class NetworkTestCase(test.TrialTestCase):
         self.assertTrue(IPy.IP(address) in self.network.network)
 
     def test_allocate_deallocate_fixed_ip(self):
-        result  = self.service.allocate_fixed_ip(
+        result  = yield self.service.allocate_fixed_ip(
                 self.user.id, self.projects[0].id)
         address = result['private_dns_name']
         mac = result['mac_address']
@@ -88,11 +88,11 @@ class NetworkTestCase(test.TrialTestCase):
 
     def test_range_allocation(self):
         hostname = "test-host"
-        result = self.service.allocate_fixed_ip(
+        result = yield self.service.allocate_fixed_ip(
                     self.user.id, self.projects[0].id)
         mac = result['mac_address']
         address = result['private_dns_name']
-        result = self.service.allocate_fixed_ip(
+        result = yield self.service.allocate_fixed_ip(
                 self.user, self.projects[1].id)
         secondmac = result['mac_address']
         secondaddress = result['private_dns_name']
@@ -122,21 +122,21 @@ class NetworkTestCase(test.TrialTestCase):
         self.assertEqual(False, is_in_project(secondaddress, self.projects[1].id))
 
     def test_subnet_edge(self):
-        result = self.service.allocate_fixed_ip(self.user.id,
+        result = yield self.service.allocate_fixed_ip(self.user.id,
                                                        self.projects[0].id)
         firstaddress = result['private_dns_name']
         hostname = "toomany-hosts"
         for i in range(1,5):
             project_id = self.projects[i].id
-            result = self.service.allocate_fixed_ip(
+            result = yield self.service.allocate_fixed_ip(
                     self.user, project_id)
             mac = result['mac_address']
             address = result['private_dns_name']
-            result = self.service.allocate_fixed_ip(
+            result = yield self.service.allocate_fixed_ip(
                     self.user, project_id)
             mac2 = result['mac_address']
             address2 = result['private_dns_name']
-            result = self.service.allocate_fixed_ip(
+            result = yield self.service.allocate_fixed_ip(
                    self.user, project_id)
             mac3 = result['mac_address']
             address3 = result['private_dns_name']
@@ -193,12 +193,12 @@ class NetworkTestCase(test.TrialTestCase):
         macs = {}
         addresses = {}
         for i in range(0, (num_available_ips - 1)):
-            result = self.service.allocate_fixed_ip(self.user.id, self.projects[0].id)
+            result = yield self.service.allocate_fixed_ip(self.user.id, self.projects[0].id)
             macs[i] = result['mac_address']
             addresses[i] = result['private_dns_name']
             self.dnsmasq.issue_ip(macs[i], addresses[i], hostname, net.bridge_name)
 
-        self.assertRaises(NoMoreAddresses, self.service.allocate_fixed_ip, self.user.id, self.projects[0].id)
+        self.assertFailure(self.service.allocate_fixed_ip(self.user.id, self.projects[0].id), NoMoreAddresses)
 
         for i in range(0, (num_available_ips - 1)):
             rv = self.service.deallocate_fixed_ip(addresses[i])
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 8133daf0b0e7..7c3f7a6e107b 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -204,7 +204,7 @@ class LibvirtConnection(object):
 
         key = data['key_data']
         net = None
-        if data.get('network_type', None) == 'injected':
+        if data.get('inject_network', False):
             with open(FLAGS.injected_network_template) as f:
                 net = f.read() % {'address': data['private_dns_name'],
                                   'network': data['network_network'],

From de456585b67f3eb46bcae5869af4ac83c6d95908 Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya <vishvananda@gmail.com>
Date: Wed, 4 Aug 2010 15:53:24 -0700
Subject: [PATCH 25/36] fix error on terminate instance relating to elastic ip

---
 nova/endpoint/cloud.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py
index 1695e4b050c3..349007efab90 100644
--- a/nova/endpoint/cloud.py
+++ b/nova/endpoint/cloud.py
@@ -611,9 +611,8 @@ class CloudController(object):
                 logging.warning("Instance %s was not found during terminate"
                                 % i)
                 continue
-            address = network_model.get_public_ip_for_instance(i)
-            if address:
-                elastic_ip = address['public_ip']
+            elastic_ip = network_model.get_public_ip_for_instance(i)
+            if elastic_ip:
                 logging.debug("Disassociating address %s" % elastic_ip)
                 # NOTE(vish): Right now we don't really care if the ip is
                 #             disassociated.  We may need to worry about

From aa84936cb63cd1913a1640944a9353d018ace13f Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya <vishvananda@gmail.com>
Date: Wed, 4 Aug 2010 16:51:48 -0700
Subject: [PATCH 26/36] fix rpc command line call, remove useless deferreds

---
 nova/network/service.py | 34 ++++++++++++++++------------------
 nova/rpc.py             |  4 ++--
 2 files changed, 18 insertions(+), 20 deletions(-)

diff --git a/nova/network/service.py b/nova/network/service.py
index 57b8bbb78b11..873e316305e9 100644
--- a/nova/network/service.py
+++ b/nova/network/service.py
@@ -20,8 +20,6 @@
 Network Nodes are responsible for allocating ips and setting up network
 """
 
-from twisted.internet import defer
-
 from nova import datastore
 from nova import flags
 from nova import service
@@ -87,9 +85,9 @@ class BaseNetworkService(service.Service):
         redis = datastore.Redis.instance()
         key = _host_key(project_id)
         if redis.setnx(key, FLAGS.node_name):
-            return defer.succeed(FLAGS.node_name)
+            return FLAGS.node_name
         else:
-            return defer.succeed(redis.get(key))
+            return redis.get(key)
 
     def allocate_fixed_ip(self, user_id, project_id,
                           security_group='default',
@@ -152,16 +150,16 @@ class FlatNetworkService(BaseNetworkService):
         fixed_ip = redis.spop('ips')
         if not fixed_ip:
             raise exception.NoMoreAddresses()
-        return defer.succeed({'inject_network': True,
-                            'network_type': FLAGS.network_type,
-                            'mac_address': utils.generate_mac(),
-                            'private_dns_name': str(fixed_ip),
-                            'bridge_name': FLAGS.flat_network_bridge,
-                            'network_network': FLAGS.flat_network_network,
-                            'network_netmask': FLAGS.flat_network_netmask,
-                            'network_gateway': FLAGS.flat_network_gateway,
-                            'network_broadcast': FLAGS.flat_network_broadcast,
-                            'network_dns': FLAGS.flat_network_dns})
+        return {'inject_network': True,
+                'network_type': FLAGS.network_type,
+                'mac_address': utils.generate_mac(),
+                'private_dns_name': str(fixed_ip),
+                'bridge_name': FLAGS.flat_network_bridge,
+                'network_network': FLAGS.flat_network_network,
+                'network_netmask': FLAGS.flat_network_netmask,
+                'network_gateway': FLAGS.flat_network_gateway,
+                'network_broadcast': FLAGS.flat_network_broadcast,
+                'network_dns': FLAGS.flat_network_dns}
 
     def deallocate_fixed_ip(self, fixed_ip, *args, **kwargs):
         """Returns an ip to the pool"""
@@ -183,10 +181,10 @@ class VlanNetworkService(BaseNetworkService):
             fixed_ip = net.allocate_vpn_ip(user_id, project_id, mac)
         else:
             fixed_ip = net.allocate_ip(user_id, project_id, mac)
-        return defer.succeed({'network_type': FLAGS.network_type,
-                              'bridge_name': net['bridge_name'],
-                              'mac_address': mac,
-                              'private_dns_name' : fixed_ip})
+        return {'network_type': FLAGS.network_type,
+                'bridge_name': net['bridge_name'],
+                'mac_address': mac,
+                'private_dns_name' : fixed_ip}
 
     def deallocate_fixed_ip(self, fixed_ip,
                             *args, **kwargs):
diff --git a/nova/rpc.py b/nova/rpc.py
index ebf140d92e86..2a550c3ae25a 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -238,12 +238,12 @@ def send_message(topic, message, wait=True):
                                       exchange=msg_id,
                                       auto_delete=True,
                                       exchange_type="direct",
-                                      routing_key=msg_id,
-                                      durable=False)
+                                      routing_key=msg_id)
         consumer.register_callback(generic_response)
 
     publisher = messaging.Publisher(connection=Connection.instance(),
                                     exchange=FLAGS.control_exchange,
+                                    durable=False,
                                     exchange_type="topic",
                                     routing_key=topic)
     publisher.send(message)

From 6b70951e5b7cb8cabe5d6eb50fce7ae0a6e55d52 Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya <vishvananda@gmail.com>
Date: Wed, 4 Aug 2010 17:40:16 -0700
Subject: [PATCH 27/36] renamed Vpn to NetworkData, moved the creation of data
 to inside network

---
 nova/auth/manager.py    | 100 ++--------------------------------------
 nova/network/model.py   |  90 ++++++++++++++++++++++++++++++++++++
 nova/network/service.py |  20 ++++++--
 3 files changed, 112 insertions(+), 98 deletions(-)

diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index 2da53a7366da..dacdeb383ba5 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -37,6 +37,7 @@ from nova import objectstore # for flags
 from nova import utils
 from nova.auth import ldapdriver # for flags
 from nova.auth import signer
+from nova.network import model
 
 FLAGS = flags.FLAGS
 
@@ -51,7 +52,6 @@ flags.DEFINE_list('global_roles', ['cloudadmin', 'itsec'],
                   'Roles that apply to all projects')
 
 
-flags.DEFINE_bool('use_vpn', True, 'Support per-project vpns')
 flags.DEFINE_string('credentials_template',
                     utils.abspath('auth/novarc.template'),
                     'Template for creating users rc file')
@@ -65,19 +65,11 @@ flags.DEFINE_string('credential_cert_file', 'cert.pem',
 flags.DEFINE_string('credential_rc_file', 'novarc',
                     'Filename of rc in credentials zip')
 
-flags.DEFINE_integer('vpn_start_port', 1000,
-                    'Start port for the cloudpipe VPN servers')
-flags.DEFINE_integer('vpn_end_port', 2000,
-                    'End port for the cloudpipe VPN servers')
-
 flags.DEFINE_string('credential_cert_subject',
                     '/C=US/ST=California/L=MountainView/O=AnsoLabs/'
                     'OU=NovaDev/CN=%s-%s',
                     'Subject for certificate for users')
 
-flags.DEFINE_string('vpn_ip', '127.0.0.1',
-                    'Public IP for the cloudpipe VPN servers')
-
 flags.DEFINE_string('auth_driver', 'nova.auth.ldapdriver.FakeLdapDriver',
                     'Driver that auth manager uses')
 
@@ -229,86 +221,6 @@ class Project(AuthBase):
                                                         self.member_ids)
 
 
-class NoMorePorts(exception.Error):
-    pass
-
-
-class Vpn(datastore.BasicModel):
-    """Manages vpn ips and ports for projects"""
-    def __init__(self, project_id):
-        self.project_id = project_id
-        super(Vpn, self).__init__()
-
-    @property
-    def identifier(self):
-        """Identifier used for key in redis"""
-        return self.project_id
-
-    @classmethod
-    def create(cls, project_id):
-        """Creates a vpn for project
-
-        This method finds a free ip and port and stores the associated
-        values in the datastore.
-        """
-        # TODO(vish): get list of vpn ips from redis
-        port = cls.find_free_port_for_ip(FLAGS.vpn_ip)
-        vpn = cls(project_id)
-        # save ip for project
-        vpn['project'] = project_id
-        vpn['ip'] = FLAGS.vpn_ip
-        vpn['port'] = port
-        vpn.save()
-        return vpn
-
-    @classmethod
-    def find_free_port_for_ip(cls, ip):
-        """Finds a free port for a given ip from the redis set"""
-        # TODO(vish): these redis commands should be generalized and
-        #             placed into a base class. Conceptually, it is
-        #             similar to an association, but we are just
-        #             storing a set of values instead of keys that
-        #             should be turned into objects.
-        redis = datastore.Redis.instance()
-        key = 'ip:%s:ports' % ip
-        # TODO(vish): these ports should be allocated through an admin
-        #             command instead of a flag
-        if (not redis.exists(key) and
-            not redis.exists(cls._redis_association_name('ip', ip))):
-            for i in range(FLAGS.vpn_start_port, FLAGS.vpn_end_port + 1):
-                redis.sadd(key, i)
-
-        port = redis.spop(key)
-        if not port:
-            raise NoMorePorts()
-        return port
-
-    @classmethod
-    def num_ports_for_ip(cls, ip):
-        """Calculates the number of free ports for a given ip"""
-        return datastore.Redis.instance().scard('ip:%s:ports' % ip)
-
-    @property
-    def ip(self):
-        """The ip assigned to the project"""
-        return self['ip']
-
-    @property
-    def port(self):
-        """The port assigned to the project"""
-        return int(self['port'])
-
-    def save(self):
-        """Saves the association to the given ip"""
-        self.associate_with('ip', self.ip)
-        super(Vpn, self).save()
-
-    def destroy(self):
-        """Cleans up datastore and adds port back to pool"""
-        self.unassociate_with('ip', self.ip)
-        datastore.Redis.instance().sadd('ip:%s:ports' % self.ip, self.port)
-        super(Vpn, self).destroy()
-
 
 class AuthManager(object):
     """Manager Singleton for dealing with Users, Projects, and Keypairs
@@ -581,8 +493,6 @@ class AuthManager(object):
                                                description,
                                                member_users)
             if project_dict:
-                if FLAGS.use_vpn:
-                    Vpn.create(project_dict['id'])
                 return Project(**project_dict)
 
     def add_to_project(self, user, project):
@@ -619,10 +529,10 @@ class AuthManager(object):
         @return: A tuple containing (ip, port) or None, None if vpn has
         not been allocated for user.
         """
-        vpn = Vpn.lookup(Project.safe_id(project))
-        if not vpn:
-            return None, None
-        return (vpn.ip, vpn.port)
+        network_data = model.NetworkData.lookup(Project.safe_id(project))
+        if not network_data:
+            raise exception.NotFound('project network data has not been set')
+        return (network_data.ip, network_data.port)
 
     def delete_project(self, project):
         """Deletes a project"""
diff --git a/nova/network/model.py b/nova/network/model.py
index a1eaf575334f..b065d174e8aa 100644
--- a/nova/network/model.py
+++ b/nova/network/model.py
@@ -53,6 +53,13 @@ flags.DEFINE_integer('cnt_vpn_clients', 5,
 flags.DEFINE_integer('cloudpipe_start_port', 12000,
                         'Starting port for mapped CloudPipe external ports')
 
+flags.DEFINE_string('vpn_ip', utils.get_my_ip(),
+                    'Public IP for the cloudpipe VPN servers')
+flags.DEFINE_integer('vpn_start_port', 1000,
+                    'Start port for the cloudpipe VPN servers')
+flags.DEFINE_integer('vpn_end_port', 2000,
+                    'End port for the cloudpipe VPN servers')
+
 logging.getLogger().setLevel(logging.DEBUG)
 
 
@@ -373,6 +380,89 @@ class PublicAddress(datastore.BasicModel):
         addr.save()
         return addr
 
+
+class NoMorePorts(exception.Error):
+    pass
+
+
+class NetworkData(datastore.BasicModel):
+    """Manages network host, and vpn ip and port for projects"""
+    def __init__(self, project_id):
+        self.project_id = project_id
+        super(NetworkData, self).__init__()
+
+    @property
+    def identifier(self):
+        """Identifier used for key in redis"""
+        return self.project_id
+
+    @classmethod
+    def create(cls, project_id):
+        """Creates a vpn for project
+
+        This method finds a free ip and port and stores the associated
+        values in the datastore.
+        """
+        # TODO(vish): will we ever need multiiple ips per host?
+        port = cls.find_free_port_for_ip(FLAGS.vpn_ip)
+        network_data = cls(project_id)
+        # save ip for project
+        network_data['host'] = FLAGS.node_name
+        network_data['project'] = project_id
+        network_data['ip'] = FLAGS.vpn_ip
+        network_data['port'] = port
+        network_data.save()
+        return network_data
+
+    @classmethod
+    def find_free_port_for_ip(cls, ip):
+        """Finds a free port for a given ip from the redis set"""
+        # TODO(vish): these redis commands should be generalized and
+        #             placed into a base class. Conceptually, it is
+        #             similar to an association, but we are just
+        #             storing a set of values instead of keys that
+        #             should be turned into objects.
+        redis = datastore.Redis.instance()
+        key = 'ip:%s:ports' % ip
+        # TODO(vish): these ports should be allocated through an admin
+        #             command instead of a flag
+        if (not redis.exists(key) and
+            not redis.exists(cls._redis_association_name('ip', ip))):
+            for i in range(FLAGS.vpn_start_port, FLAGS.vpn_end_port + 1):
+                redis.sadd(key, i)
+
+        port = redis.spop(key)
+        if not port:
+            raise NoMorePorts()
+        return port
+
+    @classmethod
+    def num_ports_for_ip(cls, ip):
+        """Calculates the number of free ports for a given ip"""
+        return datastore.Redis.instance().scard('ip:%s:ports' % ip)
+
+    @property
+    def ip(self):
+        """The ip assigned to the project"""
+        return self['ip']
+
+    @property
+    def port(self):
+        """The port assigned to the project"""
+        return int(self['port'])
+
+    def save(self):
+        """Saves the association to the given ip"""
+        self.associate_with('ip', self.ip)
+        super(NetworkData, self).save()
+
+    def destroy(self):
+        """Cleans up datastore and adds port back to pool"""
+        self.unassociate_with('ip', self.ip)
+        datastore.Redis.instance().sadd('ip:%s:ports' % self.ip, self.port)
+        super(NetworkData, self).destroy()
+
+
 DEFAULT_PORTS = [("tcp",80), ("tcp",22), ("udp",1194), ("tcp",443)]
 class PublicNetworkController(BaseNetwork):
     override_type = 'network'
diff --git a/nova/network/service.py b/nova/network/service.py
index 873e316305e9..243f9a7200ca 100644
--- a/nova/network/service.py
+++ b/nova/network/service.py
@@ -80,11 +80,14 @@ class BaseNetworkService(service.Service):
     def __init__(self, *args, **kwargs):
         self.network = model.PublicNetworkController()
 
-    def get_network_host(self, user_id, project_id, *args, **kwargs):
+    def set_network_host(self, user_id, project_id, *args, **kwargs):
         """Safely becomes the host of the projects network"""
         redis = datastore.Redis.instance()
         key = _host_key(project_id)
         if redis.setnx(key, FLAGS.node_name):
+            self._on_set_network_host(user_id, project_id,
+                                      security_group='default',
+                                      *args, **kwargs)
             return FLAGS.node_name
         else:
             return redis.get(key)
@@ -99,6 +102,11 @@ class BaseNetworkService(service.Service):
         """Subclass implements return of ip to the pool"""
         raise NotImplementedError()
 
+    def _on_set_network_host(self, user_id, project_id,
+                              *args, **kwargs):
+        """Called when this host becomes the host for a project"""
+        pass
+
     @classmethod
     def setup_compute_network(self, user_id, project_id, security_group,
                               *args, **kwargs):
@@ -129,7 +137,8 @@ class FlatNetworkService(BaseNetworkService):
     """Basic network where no vlans are used"""
 
     @classmethod
-    def setup_compute_network(self, fixed_ip, *args, **kwargs):
+    def setup_compute_network(self, user_id, project_id, security_group,
+                              *args, **kwargs):
         """Network is created manually"""
         pass
 
@@ -198,10 +207,15 @@ class VlanNetworkService(BaseNetworkService):
         return model.get_network_by_address(address).release_ip(address)
 
     def restart_nets(self):
-        """ Ensure the network for each user is enabled"""
+        """Ensure the network for each user is enabled"""
         for project in manager.AuthManager().get_projects():
             model.get_project_network(project.id).express()
 
+    def _on_set_network_host(self, user_id, project_id,
+                             *args, **kwargs):
+        """Called when this host becomes the host for a project"""
+        model.NetworkData.create(project_id)
+
     @classmethod
     def setup_compute_network(self, user_id, project_id, security_group,
                               *args, **kwargs):

From c1e0abd5be3ac8473aaf255f77fb2357b5771ea9 Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya <vishvananda@gmail.com>
Date: Wed, 4 Aug 2010 18:00:13 -0700
Subject: [PATCH 28/36] fixed circular reference and tests

---
 nova/auth/manager.py           |   4 +-
 nova/network/model.py          |  89 -------------------------
 nova/network/networkdata.py    | 116 +++++++++++++++++++++++++++++++++
 nova/network/service.py        |   5 +-
 nova/tests/auth_unittest.py    |  14 ----
 nova/tests/network_unittest.py |  15 +++++
 6 files changed, 136 insertions(+), 107 deletions(-)
 create mode 100644 nova/network/networkdata.py

diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index dacdeb383ba5..463cfdf4a70b 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -37,7 +37,7 @@ from nova import objectstore # for flags
 from nova import utils
 from nova.auth import ldapdriver # for flags
 from nova.auth import signer
-from nova.network import model
+from nova.network import networkdata
 
 FLAGS = flags.FLAGS
 
@@ -529,7 +529,7 @@ class AuthManager(object):
         @return: A tuple containing (ip, port) or None, None if vpn has
         not been allocated for user.
         """
-        network_data = model.NetworkData.lookup(Project.safe_id(project))
+        network_data = networkdata.NetworkData.lookup(Project.safe_id(project))
         if not network_data:
             raise exception.NotFound('project network data has not been set')
         return (network_data.ip, network_data.port)
diff --git a/nova/network/model.py b/nova/network/model.py
index b065d174e8aa..daac035e42f8 100644
--- a/nova/network/model.py
+++ b/nova/network/model.py
@@ -53,13 +53,6 @@ flags.DEFINE_integer('cnt_vpn_clients', 5,
 flags.DEFINE_integer('cloudpipe_start_port', 12000,
                         'Starting port for mapped CloudPipe external ports')
 
-flags.DEFINE_string('vpn_ip', utils.get_my_ip(),
-                    'Public IP for the cloudpipe VPN servers')
-flags.DEFINE_integer('vpn_start_port', 1000,
-                    'Start port for the cloudpipe VPN servers')
-flags.DEFINE_integer('vpn_end_port', 2000,
-                    'End port for the cloudpipe VPN servers')
-
 logging.getLogger().setLevel(logging.DEBUG)
 
 
@@ -381,88 +374,6 @@ class PublicAddress(datastore.BasicModel):
         return addr
 
 
-class NoMorePorts(exception.Error):
-    pass
-
-
-class NetworkData(datastore.BasicModel):
-    """Manages network host, and vpn ip and port for projects"""
-    def __init__(self, project_id):
-        self.project_id = project_id
-        super(NetworkData, self).__init__()
-
-    @property
-    def identifier(self):
-        """Identifier used for key in redis"""
-        return self.project_id
-
-    @classmethod
-    def create(cls, project_id):
-        """Creates a vpn for project
-
-        This method finds a free ip and port and stores the associated
-        values in the datastore.
-        """
-        # TODO(vish): will we ever need multiiple ips per host?
-        port = cls.find_free_port_for_ip(FLAGS.vpn_ip)
-        network_data = cls(project_id)
-        # save ip for project
-        network_data['host'] = FLAGS.node_name
-        network_data['project'] = project_id
-        network_data['ip'] = FLAGS.vpn_ip
-        network_data['port'] = port
-        network_data.save()
-        return network_data
-
-    @classmethod
-    def find_free_port_for_ip(cls, ip):
-        """Finds a free port for a given ip from the redis set"""
-        # TODO(vish): these redis commands should be generalized and
-        #             placed into a base class. Conceptually, it is
-        #             similar to an association, but we are just
-        #             storing a set of values instead of keys that
-        #             should be turned into objects.
-        redis = datastore.Redis.instance()
-        key = 'ip:%s:ports' % ip
-        # TODO(vish): these ports should be allocated through an admin
-        #             command instead of a flag
-        if (not redis.exists(key) and
-            not redis.exists(cls._redis_association_name('ip', ip))):
-            for i in range(FLAGS.vpn_start_port, FLAGS.vpn_end_port + 1):
-                redis.sadd(key, i)
-
-        port = redis.spop(key)
-        if not port:
-            raise NoMorePorts()
-        return port
-
-    @classmethod
-    def num_ports_for_ip(cls, ip):
-        """Calculates the number of free ports for a given ip"""
-        return datastore.Redis.instance().scard('ip:%s:ports' % ip)
-
-    @property
-    def ip(self):
-        """The ip assigned to the project"""
-        return self['ip']
-
-    @property
-    def port(self):
-        """The port assigned to the project"""
-        return int(self['port'])
-
-    def save(self):
-        """Saves the association to the given ip"""
-        self.associate_with('ip', self.ip)
-        super(NetworkData, self).save()
-
-    def destroy(self):
-        """Cleans up datastore and adds port back to pool"""
-        self.unassociate_with('ip', self.ip)
-        datastore.Redis.instance().sadd('ip:%s:ports' % self.ip, self.port)
-        super(NetworkData, self).destroy()
-
-
 DEFAULT_PORTS = [("tcp",80), ("tcp",22), ("udp",1194), ("tcp",443)]
 class PublicNetworkController(BaseNetwork):
     override_type = 'network'
diff --git a/nova/network/networkdata.py b/nova/network/networkdata.py
new file mode 100644
index 000000000000..cec84287cdba
--- /dev/null
+++ b/nova/network/networkdata.py
@@ -0,0 +1,116 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""Network Data for projects"""
+
+from nova import datastore
+from nova import exception
+from nova import flags
+from nova import utils
+
+FLAGS = flags.FLAGS
+
+
+flags.DEFINE_string('vpn_ip', utils.get_my_ip(),
+                    'Public IP for the cloudpipe VPN servers')
+flags.DEFINE_integer('vpn_start_port', 1000,
+                    'Start port for the cloudpipe VPN servers')
+flags.DEFINE_integer('vpn_end_port', 2000,
+                    'End port for the cloudpipe VPN servers')
+
+class NoMorePorts(exception.Error):
+    pass
+
+
+class NetworkData(datastore.BasicModel):
+    """Manages network host, and vpn ip and port for projects"""
+    def __init__(self, project_id):
+        self.project_id = project_id
+        super(NetworkData, self).__init__()
+
+    @property
+    def identifier(self):
+        """Identifier used for key in redis"""
+        return self.project_id
+
+    @classmethod
+    def create(cls, project_id):
+        """Creates a vpn for project
+
+        This method finds a free ip and port and stores the associated
+        values in the datastore.
+        """
+        # TODO(vish): will we ever need multiple ips per host?
+        port = cls.find_free_port_for_ip(FLAGS.vpn_ip)
+        network_data = cls(project_id)
+        # save ip for project
+        network_data['host'] = FLAGS.node_name
+        network_data['project'] = project_id
+        network_data['ip'] = FLAGS.vpn_ip
+        network_data['port'] = port
+        network_data.save()
+        return network_data
+
+    @classmethod
+    def find_free_port_for_ip(cls, ip):
+        """Finds a free port for a given ip from the redis set"""
+        # TODO(vish): these redis commands should be generalized and
+        #             placed into a base class. Conceptually, it is
+        #             similar to an association, but we are just
+        #             storing a set of values instead of keys that
+        #             should be turned into objects.
+        redis = datastore.Redis.instance()
+        key = 'ip:%s:ports' % ip
+        # TODO(vish): these ports should be allocated through an admin
+        #             command instead of a flag
+        if (not redis.exists(key) and
+            not redis.exists(cls._redis_association_name('ip', ip))):
+            for i in range(FLAGS.vpn_start_port, FLAGS.vpn_end_port + 1):
+                redis.sadd(key, i)
+
+        port = redis.spop(key)
+        if not port:
+            raise NoMorePorts()
+        return port
+
+    @classmethod
+    def num_ports_for_ip(cls, ip):
+        """Calculates the number of free ports for a given ip"""
+        return datastore.Redis.instance().scard('ip:%s:ports' % ip)
+
+    @property
+    def ip(self):
+        """The ip assigned to the project"""
+        return self['ip']
+
+    @property
+    def port(self):
+        """The port assigned to the project"""
+        return int(self['port'])
+
+    def save(self):
+        """Saves the association to the given ip"""
+        self.associate_with('ip', self.ip)
+        super(NetworkData, self).save()
+
+    def destroy(self):
+        """Cleans up datastore and adds port back to pool"""
+        self.unassociate_with('ip', self.ip)
+        datastore.Redis.instance().sadd('ip:%s:ports' % self.ip, self.port)
+        super(NetworkData, self).destroy()
+
diff --git a/nova/network/service.py b/nova/network/service.py
index 243f9a7200ca..afc20c0d5eb0 100644
--- a/nova/network/service.py
+++ b/nova/network/service.py
@@ -28,6 +28,7 @@ from nova.auth import manager
 from nova.exception import NotFound
 from nova.network import exception
 from nova.network import model
+from nova.network import networkdata
 
 FLAGS = flags.FLAGS
 
@@ -81,7 +82,7 @@ class BaseNetworkService(service.Service):
         self.network = model.PublicNetworkController()
 
     def set_network_host(self, user_id, project_id, *args, **kwargs):
-        """Safely becomes the host of the projects network"""
+        """Safely sets the host of the projects network"""
         redis = datastore.Redis.instance()
         key = _host_key(project_id)
         if redis.setnx(key, FLAGS.node_name):
@@ -214,7 +215,7 @@ class VlanNetworkService(BaseNetworkService):
     def _on_set_network_host(self, user_id, project_id,
                              *args, **kwargs):
         """Called when this host becomes the host for a project"""
-        model.NetworkData.create(project_id)
+        networkdata.NetworkData.create(project_id)
 
     @classmethod
     def setup_compute_network(self, user_id, project_id, security_group,
diff --git a/nova/tests/auth_unittest.py b/nova/tests/auth_unittest.py
index 2167c2385a59..3bd0a432c122 100644
--- a/nova/tests/auth_unittest.py
+++ b/nova/tests/auth_unittest.py
@@ -179,20 +179,6 @@ class AuthTestCase(test.BaseTestCase):
         self.manager.remove_role('test1', 'sysadmin')
         self.assertFalse(project.has_role('test1', 'sysadmin'))
 
-    def test_212_vpn_ip_and_port_looks_valid(self):
-        project = self.manager.get_project('testproj')
-        self.assert_(project.vpn_ip)
-        self.assert_(project.vpn_port >= FLAGS.vpn_start_port)
-        self.assert_(project.vpn_port <= FLAGS.vpn_end_port)
-
-    def test_213_too_many_vpns(self):
-        vpns = []
-        for i in xrange(manager.Vpn.num_ports_for_ip(FLAGS.vpn_ip)):
-            vpns.append(manager.Vpn.create("vpnuser%s" % i))
-        self.assertRaises(manager.NoMorePorts, manager.Vpn.create, "boom")
-        for vpn in vpns:
-            vpn.destroy()
-
     def test_214_can_retrieve_project_by_user(self):
         project = self.manager.create_project('testproj2', 'test2', 'Another test project', ['test2'])
         self.assert_(len(self.manager.get_projects()) > 1)
diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py
index 42cae327fbb2..49147d4ec5e0 100644
--- a/nova/tests/network_unittest.py
+++ b/nova/tests/network_unittest.py
@@ -25,6 +25,7 @@ from nova import test
 from nova import utils
 from nova.auth import manager
 from nova.network import model
+from nova.network import networkdata
 from nova.network import service
 from nova.network.exception import NoMoreAddresses
 
@@ -154,6 +155,20 @@ class NetworkTestCase(test.TrialTestCase):
         rv = self.service.deallocate_fixed_ip(firstaddress)
         self.dnsmasq.release_ip(mac, firstaddress, hostname, net.bridge_name)
 
+    def test_212_vpn_ip_and_port_looks_valid(self):
+        networkdata.NetworkData.create(self.projects[0].id)
+        self.assert_(self.projects[0].vpn_ip)
+        self.assert_(self.projects[0].vpn_port >= FLAGS.vpn_start_port)
+        self.assert_(self.projects[0].vpn_port <= FLAGS.vpn_end_port)
+
+    def test_too_many_vpns(self):
+        vpns = []
+        for i in xrange(networkdata.NetworkData.num_ports_for_ip(FLAGS.vpn_ip)):
+            vpns.append(networkdata.NetworkData.create("vpnuser%s" % i))
+        self.assertRaises(networkdata.NoMorePorts, networkdata.NetworkData.create, "boom")
+        for vpn in vpns:
+            vpn.destroy()
+
     def test_release_before_deallocate(self):
         pass
 

From cc64a872c685b931bf76e2323986b427cad777c3 Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya <vishvananda@gmail.com>
Date: Wed, 4 Aug 2010 18:12:19 -0700
Subject: [PATCH 29/36] method is called set_network_host

---
 nova/endpoint/cloud.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py
index 349007efab90..00dabba28780 100644
--- a/nova/endpoint/cloud.py
+++ b/nova/endpoint/cloud.py
@@ -519,7 +519,7 @@ class CloudController(object):
         host = network_service.get_host_for_project(context.project.id)
         if not host:
             result = yield rpc.call(FLAGS.network_topic,
-                                    {"method": "get_network_topic",
+                                    {"method": "set_network_host",
                                      "args": {"user_id": context.user.id,
                                               "project_id": context.project.id}})
             host = result['result']

From d1709793045de2f77f4a1fb06f63d27cbcf640d1 Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya <vishvananda@gmail.com>
Date: Wed, 4 Aug 2010 18:37:00 -0700
Subject: [PATCH 30/36] clean up nova-manage. If vpn data isn't set for user it
 skips it

---
 bin/nova-manage      | 23 +++++++++++------------
 nova/auth/manager.py | 39 ++++++++++++++++++++++++++-------------
 2 files changed, 37 insertions(+), 25 deletions(-)

diff --git a/bin/nova-manage b/bin/nova-manage
index b0f0029edf4d..7835c7a77c4e 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -29,16 +29,12 @@ from nova import flags
 from nova import utils
 from nova.auth import manager
 from nova.compute import model
-from nova.compute import network
 from nova.cloudpipe import pipelib
 from nova.endpoint import cloud
 
 
 FLAGS = flags.FLAGS
 
-class NetworkCommands(object):
-    def restart(self):
-        network.restart_nets()
 
 class VpnCommands(object):
     def __init__(self):
@@ -170,6 +166,13 @@ class ProjectCommands(object):
         arguments: name"""
         self.manager.delete_project(name)
 
+    def environment(self, project_id, user_id, filename='novarc'):
+        """exports environment variables to a sourceable file
+        arguments: project_id user_id [filename='novarc']"""
+        rc = self.manager.get_environment_rc(project_id, user_id)
+        with open(filename, 'w') as f:
+            f.write(rc)
+
     def list(self):
         """lists all projects
         arguments: <none>"""
@@ -182,14 +185,11 @@ class ProjectCommands(object):
         self.manager.remove_from_project(user, project)
 
     def zip(self, project_id, user_id, filename='nova.zip'):
-        """exports credentials for user to a zip file
+        """exports credentials for project to a zip file
+        arguments: project_id user_id [filename='nova.zip']"""
-        project = self.manager.get_project(project_id)
-        if project:
-            with open(filename, 'w') as f:
-                f.write(project.get_credentials(user_id))
-        else:
-            print "Project %s doesn't exist" % project
+        zip = self.manager.get_credentials(project_id, user_id)
+        with open(filename, 'w') as f:
+            f.write(zip)
 
 
 def usage(script_name):
@@ -197,7 +197,6 @@ def usage(script_name):
 
 
 categories = [
-    ('network', NetworkCommands),
     ('user', UserCommands),
     ('project', ProjectCommands),
     ('role', RoleCommands),
diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index 463cfdf4a70b..312b569aa503 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -58,6 +58,8 @@ flags.DEFINE_string('credentials_template',
 flags.DEFINE_string('vpn_client_template',
                     utils.abspath('cloudpipe/client.ovpn.template'),
                     'Template for creating users vpn file')
+flags.DEFINE_string('credential_vpn_file', 'nova-vpn.conf',
+                    'Filename of certificate in credentials zip')
 flags.DEFINE_string('credential_key_file', 'pk.pem',
                     'Filename of private key in credentials zip')
 flags.DEFINE_string('credential_cert_file', 'cert.pem',
@@ -663,25 +665,27 @@ class AuthManager(object):
         rc = self.__generate_rc(user.access, user.secret, pid)
         private_key, signed_cert = self._generate_x509_cert(user.id, pid)
 
-        vpn = Vpn.lookup(pid)
-        if not vpn:
-            raise exception.Error("No vpn data allocated for project %s" %
-                                  project.name)
-        configfile = open(FLAGS.vpn_client_template,"r")
-        s = string.Template(configfile.read())
-        configfile.close()
-        config = s.substitute(keyfile=FLAGS.credential_key_file,
-                              certfile=FLAGS.credential_cert_file,
-                              ip=vpn.ip,
-                              port=vpn.port)
-
         tmpdir = tempfile.mkdtemp()
         zf = os.path.join(tmpdir, "temp.zip")
         zippy = zipfile.ZipFile(zf, 'w')
         zippy.writestr(FLAGS.credential_rc_file, rc)
         zippy.writestr(FLAGS.credential_key_file, private_key)
         zippy.writestr(FLAGS.credential_cert_file, signed_cert)
-        zippy.writestr("nebula-client.conf", config)
+
+        network_data = networkdata.NetworkData.lookup(pid)
+        if network_data:
+            configfile = open(FLAGS.vpn_client_template,"r")
+            s = string.Template(configfile.read())
+            configfile.close()
+            config = s.substitute(keyfile=FLAGS.credential_key_file,
+                                  certfile=FLAGS.credential_cert_file,
+                                  ip=network_data.ip,
+                                  port=network_data.port)
+            zippy.writestr(FLAGS.credential_vpn_file, config)
+        else:
+            logging.warn("No vpn data for project %s" %
+                                  pid)
+
         zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(user.id))
         zippy.close()
         with open(zf, 'rb') as f:
@@ -690,6 +694,15 @@ class AuthManager(object):
         shutil.rmtree(tmpdir)
         return buffer
 
+    def get_environment_rc(self, user, project=None):
+        """Get environment rc for user in project"""
+        if not isinstance(user, User):
+            user = self.get_user(user)
+        if project is None:
+            project = user.id
+        pid = Project.safe_id(project)
+        return self.__generate_rc(user.access, user.secret, pid)
+
     def __generate_rc(self, access, secret, pid):
         """Generate rc file for user"""
         rc = open(FLAGS.credentials_template).read()

From 024ad9951dcf33f5a3468e9a790f1636770b2837 Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya <vishvananda@gmail.com>
Date: Thu, 5 Aug 2010 12:29:50 -0700
Subject: [PATCH 31/36] rename networkdata to vpn

---
 nova/auth/manager.py                    |  6 +++---
 nova/network/service.py                 |  4 ++--
 nova/network/{networkdata.py => vpn.py} |  0
 nova/tests/network_unittest.py          | 10 +++++-----
 4 files changed, 10 insertions(+), 10 deletions(-)
 rename nova/network/{networkdata.py => vpn.py} (100%)

diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index 312b569aa503..cf920d607945 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -37,7 +37,7 @@ from nova import objectstore # for flags
 from nova import utils
 from nova.auth import ldapdriver # for flags
 from nova.auth import signer
-from nova.network import networkdata
+from nova.network import vpn
 
 FLAGS = flags.FLAGS
 
@@ -531,7 +531,7 @@ class AuthManager(object):
         @return: A tuple containing (ip, port) or None, None if vpn has
         not been allocated for user.
         """
-        network_data = networkdata.NetworkData.lookup(Project.safe_id(project))
+        network_data = vpn.NetworkData.lookup(Project.safe_id(project))
         if not network_data:
             raise exception.NotFound('project network data has not been set')
         return (network_data.ip, network_data.port)
@@ -672,7 +672,7 @@ class AuthManager(object):
         zippy.writestr(FLAGS.credential_key_file, private_key)
         zippy.writestr(FLAGS.credential_cert_file, signed_cert)
 
-        network_data = networkdata.NetworkData.lookup(pid)
+        network_data = vpn.NetworkData.lookup(pid)
         if network_data:
             configfile = open(FLAGS.vpn_client_template,"r")
             s = string.Template(configfile.read())
diff --git a/nova/network/service.py b/nova/network/service.py
index afc20c0d5eb0..1a61f49d4581 100644
--- a/nova/network/service.py
+++ b/nova/network/service.py
@@ -28,7 +28,7 @@ from nova.auth import manager
 from nova.exception import NotFound
 from nova.network import exception
 from nova.network import model
-from nova.network import networkdata
+from nova.network import vpn
 
 FLAGS = flags.FLAGS
 
@@ -215,7 +215,7 @@ class VlanNetworkService(BaseNetworkService):
     def _on_set_network_host(self, user_id, project_id,
                              *args, **kwargs):
         """Called when this host becomes the host for a project"""
-        networkdata.NetworkData.create(project_id)
+        vpn.NetworkData.create(project_id)
 
     @classmethod
     def setup_compute_network(self, user_id, project_id, security_group,
diff --git a/nova/network/networkdata.py b/nova/network/vpn.py
similarity index 100%
rename from nova/network/networkdata.py
rename to nova/network/vpn.py
diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py
index 49147d4ec5e0..c7d3e86f0a0a 100644
--- a/nova/tests/network_unittest.py
+++ b/nova/tests/network_unittest.py
@@ -25,8 +25,8 @@ from nova import test
 from nova import utils
 from nova.auth import manager
 from nova.network import model
-from nova.network import networkdata
 from nova.network import service
+from nova.network import vpn
 from nova.network.exception import NoMoreAddresses
 
 FLAGS = flags.FLAGS
@@ -156,16 +156,16 @@ class NetworkTestCase(test.TrialTestCase):
         self.dnsmasq.release_ip(mac, firstaddress, hostname, net.bridge_name)
 
     def test_212_vpn_ip_and_port_looks_valid(self):
-        networkdata.NetworkData.create(self.projects[0].id)
+        vpn.NetworkData.create(self.projects[0].id)
         self.assert_(self.projects[0].vpn_ip)
         self.assert_(self.projects[0].vpn_port >= FLAGS.vpn_start_port)
         self.assert_(self.projects[0].vpn_port <= FLAGS.vpn_end_port)
 
     def test_too_many_vpns(self):
         vpns = []
-        for i in xrange(networkdata.NetworkData.num_ports_for_ip(FLAGS.vpn_ip)):
-            vpns.append(networkdata.NetworkData.create("vpnuser%s" % i))
-        self.assertRaises(networkdata.NoMorePorts, networkdata.NetworkData.create, "boom")
+        for i in xrange(vpn.NetworkData.num_ports_for_ip(FLAGS.vpn_ip)):
+            vpns.append(vpn.NetworkData.create("vpnuser%s" % i))
+        self.assertRaises(vpn.NoMorePorts, vpn.NetworkData.create, "boom")
         for vpn in vpns:
             vpn.destroy()
 

From b77d261b0288f89d2b25a52e7ad7c8073e357cb1 Mon Sep 17 00:00:00 2001
From: Eric Day <eday@oddments.org>
Date: Thu, 5 Aug 2010 13:51:44 -0700
Subject: [PATCH 32/36] First pass at making a file pass pep8 and pylint tests
 as an example.

---
 nova/tests/objectstore_unittest.py | 241 +++++++++++++++++------------
 pylintrc                           |   6 +
 2 files changed, 144 insertions(+), 103 deletions(-)
 create mode 100644 pylintrc

diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py
index dd00377e702c..dece4b5d5282 100644
--- a/nova/tests/objectstore_unittest.py
+++ b/nova/tests/objectstore_unittest.py
@@ -16,6 +16,10 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+"""
+Unittests for the S3 objectstore clone.
+"""
+
 import boto
 import glob
 import hashlib
@@ -24,76 +28,69 @@ import os
 import shutil
 import tempfile
 
-from nova import flags
-from nova import objectstore
-from nova.objectstore import bucket # for buckets_path flag
-from nova.objectstore import image # for images_path flag
-from nova import test
-from nova.auth import manager
-from nova.objectstore.handler import S3
-from nova.exception import NotEmpty, NotFound, NotAuthorized
-
 from boto.s3.connection import S3Connection, OrdinaryCallingFormat
 from twisted.internet import reactor, threads, defer
 from twisted.web import http, server
 
+from nova import flags
+from nova import objectstore
+from nova import test
+from nova.auth import manager
+from nova.exception import NotEmpty, NotFound
+from nova.objectstore import image
+from nova.objectstore.handler import S3
+
+
 FLAGS = flags.FLAGS
 
-oss_tempdir = tempfile.mkdtemp(prefix='test_oss-')
+# Create a unique temporary directory. We don't delete after test to
+# allow checking the contents after running tests. Users and/or tools
+# running the tests need to remove the tests directories.
+OSS_TEMPDIR = tempfile.mkdtemp(prefix='test_oss-')
 
+# Create bucket/images path
+os.makedirs(os.path.join(OSS_TEMPDIR, 'images'))
+os.makedirs(os.path.join(OSS_TEMPDIR, 'buckets'))
 
-# delete tempdirs from previous runs (we don't delete after test to allow
-# checking the contents after running tests)
-# TODO: This fails on the test box with a permission denied error
-# Also, doing these things in a global tempdir means that different runs of
-# the test suite on the same box could clobber each other.
-#for path in glob.glob(os.path.abspath(os.path.join(oss_tempdir, '../test_oss-*'))):
-#    if path != oss_tempdir:
-#        shutil.rmtree(path)
-
-
-# create bucket/images path
-os.makedirs(os.path.join(oss_tempdir, 'images'))
-os.makedirs(os.path.join(oss_tempdir, 'buckets'))
 
 class ObjectStoreTestCase(test.BaseTestCase):
-    def setUp(self):
+    """Test objectstore API directly."""
+
+    def setUp(self): # pylint: disable-msg=C0103
+        """Setup users and projects."""
         super(ObjectStoreTestCase, self).setUp()
-        self.flags(buckets_path=os.path.join(oss_tempdir, 'buckets'),
-                   images_path=os.path.join(oss_tempdir, 'images'),
+        self.flags(buckets_path=os.path.join(OSS_TEMPDIR, 'buckets'),
+                   images_path=os.path.join(OSS_TEMPDIR, 'images'),
                    ca_path=os.path.join(os.path.dirname(__file__), 'CA'))
         logging.getLogger().setLevel(logging.DEBUG)
 
-        self.um = manager.AuthManager()
-        try:
-            self.um.create_user('user1')
-        except: pass
-        try:
-            self.um.create_user('user2')
-        except: pass
-        try:
-            self.um.create_user('admin_user', admin=True)
-        except: pass
-        try:
-            self.um.create_project('proj1', 'user1', 'a proj', ['user1'])
-        except: pass
-        try:
-            self.um.create_project('proj2', 'user2', 'a proj', ['user2'])
-        except: pass
-        class Context(object): pass
+        self.auth_manager = manager.AuthManager()
+        self.auth_manager.create_user('user1')
+        self.auth_manager.create_user('user2')
+        self.auth_manager.create_user('admin_user', admin=True)
+        self.auth_manager.create_project('proj1', 'user1', 'a proj', ['user1'])
+        self.auth_manager.create_project('proj2', 'user2', 'a proj', ['user2'])
+
+        class Context(object):
+            """Dummy context for running tests."""
+            user = None
+            project = None
+
         self.context = Context()
 
-    def tearDown(self):
-        self.um.delete_project('proj1')
-        self.um.delete_project('proj2')
-        self.um.delete_user('user1')
-        self.um.delete_user('user2')
-        self.um.delete_user('admin_user')
+    def tearDown(self): # pylint: disable-msg=C0103
+        """Tear down users and projects."""
+        self.auth_manager.delete_project('proj1')
+        self.auth_manager.delete_project('proj2')
+        self.auth_manager.delete_user('user1')
+        self.auth_manager.delete_user('user2')
+        self.auth_manager.delete_user('admin_user')
         super(ObjectStoreTestCase, self).tearDown()
 
     def test_buckets(self):
-        self.context.user = self.um.get_user('user1')
-        self.context.project = self.um.get_project('proj1')
+        """Test the bucket API."""
+        self.context.user = self.auth_manager.get_user('user1')
+        self.context.project = self.auth_manager.get_project('proj1')
         objectstore.bucket.Bucket.create('new_bucket', self.context)
         bucket = objectstore.bucket.Bucket('new_bucket')
 
@@ -101,12 +98,12 @@ class ObjectStoreTestCase(test.BaseTestCase):
         self.assert_(bucket.is_authorized(self.context))
 
         # another user is not authorized
-        self.context.user = self.um.get_user('user2')
-        self.context.project = self.um.get_project('proj2')
+        self.context.user = self.auth_manager.get_user('user2')
+        self.context.project = self.auth_manager.get_project('proj2')
         self.assertFalse(bucket.is_authorized(self.context))
 
         # admin is authorized to use bucket
-        self.context.user = self.um.get_user('admin_user')
+        self.context.user = self.auth_manager.get_user('admin_user')
         self.context.project = None
         self.assertTrue(bucket.is_authorized(self.context))
 
@@ -136,8 +133,9 @@ class ObjectStoreTestCase(test.BaseTestCase):
         self.assertRaises(NotFound, objectstore.bucket.Bucket, 'new_bucket')
 
     def test_images(self):
-        self.context.user = self.um.get_user('user1')
-        self.context.project = self.um.get_project('proj1')
+        "Test the image API."
+        self.context.user = self.auth_manager.get_user('user1')
+        self.context.project = self.auth_manager.get_project('proj1')
 
         # create a bucket for our bundle
         objectstore.bucket.Bucket.create('image_bucket', self.context)
@@ -149,10 +147,12 @@ class ObjectStoreTestCase(test.BaseTestCase):
             bucket[os.path.basename(path)] = open(path, 'rb').read()
 
         # register an image
-        objectstore.image.Image.register_aws_image('i-testing', 'image_bucket/1mb.manifest.xml', self.context)
+        image.Image.register_aws_image('i-testing',
+                                       'image_bucket/1mb.manifest.xml',
+                                       self.context)
 
         # verify image
-        my_img = objectstore.image.Image('i-testing')
+        my_img = image.Image('i-testing')
         result_image_file = os.path.join(my_img.path, 'image')
         self.assertEqual(os.stat(result_image_file).st_size, 1048576)
 
@@ -160,38 +160,48 @@ class ObjectStoreTestCase(test.BaseTestCase):
         self.assertEqual(sha, '3b71f43ff30f4b15b5cd85dd9e95ebc7e84eb5a3')
 
         # verify image permissions
-        self.context.user = self.um.get_user('user2')
-        self.context.project = self.um.get_project('proj2')
+        self.context.user = self.auth_manager.get_user('user2')
+        self.context.project = self.auth_manager.get_project('proj2')
         self.assertFalse(my_img.is_authorized(self.context))
 
 
 class TestHTTPChannel(http.HTTPChannel):
-    # Otherwise we end up with an unclean reactor
-    def checkPersistence(self, _, __):
+    """Dummy site required for twisted.web"""
+
+    def checkPersistence(self, _, __): # pylint: disable-msg=C0103
+        """Otherwise we end up with an unclean reactor."""
         return False
 
 
 class TestSite(server.Site):
+    """Dummy site required for twisted.web"""
     protocol = TestHTTPChannel
 
 
 class S3APITestCase(test.TrialTestCase):
-    def setUp(self):
+    """Test objectstore through S3 API."""
+
+    def setUp(self): # pylint: disable-msg=C0103
+        """Setup users, projects, and start a test server."""
         super(S3APITestCase, self).setUp()
 
-        FLAGS.auth_driver='nova.auth.ldapdriver.FakeLdapDriver',
-        FLAGS.buckets_path = os.path.join(oss_tempdir, 'buckets')
+        FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver',
+        FLAGS.buckets_path = os.path.join(OSS_TEMPDIR, 'buckets')
 
-        self.um = manager.AuthManager()
-        self.admin_user = self.um.create_user('admin', admin=True)
-        self.admin_project = self.um.create_project('admin', self.admin_user)
+        self.auth_manager = manager.AuthManager()
+        self.admin_user = self.auth_manager.create_user('admin', admin=True)
+        self.admin_project = self.auth_manager.create_project('admin',
+                                                              self.admin_user)
 
         shutil.rmtree(FLAGS.buckets_path)
         os.mkdir(FLAGS.buckets_path)
 
         root = S3()
         self.site = TestSite(root)
-        self.listening_port = reactor.listenTCP(0, self.site, interface='127.0.0.1')
+        # pylint: disable-msg=E1101
+        self.listening_port = reactor.listenTCP(0, self.site,
+                                                interface='127.0.0.1')
+        # pylint: enable-msg=E1101
         self.tcp_port = self.listening_port.getHost().port
 
 
@@ -205,65 +215,90 @@ class S3APITestCase(test.TrialTestCase):
                                  is_secure=False,
                                  calling_format=OrdinaryCallingFormat())
 
-        # Don't attempt to reuse connections
         def get_http_connection(host, is_secure):
+            """Get a new S3 connection, don't attempt to reuse connections."""
             return self.conn.new_http_connection(host, is_secure)
+
         self.conn.get_http_connection = get_http_connection
 
-    def _ensure_empty_list(self, l):
-        self.assertEquals(len(l), 0, "List was not empty")
+    def _ensure_no_buckets(self, buckets): # pylint: disable-msg=C0111
+        self.assertEquals(len(buckets), 0, "Bucket list was not empty")
         return True
 
-    def _ensure_only_bucket(self, l, name):
-        self.assertEquals(len(l), 1, "List didn't have exactly one element in it")
-        self.assertEquals(l[0].name, name, "Wrong name")
+    def _ensure_one_bucket(self, buckets, name): # pylint: disable-msg=C0111
+        self.assertEquals(len(buckets), 1,
+                          "Bucket list didn't have exactly one element in it")
+        self.assertEquals(buckets[0].name, name, "Wrong name")
+        return True
 
     def test_000_list_buckets(self):
-        d = threads.deferToThread(self.conn.get_all_buckets)
-        d.addCallback(self._ensure_empty_list)
-        return d
+        """Make sure we are starting with no buckets."""
+        deferred = threads.deferToThread(self.conn.get_all_buckets)
+        deferred.addCallback(self._ensure_no_buckets)
+        return deferred
 
     def test_001_create_and_delete_bucket(self):
+        """Test bucket creation and deletion."""
         bucket_name = 'testbucket'
 
-        d = threads.deferToThread(self.conn.create_bucket, bucket_name)
-        d.addCallback(lambda _:threads.deferToThread(self.conn.get_all_buckets))
+        deferred = threads.deferToThread(self.conn.create_bucket, bucket_name)
+        deferred.addCallback(lambda _:
+                             threads.deferToThread(self.conn.get_all_buckets))
 
-        def ensure_only_bucket(l, name):
-            self.assertEquals(len(l), 1, "List didn't have exactly one element in it")
-            self.assertEquals(l[0].name, name, "Wrong name")
-        d.addCallback(ensure_only_bucket, bucket_name)
+        deferred.addCallback(self._ensure_one_bucket, bucket_name)
 
-        d.addCallback(lambda _:threads.deferToThread(self.conn.delete_bucket, bucket_name))
-        d.addCallback(lambda _:threads.deferToThread(self.conn.get_all_buckets))
-        d.addCallback(self._ensure_empty_list)
-        return d
+        deferred.addCallback(lambda _:
+                             threads.deferToThread(self.conn.delete_bucket,
+                                                   bucket_name))
+        deferred.addCallback(lambda _:
+                             threads.deferToThread(self.conn.get_all_buckets))
+        deferred.addCallback(self._ensure_no_buckets)
+        return deferred
 
     def test_002_create_bucket_and_key_and_delete_key_again(self):
+        """Test key operations on buckets."""
         bucket_name = 'testbucket'
         key_name = 'somekey'
         key_contents = 'somekey'
 
-        d = threads.deferToThread(self.conn.create_bucket, bucket_name)
-        d.addCallback(lambda b:threads.deferToThread(b.new_key, key_name))
-        d.addCallback(lambda k:threads.deferToThread(k.set_contents_from_string, key_contents))
+        deferred = threads.deferToThread(self.conn.create_bucket, bucket_name)
+        deferred.addCallback(lambda b:
+                             threads.deferToThread(b.new_key, key_name))
+        deferred.addCallback(lambda k:
+                             threads.deferToThread(k.set_contents_from_string,
+                                                   key_contents))
+
         def ensure_key_contents(bucket_name, key_name, contents):
+            """Verify contents for a key in the given bucket."""
             bucket = self.conn.get_bucket(bucket_name)
             key = bucket.get_key(key_name)
-            self.assertEquals(key.get_contents_as_string(), contents,  "Bad contents")
-        d.addCallback(lambda _:threads.deferToThread(ensure_key_contents, bucket_name, key_name, key_contents))
+            self.assertEquals(key.get_contents_as_string(), contents,
+                              "Bad contents")
+
+        deferred.addCallback(lambda _:
+                             threads.deferToThread(ensure_key_contents,
+                                                   bucket_name, key_name,
+                                                   key_contents))
+
         def delete_key(bucket_name, key_name):
+            """Delete a key for the given bucket."""
             bucket = self.conn.get_bucket(bucket_name)
             key = bucket.get_key(key_name)
             key.delete()
-        d.addCallback(lambda _:threads.deferToThread(delete_key, bucket_name, key_name))
-        d.addCallback(lambda _:threads.deferToThread(self.conn.get_bucket, bucket_name))
-        d.addCallback(lambda b:threads.deferToThread(b.get_all_keys))
-        d.addCallback(self._ensure_empty_list)
-        return d
 
-    def tearDown(self):
-        self.um.delete_user('admin')
-        self.um.delete_project('admin')
-        return defer.DeferredList([defer.maybeDeferred(self.listening_port.stopListening)])
-        super(S3APITestCase, self).tearDown()
+        deferred.addCallback(lambda _:
+                             threads.deferToThread(delete_key, bucket_name,
+                                                   key_name))
+        deferred.addCallback(lambda _:
+                             threads.deferToThread(self.conn.get_bucket,
+                                                   bucket_name))
+        deferred.addCallback(lambda b: threads.deferToThread(b.get_all_keys))
+        deferred.addCallback(self._ensure_no_buckets)
+        return deferred
+
+    def tearDown(self): # pylint: disable-msg=C0103
+        """Tear down auth and test server."""
+        self.auth_manager.delete_user('admin')
+        self.auth_manager.delete_project('admin')
+        stop_listening = defer.maybeDeferred(self.listening_port.stopListening)
+        return defer.DeferredList([stop_listening])
diff --git a/pylintrc b/pylintrc
new file mode 100644
index 000000000000..a853e5bed2f3
--- /dev/null
+++ b/pylintrc
@@ -0,0 +1,6 @@
+[Basic]
+method-rgx=[a-z_][a-z0-9_]{2,50}$
+
+[Design]
+max-public-methods=100
+min-public-methods=0

From 778e8152751ebdbb2adad544cc705691395d335d Mon Sep 17 00:00:00 2001
From: Devin Carlen <devin.carlen@gmail.com>
Date: Thu, 5 Aug 2010 16:20:26 -0700
Subject: [PATCH 33/36] Fixed image modification authorization, API cleanup

---
 nova/endpoint/cloud.py      | 2 ++
 nova/objectstore/handler.py | 3 ++-
 nova/objectstore/image.py   | 8 ++++++--
 3 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py
index 0ee278f84065..cc6216fecd3c 100644
--- a/nova/endpoint/cloud.py
+++ b/nova/endpoint/cloud.py
@@ -677,6 +677,8 @@ class CloudController(object):
         # TODO(devcamcar): Support users and groups other than 'all'.
         if attribute != 'launchPermission':
             raise exception.ApiError('attribute not supported: %s' % attribute)
+        if not 'user_group' in kwargs:
+            raise exception.ApiError('user or group not specified')
         if len(kwargs['user_group']) != 1 and kwargs['user_group'][0] != 'all':
             raise exception.ApiError('only group "all" is supported')
         if not operation_type in ['add', 'remove']:
diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py
index b4d7e61790cc..f625a2aa1e1d 100644
--- a/nova/objectstore/handler.py
+++ b/nova/objectstore/handler.py
@@ -266,7 +266,8 @@ class ImagesResource(Resource):
         """ returns a json listing of all images
             that a user has permissions to see """
 
-        images = [i for i in image.Image.all() if i.is_authorized(request.context)]
+        images = [i for i in image.Image.all() \
+                  if i.is_authorized(request.context, readonly=True)]
 
         request.write(json.dumps([i.metadata for i in images]))
         request.finish()
diff --git a/nova/objectstore/image.py b/nova/objectstore/image.py
index bea2e96378be..860298ba6423 100644
--- a/nova/objectstore/image.py
+++ b/nova/objectstore/image.py
@@ -65,9 +65,13 @@ class Image(object):
         except:
             pass
 
-    def is_authorized(self, context):
+    def is_authorized(self, context, readonly=False):
+        # NOTE(devcamcar): Public images can be read by anyone,
+        #                  but only modified by admin or owner.
         try:
-            return self.metadata['isPublic'] or context.user.is_admin() or self.metadata['imageOwnerId'] == context.project.id
+            return (self.metadata['isPublic'] and readonly) or \
+                   context.user.is_admin() or \
+                   self.metadata['imageOwnerId'] == context.project.id
         except:
             return False
 

From 63708752366300b4267a9dfbf926f89f9df3f4df Mon Sep 17 00:00:00 2001
From: Chris Behrens <cbehrens@codestud.com>
Date: Thu, 5 Aug 2010 18:26:39 -0500
Subject: [PATCH 34/36] Fixes bug#614090 -- nova.virt.images._fetch_local_image
 being called with 4 args but only has 3

---
 nova/virt/images.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/nova/virt/images.py b/nova/virt/images.py
index 872eb6d6a91e..48a87b514a22 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -65,7 +65,7 @@ def _fetch_s3_image(image, path, user, project):
     cmd += ['-o', path]
     return process.SharedPool().execute(executable=cmd[0], args=cmd[1:])
 
-def _fetch_local_image(image, path, _):
+def _fetch_local_image(image, path, user, project):
     source = _image_path('%s/image' % image)
     return process.simple_execute('cp %s %s' % (source, path))
 

From 5cda99300a437feefac39131bb714e9f85d765ce Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya <vishvananda@gmail.com>
Date: Thu, 5 Aug 2010 16:56:23 -0700
Subject: [PATCH 35/36] Made group membership check only search group instead
 of subtree.  Roles in a group are removed when a user is removed from that
 group.  Added test

---
 nova/auth/fakeldap.py       | 11 ++++++++---
 nova/auth/ldapdriver.py     | 25 +++++++++++++++++--------
 nova/tests/auth_unittest.py | 10 +++++++++-
 3 files changed, 34 insertions(+), 12 deletions(-)

diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py
index 4c32ed9d92a5..b420924aff9f 100644
--- a/nova/auth/fakeldap.py
+++ b/nova/auth/fakeldap.py
@@ -28,6 +28,8 @@ import json
 from nova import datastore
 
 
+SCOPE_BASE = 0
+SCOPE_ONELEVEL = 1 # not implemented
 SCOPE_SUBTREE  = 2
 MOD_ADD = 0
 MOD_DELETE = 1
@@ -188,15 +190,18 @@ class FakeLDAP(object):
 
         Args:
         dn -- dn to search under
-        scope -- only SCOPE_SUBTREE is supported
+        scope -- only SCOPE_BASE and SCOPE_SUBTREE are supported
         query -- query to filter objects by
         fields -- fields to return. Returns all fields if not specified
 
         """
-        if scope != SCOPE_SUBTREE:
+        if scope != SCOPE_BASE and scope != SCOPE_SUBTREE:
             raise NotImplementedError(str(scope))
         redis = datastore.Redis.instance()
-        keys = redis.keys("%s*%s" % (self.__redis_prefix, dn))
+        if scope == SCOPE_BASE:
+            keys = ["%s%s" % (self.__redis_prefix, dn)]
+        else:
+            keys = redis.keys("%s*%s" % (self.__redis_prefix, dn))
         objects = []
         for key in keys:
             # get the attributes from redis
diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py
index 055e8332bb38..ec739e1341a5 100644
--- a/nova/auth/ldapdriver.py
+++ b/nova/auth/ldapdriver.py
@@ -272,26 +272,30 @@ class LdapDriver(object):
         """Check if project exists"""
         return self.get_project(name) != None
 
-    def __find_object(self, dn, query = None):
+    def __find_object(self, dn, query=None, scope=None):
         """Find an object by dn and query"""
-        objects = self.__find_objects(dn, query)
+        objects = self.__find_objects(dn, query, scope)
         if len(objects) == 0:
             return None
         return objects[0]
 
-    def __find_dns(self, dn, query=None):
+    def __find_dns(self, dn, query=None, scope=None):
         """Find dns by query"""
+        if scope is None:  # NOTE: SCOPE_BASE is 0 (falsy), so test against None explicitly
+            scope = self.ldap.SCOPE_SUBTREE
         try:
-            res = self.conn.search_s(dn, self.ldap.SCOPE_SUBTREE, query)
+            res = self.conn.search_s(dn, scope, query)
         except self.ldap.NO_SUCH_OBJECT:
             return []
         # just return the DNs
         return [dn for dn, attributes in res]
 
-    def __find_objects(self, dn, query = None):
+    def __find_objects(self, dn, query=None, scope=None):
         """Find objects by query"""
+        if scope is None:  # NOTE: SCOPE_BASE is 0 (falsy), so test against None explicitly
+            scope = self.ldap.SCOPE_SUBTREE
         try:
-            res = self.conn.search_s(dn, self.ldap.SCOPE_SUBTREE, query)
+            res = self.conn.search_s(dn, scope, query)
         except self.ldap.NO_SUCH_OBJECT:
             return []
         # just return the attributes
@@ -361,7 +365,8 @@ class LdapDriver(object):
         if not self.__group_exists(group_dn):
             return False
         res = self.__find_object(group_dn,
-                               '(member=%s)' % self.__uid_to_dn(uid))
+                                 '(member=%s)' % self.__uid_to_dn(uid),
+                                 self.ldap.SCOPE_BASE)
         return res != None
 
     def __add_to_group(self, uid, group_dn):
@@ -391,7 +396,11 @@ class LdapDriver(object):
         if not self.__is_in_group(uid, group_dn):
             raise exception.NotFound("User %s is not a member of the group" %
                                      (uid,))
-        self.__safe_remove_from_group(uid, group_dn)
+        # NOTE(vish): remove user from group and any sub_groups
+        sub_dns = self.__find_group_dns_with_member(
+                group_dn, uid)
+        for sub_dn in sub_dns:
+            self.__safe_remove_from_group(uid, sub_dn)
 
     def __safe_remove_from_group(self, uid, group_dn):
         """Remove user from group, deleting group if user is last member"""
diff --git a/nova/tests/auth_unittest.py b/nova/tests/auth_unittest.py
index 2167c2385a59..e00297cb1711 100644
--- a/nova/tests/auth_unittest.py
+++ b/nova/tests/auth_unittest.py
@@ -135,10 +135,18 @@ class AuthTestCase(test.BaseTestCase):
         self.manager.add_to_project('test2', 'testproj')
         self.assertTrue(self.manager.get_project('testproj').has_member('test2'))
 
-    def test_208_can_remove_user_from_project(self):
+    def test_207_can_remove_user_from_project(self):
         self.manager.remove_from_project('test2', 'testproj')
         self.assertFalse(self.manager.get_project('testproj').has_member('test2'))
 
+    def test_208_can_remove_add_user_with_role(self):
+        self.manager.add_to_project('test2', 'testproj')
+        self.manager.add_role('test2', 'developer', 'testproj')
+        self.manager.remove_from_project('test2', 'testproj')
+        self.assertFalse(self.manager.has_role('test2', 'developer', 'testproj'))
+        self.manager.add_to_project('test2', 'testproj')
+        self.manager.remove_from_project('test2', 'testproj')
+
     def test_209_can_generate_x509(self):
         # MUST HAVE RUN CLOUD SETUP BY NOW
         self.cloud = cloud.CloudController()

From 66c8abfb9f00ea06517e102f02ef8bdc9469aae8 Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya <vishvananda@gmail.com>
Date: Fri, 6 Aug 2010 14:31:03 -0700
Subject: [PATCH 36/36] fix search/replace error

---
 nova/tests/network_unittest.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py
index c7d3e86f0a0a..879ee02a472c 100644
--- a/nova/tests/network_unittest.py
+++ b/nova/tests/network_unittest.py
@@ -166,8 +166,8 @@ class NetworkTestCase(test.TrialTestCase):
         for i in xrange(vpn.NetworkData.num_ports_for_ip(FLAGS.vpn_ip)):
             vpns.append(vpn.NetworkData.create("vpnuser%s" % i))
         self.assertRaises(vpn.NoMorePorts, vpn.NetworkData.create, "boom")
-        for vpn in vpns:
-            vpn.destroy()
+        for network_datum in vpns:
+            network_datum.destroy()
 
     def test_release_before_deallocate(self):
         pass