merged trunk

.mailmap | 1
@@ -15,6 +15,7 @@
<corywright@gmail.com> <cory.wright@rackspace.com>
<devin.carlen@gmail.com> <devcamcar@illian.local>
<ewan.mellor@citrix.com> <emellor@silver>
<itoumsn@nttdata.co.jp> <itoumsn@shayol>
<jaypipes@gmail.com> <jpipes@serialcoder>
<jmckenty@gmail.com> <jmckenty@joshua-mckentys-macbook-pro.local>
<jmckenty@gmail.com> <jmckenty@yyj-dhcp171.corp.flock.com>

Authors | 1
@@ -39,6 +39,7 @@ Ken Pepple <ken.pepple@gmail.com>
Kevin L. Mitchell <kevin.mitchell@rackspace.com>
Koji Iida <iida.koji@lab.ntt.co.jp>
Lorin Hochstein <lorin@isi.edu>
Masanori Itoh <itoumsn@nttdata.co.jp>
Matt Dietz <matt.dietz@rackspace.com>
Michael Gundlach <michael.gundlach@rackspace.com>
Monsyne Dragon <mdragon@rackspace.com>

bin/nova-api | 46
@@ -36,49 +36,15 @@ gettext.install('nova', unicode=1)

from nova import flags
from nova import log as logging
from nova import service
from nova import utils
from nova import version
from nova import wsgi


LOG = logging.getLogger('nova.api')

FLAGS = flags.FLAGS
flags.DEFINE_string('ec2_listen', "0.0.0.0",
                    'IP address for EC2 API to listen')
flags.DEFINE_integer('ec2_listen_port', 8773, 'port for ec2 api to listen')
flags.DEFINE_string('osapi_listen', "0.0.0.0",
                    'IP address for OpenStack API to listen')
flags.DEFINE_integer('osapi_listen_port', 8774, 'port for os api to listen')
flags.DEFINE_flag(flags.HelpFlag())
flags.DEFINE_flag(flags.HelpshortFlag())
flags.DEFINE_flag(flags.HelpXMLFlag())

API_ENDPOINTS = ['ec2', 'osapi']


def run_app(paste_config_file):
    LOG.debug(_("Using paste.deploy config at: %s"), paste_config_file)
    apps = []
    for api in API_ENDPOINTS:
        config = wsgi.load_paste_configuration(paste_config_file, api)
        if config is None:
            LOG.debug(_("No paste configuration for app: %s"), api)
            continue
        LOG.debug(_("App Config: %(api)s\n%(config)r") % locals())
        LOG.info(_("Running %s API"), api)
        app = wsgi.load_paste_app(paste_config_file, api)
        apps.append((app, getattr(FLAGS, "%s_listen_port" % api),
                     getattr(FLAGS, "%s_listen" % api)))
    if len(apps) == 0:
        LOG.error(_("No known API applications configured in %s."),
                  paste_config_file)
        return

    server = wsgi.Server()
    for app in apps:
        server.start(*app)
    server.wait()


if __name__ == '__main__':
    utils.default_flagfile()
@@ -90,8 +56,6 @@ if __name__ == '__main__':
    for flag in FLAGS:
        flag_get = FLAGS.get(flag, None)
        LOG.debug("%(flag)s : %(flag_get)s" % locals())
    conf = wsgi.paste_config_file('nova-api.conf')
    if conf:
        run_app(conf)
    else:
        LOG.error(_("No paste configuration found for: %s"), 'nova-api.conf')

    service = service.serve_wsgi(service.ApiService)
    service.wait()

@@ -548,6 +548,15 @@ class NetworkCommands(object):
                                network.dhcp_start,
                                network.dns)

    def delete(self, fixed_range):
        """Deletes a network"""
        network = db.network_get_by_cidr(context.get_admin_context(), \
                                         fixed_range)
        if network.project_id is not None:
            raise ValueError(_('Network must be disassociated from project %s'
                               ' before delete' % network.project_id))
        db.network_delete_safe(context.get_admin_context(), network.id)


class ServiceCommands(object):
    """Enable and disable running services"""

@@ -8,5 +8,6 @@ from nova import utils
def setup(app):
    rootdir = os.path.abspath(app.srcdir + '/..')
    print "**Autodocumenting from %s" % rootdir
    rv = utils.execute('cd %s && ./generate_autodoc_index.sh' % rootdir)
    os.chdir(rootdir)
    rv = utils.execute('./generate_autodoc_index.sh')
    print rv[0]

@@ -88,6 +88,10 @@ class InvalidInputException(Error):
    pass


class InvalidContentType(Error):
    pass


class TimeoutException(Error):
    pass

@@ -48,7 +48,6 @@ class Exchange(object):
        nm = self.name
        LOG.debug(_('(%(nm)s) publish (key: %(routing_key)s)'
                ' %(message)s') % locals())
        routing_key = routing_key.split('.')[0]
        if routing_key in self._routes:
            for f in self._routes[routing_key]:
                LOG.debug(_('Publishing to route %s'), f)

@@ -321,6 +321,8 @@ DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger')

DEFINE_string('state_path', os.path.join(os.path.dirname(__file__), '../'),
              "Top-level directory for maintaining nova's state")
DEFINE_string('lock_path', os.path.join(os.path.dirname(__file__), '../'),
              "Directory for lock files")
DEFINE_string('logdir', None, 'output to a per-service log file in named '
                              'directory')

@@ -266,7 +266,10 @@ class NovaRootLogger(NovaLogger):


def handle_exception(type, value, tb):
    logging.root.critical(str(value), exc_info=(type, value, tb))
    extra = {}
    if FLAGS.verbose:
        extra['exc_info'] = (type, value, tb)
    logging.root.critical(str(value), **extra)


def reset():

@@ -123,7 +123,7 @@ class Consumer(messaging.Consumer):
                LOG.error(_("Reconnected to queue"))
                self.failed_connection = False
        # NOTE(vish): This is catching all errors because we really don't
        #             exceptions to be logged 10 times a second if some
        #             want exceptions to be logged 10 times a second if some
        #             persistent failure occurs.
        except Exception:  # pylint: disable-msg=W0703
            if not self.failed_connection:

@@ -2,6 +2,7 @@

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -39,6 +40,7 @@ from nova import flags
from nova import rpc
from nova import utils
from nova import version
from nova import wsgi


FLAGS = flags.FLAGS
@@ -48,6 +50,14 @@ flags.DEFINE_integer('report_interval', 10,
flags.DEFINE_integer('periodic_interval', 60,
                     'seconds between running periodic tasks',
                     lower_bound=1)
flags.DEFINE_string('ec2_listen', "0.0.0.0",
                    'IP address for EC2 API to listen')
flags.DEFINE_integer('ec2_listen_port', 8773, 'port for ec2 api to listen')
flags.DEFINE_string('osapi_listen', "0.0.0.0",
                    'IP address for OpenStack API to listen')
flags.DEFINE_integer('osapi_listen_port', 8774, 'port for os api to listen')
flags.DEFINE_string('api_paste_config', "api-paste.ini",
                    'File name for the paste.deploy config for nova-api')


class Service(object):
@@ -210,6 +220,41 @@ class Service(object):
                logging.exception(_("model server went away"))


class WsgiService(object):
    """Base class for WSGI based services.

    For each api you define, you must also define these flags:
    :<api>_listen:            The address on which to listen
    :<api>_listen_port:       The port on which to listen
    """

    def __init__(self, conf, apis):
        self.conf = conf
        self.apis = apis
        self.wsgi_app = None

    def start(self):
        self.wsgi_app = _run_wsgi(self.conf, self.apis)

    def wait(self):
        self.wsgi_app.wait()


class ApiService(WsgiService):
    """Class for our nova-api service"""
    @classmethod
    def create(cls, conf=None):
        if not conf:
            conf = wsgi.paste_config_file(FLAGS.api_paste_config)
            if not conf:
                message = (_("No paste configuration found for: %s"),
                           FLAGS.api_paste_config)
                raise exception.Error(message)
        api_endpoints = ['ec2', 'osapi']
        service = cls(conf, api_endpoints)
        return service


def serve(*services):
    try:
        if not services:
@@ -239,3 +284,46 @@ def serve(*services):
def wait():
    while True:
        greenthread.sleep(5)


def serve_wsgi(cls, conf=None):
    try:
        service = cls.create(conf)
    except Exception:
        logging.exception('in WsgiService.create()')
        raise
    finally:
        # After we've loaded up all our dynamic bits, check
        # whether we should print help
        flags.DEFINE_flag(flags.HelpFlag())
        flags.DEFINE_flag(flags.HelpshortFlag())
        flags.DEFINE_flag(flags.HelpXMLFlag())
        FLAGS.ParseNewFlags()

    service.start()

    return service


def _run_wsgi(paste_config_file, apis):
    logging.debug(_("Using paste.deploy config at: %s"), paste_config_file)
    apps = []
    for api in apis:
        config = wsgi.load_paste_configuration(paste_config_file, api)
        if config is None:
            logging.debug(_("No paste configuration for app: %s"), api)
            continue
        logging.debug(_("App Config: %(api)s\n%(config)r") % locals())
        logging.info(_("Running %s API"), api)
        app = wsgi.load_paste_app(paste_config_file, api)
        apps.append((app, getattr(FLAGS, "%s_listen_port" % api),
                     getattr(FLAGS, "%s_listen" % api)))
    if len(apps) == 0:
        logging.error(_("No known API applications configured in %s."),
                      paste_config_file)
        return

    server = wsgi.Server()
    for app in apps:
        server.start(*app)
    return server
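
Note: the service hunk above adds WsgiService, ApiService and serve_wsgi(); combined with the slimmed-down bin/nova-api shown earlier, the API entry point reduces to roughly the sketch below. This is a hedged illustration: only default_flagfile(), serve_wsgi(ApiService) and wait() come from the diff, the variable name "server" is illustrative.

    # minimal launcher sketch based on the new bin/nova-api tail
    from nova import service
    from nova import utils

    if __name__ == '__main__':
        utils.default_flagfile()
        # builds the EC2 and OSAPI WSGI apps from the api-paste config and starts them
        server = service.serve_wsgi(service.ApiService)
        server.wait()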

@@ -275,7 +275,7 @@ class CloudTestCase(test.TestCase):
        self._create_key('test1')
        self._create_key('test2')
        result = self.cloud.describe_key_pairs(self.context)
        keys = result["keypairsSet"]
        keys = result["keySet"]
        self.assertTrue(filter(lambda k: k['keyName'] == 'test1', keys))
        self.assertTrue(filter(lambda k: k['keyName'] == 'test2', keys))

@@ -62,7 +62,7 @@ class ComputeTestCase(test.TestCase):
        self.manager.delete_project(self.project)
        super(ComputeTestCase, self).tearDown()

    def _create_instance(self):
    def _create_instance(self, params={}):
        """Create a test instance"""
        inst = {}
        inst['image_id'] = 1
@@ -73,6 +73,7 @@ class ComputeTestCase(test.TestCase):
        inst['instance_type'] = 'm1.tiny'
        inst['mac_address'] = utils.generate_mac()
        inst['ami_launch_index'] = 0
        inst.update(params)
        return db.instance_create(self.context, inst)['id']

    def _create_group(self):
@@ -273,9 +274,30 @@ class ComputeTestCase(test.TestCase):

        self.compute.terminate_instance(self.context, instance_id)

    def test_resize_instance(self):
        """Ensure instance can be migrated/resized"""
        instance_id = self._create_instance()
        context = self.context.elevated()
        self.compute.run_instance(self.context, instance_id)
        db.instance_update(self.context, instance_id, {'host': 'foo'})
        self.compute.prep_resize(context, instance_id)
        migration_ref = db.migration_get_by_instance_and_status(context,
                instance_id, 'pre-migrating')
        self.compute.resize_instance(context, instance_id,
                migration_ref['id'])
        self.compute.terminate_instance(context, instance_id)

    def test_get_by_flavor_id(self):
        type = instance_types.get_by_flavor_id(1)
        self.assertEqual(type, 'm1.tiny')

    def test_resize_same_source_fails(self):
        """Ensure instance fails to migrate when source and destination are
        the same host"""
        instance_id = self._create_instance()
        self.compute.run_instance(self.context, instance_id)
        self.assertRaises(exception.Error, self.compute.prep_resize,
                self.context, instance_id)
        self.compute.terminate_instance(self.context, instance_id)
        type = instance_types.get_by_flavor_id("1")
        self.assertEqual(type, 'm1.tiny')

@@ -59,6 +59,7 @@ class DirectTestCase(test.TestCase):
        req.headers['X-OpenStack-User'] = 'user1'
        req.headers['X-OpenStack-Project'] = 'proj1'
        resp = req.get_response(self.auth_router)
        self.assertEqual(resp.status_int, 200)
        data = json.loads(resp.body)
        self.assertEqual(data['user'], 'user1')
        self.assertEqual(data['project'], 'proj1')
@@ -69,6 +70,7 @@ class DirectTestCase(test.TestCase):
        req.method = 'POST'
        req.body = 'json=%s' % json.dumps({'data': 'foo'})
        resp = req.get_response(self.router)
        self.assertEqual(resp.status_int, 200)
        resp_parsed = json.loads(resp.body)
        self.assertEqual(resp_parsed['data'], 'foo')

@@ -78,6 +80,7 @@ class DirectTestCase(test.TestCase):
        req.method = 'POST'
        req.body = 'data=foo'
        resp = req.get_response(self.router)
        self.assertEqual(resp.status_int, 200)
        resp_parsed = json.loads(resp.body)
        self.assertEqual(resp_parsed['data'], 'foo')

@@ -14,10 +14,12 @@
#    License for the specific language governing permissions and limitations
#    under the License.

import errno
import os
import select

from nova import test
from nova.utils import parse_mailmap, str_dict_replace
from nova.utils import parse_mailmap, str_dict_replace, synchronized


class ProjectTestCase(test.TestCase):
@@ -55,3 +57,47 @@ class ProjectTestCase(test.TestCase):
                                '%r not listed in Authors' % missing)
            finally:
                tree.unlock()


class LockTestCase(test.TestCase):
    def test_synchronized_wrapped_function_metadata(self):
        @synchronized('whatever')
        def foo():
            """Bar"""
            pass
        self.assertEquals(foo.__doc__, 'Bar', "Wrapped function's docstring "
                                              "got lost")
        self.assertEquals(foo.__name__, 'foo', "Wrapped function's name "
                                               "got mangled")

    def test_synchronized(self):
        rpipe1, wpipe1 = os.pipe()
        rpipe2, wpipe2 = os.pipe()

        @synchronized('testlock')
        def f(rpipe, wpipe):
            try:
                os.write(wpipe, "foo")
            except OSError, e:
                self.assertEquals(e.errno, errno.EPIPE)
                return

            rfds, _, __ = select.select([rpipe], [], [], 1)
            self.assertEquals(len(rfds), 0, "The other process, which was"
                                            " supposed to be locked, "
                                            "wrote on its end of the "
                                            "pipe")
            os.close(rpipe)

        pid = os.fork()
        if pid > 0:
            os.close(wpipe1)
            os.close(rpipe2)

            f(rpipe1, wpipe2)
        else:
            os.close(rpipe1)
            os.close(wpipe2)

            f(rpipe2, wpipe1)
            os._exit(0)

@@ -343,13 +343,13 @@ def lease_ip(private_ip):
                                          private_ip)
    instance_ref = db.fixed_ip_get_instance(context.get_admin_context(),
                                            private_ip)
    cmd = "%s add %s %s fake" % (binpath('nova-dhcpbridge'),
                                 instance_ref['mac_address'],
                                 private_ip)
    cmd = (binpath('nova-dhcpbridge'), 'add',
           instance_ref['mac_address'],
           private_ip, 'fake')
    env = {'DNSMASQ_INTERFACE': network_ref['bridge'],
           'TESTING': '1',
           'FLAGFILE': FLAGS.dhcpbridge_flagfile}
    (out, err) = utils.execute(cmd, addl_env=env)
    (out, err) = utils.execute(*cmd, addl_env=env)
    LOG.debug("ISSUE_IP: %s, %s ", out, err)


@@ -359,11 +359,11 @@ def release_ip(private_ip):
                                          private_ip)
    instance_ref = db.fixed_ip_get_instance(context.get_admin_context(),
                                            private_ip)
    cmd = "%s del %s %s fake" % (binpath('nova-dhcpbridge'),
                                 instance_ref['mac_address'],
                                 private_ip)
    cmd = (binpath('nova-dhcpbridge'), 'del',
           instance_ref['mac_address'],
           private_ip, 'fake')
    env = {'DNSMASQ_INTERFACE': network_ref['bridge'],
           'TESTING': '1',
           'FLAGFILE': FLAGS.dhcpbridge_flagfile}
    (out, err) = utils.execute(cmd, addl_env=env)
    (out, err) = utils.execute(*cmd, addl_env=env)
    LOG.debug("RELEASE_IP: %s, %s ", out, err)

@@ -14,6 +14,9 @@
#    License for the specific language governing permissions and limitations
#    under the License.

import os

import eventlet
from xml.etree.ElementTree import fromstring as xml_to_tree
from xml.dom.minidom import parseString as xml_to_dom

@@ -30,6 +33,70 @@ FLAGS = flags.FLAGS
flags.DECLARE('instances_path', 'nova.compute.manager')


def _concurrency(wait, done, target):
    wait.wait()
    done.send()


class CacheConcurrencyTestCase(test.TestCase):
    def setUp(self):
        super(CacheConcurrencyTestCase, self).setUp()

        def fake_exists(fname):
            basedir = os.path.join(FLAGS.instances_path, '_base')
            if fname == basedir:
                return True
            return False

        def fake_execute(*args, **kwargs):
            pass

        self.stubs.Set(os.path, 'exists', fake_exists)
        self.stubs.Set(utils, 'execute', fake_execute)

    def test_same_fname_concurrency(self):
        """Ensures that the same fname cache runs at a sequentially"""
        conn = libvirt_conn.LibvirtConnection
        wait1 = eventlet.event.Event()
        done1 = eventlet.event.Event()
        eventlet.spawn(conn._cache_image, _concurrency,
                       'target', 'fname', False, wait1, done1)
        wait2 = eventlet.event.Event()
        done2 = eventlet.event.Event()
        eventlet.spawn(conn._cache_image, _concurrency,
                       'target', 'fname', False, wait2, done2)
        wait2.send()
        eventlet.sleep(0)
        try:
            self.assertFalse(done2.ready())
            self.assertTrue('fname' in conn._image_sems)
        finally:
            wait1.send()
        done1.wait()
        eventlet.sleep(0)
        self.assertTrue(done2.ready())
        self.assertFalse('fname' in conn._image_sems)

    def test_different_fname_concurrency(self):
        """Ensures that two different fname caches are concurrent"""
        conn = libvirt_conn.LibvirtConnection
        wait1 = eventlet.event.Event()
        done1 = eventlet.event.Event()
        eventlet.spawn(conn._cache_image, _concurrency,
                       'target', 'fname2', False, wait1, done1)
        wait2 = eventlet.event.Event()
        done2 = eventlet.event.Event()
        eventlet.spawn(conn._cache_image, _concurrency,
                       'target', 'fname1', False, wait2, done2)
        wait2.send()
        eventlet.sleep(0)
        try:
            self.assertTrue(done2.ready())
        finally:
            wait1.send()
            eventlet.sleep(0)


class LibvirtConnTestCase(test.TestCase):
    def setUp(self):
        super(LibvirtConnTestCase, self).setUp()
@@ -315,15 +382,16 @@ class IptablesFirewallTestCase(test.TestCase):
        instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])

#        self.fw.add_instance(instance_ref)
        def fake_iptables_execute(cmd, process_input=None):
            if cmd == 'sudo ip6tables-save -t filter':
        def fake_iptables_execute(*cmd, **kwargs):
            process_input = kwargs.get('process_input', None)
            if cmd == ('sudo', 'ip6tables-save', '-t', 'filter'):
                return '\n'.join(self.in6_rules), None
            if cmd == 'sudo iptables-save -t filter':
            if cmd == ('sudo', 'iptables-save', '-t', 'filter'):
                return '\n'.join(self.in_rules), None
            if cmd == 'sudo iptables-restore':
            if cmd == ('sudo', 'iptables-restore'):
                self.out_rules = process_input.split('\n')
                return '', ''
            if cmd == 'sudo ip6tables-restore':
            if cmd == ('sudo', 'ip6tables-restore'):
                self.out6_rules = process_input.split('\n')
                return '', ''
        self.fw.execute = fake_iptables_execute

@@ -346,6 +346,56 @@ class XenAPIDiffieHellmanTestCase(test.TestCase):
        super(XenAPIDiffieHellmanTestCase, self).tearDown()


class XenAPIMigrateInstance(test.TestCase):
    """
    Unit test for verifying migration-related actions
    """

    def setUp(self):
        super(XenAPIMigrateInstance, self).setUp()
        self.stubs = stubout.StubOutForTesting()
        FLAGS.target_host = '127.0.0.1'
        FLAGS.xenapi_connection_url = 'test_url'
        FLAGS.xenapi_connection_password = 'test_pass'
        db_fakes.stub_out_db_instance_api(self.stubs)
        stubs.stub_out_get_target(self.stubs)
        xenapi_fake.reset()
        self.manager = manager.AuthManager()
        self.user = self.manager.create_user('fake', 'fake', 'fake',
                                             admin=True)
        self.project = self.manager.create_project('fake', 'fake', 'fake')
        self.values = {'name': 1, 'id': 1,
                  'project_id': self.project.id,
                  'user_id': self.user.id,
                  'image_id': 1,
                  'kernel_id': None,
                  'ramdisk_id': None,
                  'instance_type': 'm1.large',
                  'mac_address': 'aa:bb:cc:dd:ee:ff',
                  }
        stubs.stub_out_migration_methods(self.stubs)
        glance_stubs.stubout_glance_client(self.stubs,
                                           glance_stubs.FakeGlance)

    def tearDown(self):
        super(XenAPIMigrateInstance, self).tearDown()
        self.manager.delete_project(self.project)
        self.manager.delete_user(self.user)
        self.stubs.UnsetAll()

    def test_migrate_disk_and_power_off(self):
        instance = db.instance_create(self.values)
        stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
        conn = xenapi_conn.get_connection(False)
        conn.migrate_disk_and_power_off(instance, '127.0.0.1')

    def test_finish_resize(self):
        instance = db.instance_create(self.values)
        stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
        conn = xenapi_conn.get_connection(False)
        conn.finish_resize(instance, dict(base_copy='hurr', cow='durr'))


class XenAPIDetermineDiskImageTestCase(test.TestCase):
    """
    Unit tests for code that detects the ImageType

@@ -20,6 +20,7 @@ from nova.virt import xenapi_conn
from nova.virt.xenapi import fake
from nova.virt.xenapi import volume_utils
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import vmops


def stubout_instance_snapshot(stubs):
@@ -217,3 +218,60 @@ class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests):

    def SR_forget(self, _1, ref):
        pass


class FakeSessionForMigrationTests(fake.SessionBase):
    """Stubs out a XenAPISession for Migration tests"""
    def __init__(self, uri):
        super(FakeSessionForMigrationTests, self).__init__(uri)

    def VDI_get_by_uuid(*args):
        return 'hurr'

    def VM_start(self, _1, ref, _2, _3):
        vm = fake.get_record('VM', ref)
        if vm['power_state'] != 'Halted':
            raise fake.Failure(['VM_BAD_POWER_STATE', ref, 'Halted',
                                  vm['power_state']])
        vm['power_state'] = 'Running'
        vm['is_a_template'] = False
        vm['is_control_domain'] = False


def stub_out_migration_methods(stubs):
    def fake_get_snapshot(self, instance):
        return 'foo', 'bar'

    @classmethod
    def fake_get_vdi(cls, session, vm_ref):
        vdi_ref = fake.create_vdi(name_label='derp', read_only=False,
                             sr_ref='herp', sharable=False)
        vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
        return vdi_ref, {'uuid': vdi_rec['uuid'], }

    def fake_shutdown(self, inst, vm, method='clean'):
        pass

    @classmethod
    def fake_sr(cls, session, *args):
        pass

    @classmethod
    def fake_get_sr_path(cls, *args):
        return "fake"

    def fake_destroy(*args, **kwargs):
        pass

    def fake_reset_network(*args, **kwargs):
        pass

    stubs.Set(vmops.VMOps, '_destroy', fake_destroy)
    stubs.Set(vm_utils.VMHelper, 'scan_default_sr', fake_sr)
    stubs.Set(vm_utils.VMHelper, 'scan_sr', fake_sr)
    stubs.Set(vmops.VMOps, '_get_snapshot', fake_get_snapshot)
    stubs.Set(vm_utils.VMHelper, 'get_vdi_for_vm_safely', fake_get_vdi)
    stubs.Set(xenapi_conn.XenAPISession, 'wait_for_task', lambda x, y, z: None)
    stubs.Set(vm_utils.VMHelper, 'get_sr_path', fake_get_sr_path)
    stubs.Set(vmops.VMOps, 'reset_network', fake_reset_network)
    stubs.Set(vmops.VMOps, '_shutdown', fake_shutdown)

@@ -23,10 +23,14 @@ System-level utilities and helper functions.

import base64
import datetime
import functools
import inspect
import json
import lockfile
import netaddr
import os
import random
import re
import socket
import string
import struct
@@ -34,20 +38,20 @@ import sys
import time
import types
from xml.sax import saxutils
import re
import netaddr

from eventlet import event
from eventlet import greenthread
from eventlet.green import subprocess

from nova import exception
from nova.exception import ProcessExecutionError
from nova import flags
from nova import log as logging


LOG = logging.getLogger("nova.utils")
TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
FLAGS = flags.FLAGS


def import_class(import_str):
@@ -125,16 +129,24 @@ def fetchfile(url, target):
#    c.perform()
#    c.close()
#    fp.close()
    execute("curl --fail %s -o %s" % (url, target))
    execute("curl", "--fail", url, "-o", target)


def execute(cmd, process_input=None, addl_env=None, check_exit_code=True):
    LOG.debug(_("Running cmd (subprocess): %s"), cmd)
def execute(*cmd, **kwargs):
    process_input = kwargs.get('process_input', None)
    addl_env = kwargs.get('addl_env', None)
    check_exit_code = kwargs.get('check_exit_code', 0)
    stdin = kwargs.get('stdin', subprocess.PIPE)
    stdout = kwargs.get('stdout', subprocess.PIPE)
    stderr = kwargs.get('stderr', subprocess.PIPE)
    cmd = map(str, cmd)

    LOG.debug(_("Running cmd (subprocess): %s"), ' '.join(cmd))
    env = os.environ.copy()
    if addl_env:
        env.update(addl_env)
    obj = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
        stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
    obj = subprocess.Popen(cmd, stdin=stdin,
            stdout=stdout, stderr=stderr, env=env)
    result = None
    if process_input != None:
        result = obj.communicate(process_input)
@@ -143,12 +155,13 @@ def execute(cmd, process_input=None, addl_env=None, check_exit_code=True):
    obj.stdin.close()
    if obj.returncode:
        LOG.debug(_("Result was %s") % obj.returncode)
        if check_exit_code and obj.returncode != 0:
        if type(check_exit_code) == types.IntType \
                and obj.returncode != check_exit_code:
            (stdout, stderr) = result
            raise ProcessExecutionError(exit_code=obj.returncode,
                                        stdout=stdout,
                                        stderr=stderr,
                                        cmd=cmd)
                                        cmd=' '.join(cmd))
    # NOTE(termie): this appears to be necessary to let the subprocess call
    #               clean something up in between calls, without it two
    #               execute calls in a row hangs the second one
@@ -158,7 +171,7 @@ def execute(cmd, process_input=None, addl_env=None, check_exit_code=True):

def ssh_execute(ssh, cmd, process_input=None,
                addl_env=None, check_exit_code=True):
    LOG.debug(_("Running cmd (SSH): %s"), cmd)
    LOG.debug(_("Running cmd (SSH): %s"), ' '.join(cmd))
    if addl_env:
        raise exception.Error("Environment not supported over SSH")

@@ -187,7 +200,7 @@ def ssh_execute(ssh, cmd, process_input=None,
            raise exception.ProcessExecutionError(exit_code=exit_status,
                                                  stdout=stdout,
                                                  stderr=stderr,
                                                  cmd=cmd)
                                                  cmd=' '.join(cmd))

    return (stdout, stderr)

@@ -220,9 +233,9 @@ def debug(arg):
    return arg


def runthis(prompt, cmd, check_exit_code=True):
    LOG.debug(_("Running %s"), (cmd))
    rv, err = execute(cmd, check_exit_code=check_exit_code)
def runthis(prompt, *cmd, **kwargs):
    LOG.debug(_("Running %s"), (" ".join(cmd)))
    rv, err = execute(*cmd, **kwargs)


def generate_uid(topic, size=8):
@@ -254,7 +267,7 @@ def last_octet(address):

def  get_my_linklocal(interface):
    try:
        if_str = execute("ip -f inet6 -o addr show %s" % interface)
        if_str = execute("ip", "-f", "inet6", "-o", "addr", "show", interface)
        condition = "\s+inet6\s+([0-9a-f:]+)/\d+\s+scope\s+link"
        links = [re.search(condition, x) for x in if_str[0].split('\n')]
        address = [w.group(1) for w in links if w is not None]
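
Note: the execute() rewrite above switches from a single shell string (run with shell=True) to positional argv-style arguments and no shell. A minimal before/after sketch of a call site, taken from the get_my_linklocal() change in this same commit:

    # old: whole command passed as one shell string
    out, err = execute("ip -f inet6 -o addr show %s" % interface)

    # new: one argument per token, passed straight to subprocess
    out, err = execute("ip", "-f", "inet6", "-o", "addr", "show", interface)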
@@ -491,6 +504,18 @@ def loads(s):
    return json.loads(s)


def synchronized(name):
    def wrap(f):
        @functools.wraps(f)
        def inner(*args, **kwargs):
            lock = lockfile.FileLock(os.path.join(FLAGS.lock_path,
                                                  'nova-%s.lock' % name))
            with lock:
                return f(*args, **kwargs)
        return inner
    return wrap


def ensure_b64_encoding(val):
    """Safety method to ensure that values expected to be base64-encoded
    actually are. If they are, the value is returned unchanged. Otherwise,
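
A short usage sketch for the new synchronized() decorator, grounded in the LockTestCase added earlier in this commit (the function name below is illustrative): callers that wrap functions with the same lock name serialize against a file lock kept under FLAGS.lock_path.

    @synchronized('image-cache')
    def refresh_cache():
        # only one process at a time holds nova-image-cache.lock
        pass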
							
								
								
									
nova/wsgi.py | 133
@@ -36,6 +36,7 @@ import webob.exc

from paste import deploy

from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
@@ -82,6 +83,35 @@ class Server(object):
                             log=WritableLogger(logger))


class Request(webob.Request):

    def best_match_content_type(self):
        """
        Determine the most acceptable content-type based on the
        query extension then the Accept header
        """

        parts = self.path.rsplit(".", 1)

        if len(parts) > 1:
            format = parts[1]
            if format in ["json", "xml"]:
                return "application/{0}".format(parts[1])

        ctypes = ["application/json", "application/xml"]
        bm = self.accept.best_match(ctypes)

        return bm or "application/json"

    def get_content_type(self):
        try:
            ct = self.headers["Content-Type"]
            assert ct in ("application/xml", "application/json")
            return ct
        except Exception:
            raise webob.exc.HTTPBadRequest("Invalid content type")


class Application(object):
    """Base WSGI application wrapper. Subclasses need to implement __call__."""
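
Based on the Request class added above, content negotiation prefers a ".json"/".xml" path suffix over the Accept header, and JSON is the default. A hedged example (request paths are illustrative):

    Request.blank('/servers/1.xml').best_match_content_type()
    # -> "application/xml"
    Request.blank('/servers/1').best_match_content_type()
    # -> "application/json" (JSON wins when neither suffix nor Accept decides)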
@@ -113,7 +143,7 @@ class Application(object):
    def __call__(self, environ, start_response):
        r"""Subclasses will probably want to implement __call__ like this:

        @webob.dec.wsgify
        @webob.dec.wsgify(RequestClass=Request)
        def __call__(self, req):
          # Any of the following objects work as responses:

@@ -199,7 +229,7 @@ class Middleware(Application):
        """Do whatever you'd like to the response."""
        return response

    @webob.dec.wsgify
    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        response = self.process_request(req)
        if response:
@@ -212,7 +242,7 @@ class Debug(Middleware):
    """Helper class that can be inserted into any WSGI application chain
    to get information about the request and response."""

    @webob.dec.wsgify
    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        print ("*" * 40) + " REQUEST ENVIRON"
        for key, value in req.environ.items():
@@ -276,7 +306,7 @@ class Router(object):
        self._router = routes.middleware.RoutesMiddleware(self._dispatch,
                                                          self.map)

    @webob.dec.wsgify
    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        """
        Route the incoming request to a controller based on self.map.
@@ -285,7 +315,7 @@ class Router(object):
        return self._router

    @staticmethod
    @webob.dec.wsgify
    @webob.dec.wsgify(RequestClass=Request)
    def _dispatch(req):
        """
        Called by self._router after matching the incoming request to a route
@@ -304,11 +334,11 @@ class Controller(object):
    WSGI app that reads routing information supplied by RoutesMiddleware
    and calls the requested action method upon itself.  All action methods
    must, in addition to their normal parameters, accept a 'req' argument
    which is the incoming webob.Request.  They raise a webob.exc exception,
    which is the incoming wsgi.Request.  They raise a webob.exc exception,
    or return a dict which will be serialized by requested content type.
    """

    @webob.dec.wsgify
    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        """
        Call the method specified in req.environ by RoutesMiddleware.
@@ -318,32 +348,45 @@ class Controller(object):
        method = getattr(self, action)
        del arg_dict['controller']
        del arg_dict['action']
        if 'format' in arg_dict:
            del arg_dict['format']
        arg_dict['req'] = req
        result = method(**arg_dict)

        if type(result) is dict:
            return self._serialize(result, req)
            content_type = req.best_match_content_type()
            body = self._serialize(result, content_type)

            response = webob.Response()
            response.headers["Content-Type"] = content_type
            response.body = body
            return response

        else:
            return result

    def _serialize(self, data, request):
    def _serialize(self, data, content_type):
        """
        Serialize the given dict to the response type requested in request.
        Serialize the given dict to the provided content_type.
        Uses self._serialization_metadata if it exists, which is a dict mapping
        MIME types to information needed to serialize to that type.
        """
        _metadata = getattr(type(self), "_serialization_metadata", {})
        serializer = Serializer(request.environ, _metadata)
        return serializer.to_content_type(data)
        serializer = Serializer(_metadata)
        try:
            return serializer.serialize(data, content_type)
        except exception.InvalidContentType:
            raise webob.exc.HTTPNotAcceptable()

    def _deserialize(self, data, request):
    def _deserialize(self, data, content_type):
        """
        Deserialize the request body to the response type requested in request.
        Deserialize the request body to the specefied content type.
        Uses self._serialization_metadata if it exists, which is a dict mapping
        MIME types to information needed to serialize to that type.
        """
        _metadata = getattr(type(self), "_serialization_metadata", {})
        serializer = Serializer(request.environ, _metadata)
        return serializer.deserialize(data)
        serializer = Serializer(_metadata)
        return serializer.deserialize(data, content_type)


class Serializer(object):
@@ -351,50 +394,52 @@ class Serializer(object):
    Serializes and deserializes dictionaries to certain MIME types.
    """

    def __init__(self, environ, metadata=None):
    def __init__(self, metadata=None):
        """
        Create a serializer based on the given WSGI environment.
        'metadata' is an optional dict mapping MIME types to information
        needed to serialize a dictionary to that type.
        """
        self.metadata = metadata or {}
        req = webob.Request.blank('', environ)
        suffix = req.path_info.split('.')[-1].lower()
        if suffix == 'json':
            self.handler = self._to_json
        elif suffix == 'xml':
            self.handler = self._to_xml
        elif 'application/json' in req.accept:
            self.handler = self._to_json
        elif 'application/xml' in req.accept:
            self.handler = self._to_xml
        else:
            # This is the default
            self.handler = self._to_json

    def to_content_type(self, data):
    def _get_serialize_handler(self, content_type):
        handlers = {
            "application/json": self._to_json,
            "application/xml": self._to_xml,
        }

        try:
            return handlers[content_type]
        except Exception:
            raise exception.InvalidContentType()

    def serialize(self, data, content_type):
        """
        Serialize a dictionary into a string.

        The format of the string will be decided based on the Content Type
        requested in self.environ: by Accept: header, or by URL suffix.
        Serialize a dictionary into a string of the specified content type.
        """
        return self.handler(data)
        return self._get_serialize_handler(content_type)(data)

    def deserialize(self, datastring):
    def deserialize(self, datastring, content_type):
        """
        Deserialize a string to a dictionary.

        The string must be in the format of a supported MIME type.
        """
        datastring = datastring.strip()
        return self.get_deserialize_handler(content_type)(datastring)

    def get_deserialize_handler(self, content_type):
        handlers = {
            "application/json": self._from_json,
            "application/xml": self._from_xml,
        }

        try:
            is_xml = (datastring[0] == '<')
            if not is_xml:
                return utils.loads(datastring)
            return self._from_xml(datastring)
        except:
            return None
            return handlers[content_type]
        except Exception:
            raise exception.InvalidContentType()

    def _from_json(self, datastring):
        return utils.loads(datastring)

    def _from_xml(self, datastring):
        xmldata = self.metadata.get('application/xml', {})
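
With the refactor above, Serializer no longer inspects the WSGI environ; callers pass the negotiated content type explicitly, and unsupported types surface as exception.InvalidContentType (which Controller translates into HTTP 406). A hedged usage sketch based only on the code shown:

    serializer = Serializer()
    body = serializer.serialize({'server': {'id': 1}}, 'application/json')
    data = serializer.deserialize(body, 'application/json')
    # serializer.serialize(data, 'text/plain') would raise InvalidContentType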