Merge trunk.
@@ -536,7 +536,7 @@ class FloatingIpCommands(object):
        for floating_ip in floating_ips:
            instance = None
            if floating_ip['fixed_ip']:
                instance = floating_ip['fixed_ip']['instance']['ec2_id']
                instance = floating_ip['fixed_ip']['instance']['hostname']
            print "%s\t%s\t%s" % (floating_ip['host'],
                                  floating_ip['address'],
                                  instance)

@@ -42,6 +42,7 @@ from nova import exception
from nova import flags
from nova import utils
from nova import wsgi
import nova.api.openstack.wsgi


# Global storage for registering modules.

@@ -251,7 +252,7 @@ class Reflection(object):
        return self._methods[method]


class ServiceWrapper(wsgi.Controller):
class ServiceWrapper(object):
    """Wrapper to dynamically provide a WSGI controller for arbitrary objects.

    With lightweight introspection allows public methods on the object to
@@ -265,7 +266,7 @@ class ServiceWrapper(wsgi.Controller):
    def __init__(self, service_handle):
        self.service_handle = service_handle

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    @webob.dec.wsgify(RequestClass=nova.api.openstack.wsgi.Request)
    def __call__(self, req):
        arg_dict = req.environ['wsgiorg.routing_args'][1]
        action = arg_dict['action']

@@ -289,8 +290,11 @@ class ServiceWrapper(wsgi.Controller):

        try:
            content_type = req.best_match_content_type()
            default_xmlns = self.get_default_xmlns(req)
            return self._serialize(result, content_type, default_xmlns)
            serializer = {
                'application/xml': nova.api.openstack.wsgi.XMLDictSerializer(),
                'application/json': nova.api.openstack.wsgi.JSONDictSerializer(),
            }[content_type]
            return serializer.serialize(result)
        except:
            raise exception.Error("returned non-serializable type: %s"
                                  % result)

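The replacement block above is the serialization pattern this merge introduces everywhere: instead of a _serialize() helper driven by class-level metadata, the caller looks up a serializer object keyed by the request's best-match content type and calls its serialize() method. A minimal, self-contained sketch of the idea (these classes are simplified stand-ins, not the nova.api.openstack.wsgi implementations):

    import json
    from xml.etree import ElementTree as ET

    class JSONDictSerializer(object):
        def serialize(self, data):
            return json.dumps(data)

    class XMLDictSerializer(object):
        def serialize(self, data):
            # Expects a dict with a single root key, e.g. {'server': {...}}.
            root_name, value = list(data.items())[0]
            root = ET.Element(root_name)
            for key, val in value.items():
                child = ET.SubElement(root, key)
                child.text = str(val)
            return ET.tostring(root)

    serializers = {
        'application/json': JSONDictSerializer(),
        'application/xml': XMLDictSerializer(),
    }

    def serialize(result, content_type):
        # The caller picks the serializer by the request's best-match content type.
        return serializers[content_type].serialize(result)

    print(serialize({'server': {'id': 1, 'name': 'test'}}, 'application/json'))
    print(serialize({'server': {'id': 1, 'name': 'test'}}, 'application/xml'))

Keying the lookup on content type keeps the controller free of any knowledge about XML or JSON formatting.
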
@@ -23,6 +23,7 @@ import webob.exc

from nova import log as logging
from nova import flags
from nova import utils
from nova import wsgi
from nova.api.ec2 import cloud

@@ -71,7 +72,15 @@ class MetadataRequestHandler(wsgi.Application):
        remote_address = req.remote_addr
        if FLAGS.use_forwarded_for:
            remote_address = req.headers.get('X-Forwarded-For', remote_address)
        meta_data = cc.get_metadata(remote_address)
        try:
            meta_data = cc.get_metadata(remote_address)
        except Exception:
            LOG.exception(_('Failed to get metadata for ip: %s'),
                          remote_address)
            msg = _('An unknown error has occurred. '
                    'Please try your request again.')
            exc = webob.exc.HTTPInternalServerError(explanation=unicode(msg))
            return exc
        if meta_data is None:
            LOG.error(_('Failed to get metadata for ip: %s'), remote_address)
            raise webob.exc.HTTPNotFound()

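The metadata handler hunk wraps the get_metadata() call so an unexpected backend failure is logged and returned as a generic 500 instead of leaking a traceback, while a None result still maps to 404. A rough sketch of that control flow, with a stubbed lookup standing in for cloud.CloudController.get_metadata() (names here are illustrative):

    import logging

    logging.basicConfig()
    LOG = logging.getLogger('metadata.sketch')

    def lookup_metadata(address):
        # Stand-in for cc.get_metadata(remote_address); returns None when unknown.
        known = {'10.0.0.5': {'hostname': 'vm-1'}}
        return known.get(address)

    def handle_request(address):
        """Return (status, body) the way the patched handler resolves errors."""
        try:
            meta_data = lookup_metadata(address)
        except Exception:
            # Unexpected backend failure: log it, hide the details from the client.
            LOG.exception('Failed to get metadata for ip: %s', address)
            return 500, 'An unknown error has occurred. Please try your request again.'
        if meta_data is None:
            # The lookup worked but nothing is registered for this address.
            LOG.error('Failed to get metadata for ip: %s', address)
            return 404, ''
        return 200, str(meta_data)

    print(handle_request('10.0.0.5'))
    print(handle_request('192.168.1.1'))

The important distinction is that only the unexpected-exception path hides details from the client; the "no data" path stays an ordinary 404.
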
@@ -26,7 +26,7 @@ import webob.exc
|
||||
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
from nova import wsgi
|
||||
from nova import wsgi as base_wsgi
|
||||
from nova.api.openstack import accounts
|
||||
from nova.api.openstack import faults
|
||||
from nova.api.openstack import backup_schedules
|
||||
@@ -40,6 +40,7 @@ from nova.api.openstack import servers
|
||||
from nova.api.openstack import server_metadata
|
||||
from nova.api.openstack import shared_ip_groups
|
||||
from nova.api.openstack import users
|
||||
from nova.api.openstack import wsgi
|
||||
from nova.api.openstack import zones
|
||||
|
||||
|
||||
@@ -50,7 +51,7 @@ flags.DEFINE_bool('allow_admin_api',
|
||||
'When True, this API service will accept admin operations.')
|
||||
|
||||
|
||||
class FaultWrapper(wsgi.Middleware):
|
||||
class FaultWrapper(base_wsgi.Middleware):
|
||||
"""Calls down the middleware stack, making exceptions into faults."""
|
||||
|
||||
@webob.dec.wsgify(RequestClass=wsgi.Request)
|
||||
@@ -63,7 +64,7 @@ class FaultWrapper(wsgi.Middleware):
|
||||
return faults.Fault(exc)
|
||||
|
||||
|
||||
class APIRouter(wsgi.Router):
|
||||
class APIRouter(base_wsgi.Router):
|
||||
"""
|
||||
Routes requests on the OpenStack API to the appropriate controller
|
||||
and method.
|
||||
@@ -97,19 +98,21 @@ class APIRouter(wsgi.Router):
|
||||
server_members['reset_network'] = 'POST'
|
||||
server_members['inject_network_info'] = 'POST'
|
||||
|
||||
mapper.resource("zone", "zones", controller=zones.Controller(),
|
||||
mapper.resource("zone", "zones",
|
||||
controller=zones.create_resource(),
|
||||
collection={'detail': 'GET', 'info': 'GET',
|
||||
'select': 'GET'})
|
||||
|
||||
mapper.resource("user", "users", controller=users.Controller(),
|
||||
mapper.resource("user", "users",
|
||||
controller=users.create_resource(),
|
||||
collection={'detail': 'GET'})
|
||||
|
||||
mapper.resource("account", "accounts",
|
||||
controller=accounts.Controller(),
|
||||
controller=accounts.create_resource(),
|
||||
collection={'detail': 'GET'})
|
||||
|
||||
mapper.resource("console", "consoles",
|
||||
controller=consoles.Controller(),
|
||||
controller=consoles.create_resource(),
|
||||
parent_resource=dict(member_name='server',
|
||||
collection_name='servers'))
|
||||
|
||||
@@ -122,31 +125,31 @@ class APIRouterV10(APIRouter):
|
||||
def _setup_routes(self, mapper):
|
||||
super(APIRouterV10, self)._setup_routes(mapper)
|
||||
mapper.resource("server", "servers",
|
||||
controller=servers.ControllerV10(),
|
||||
controller=servers.create_resource('1.0'),
|
||||
collection={'detail': 'GET'},
|
||||
member=self.server_members)
|
||||
|
||||
mapper.resource("image", "images",
|
||||
controller=images.ControllerV10(),
|
||||
controller=images.create_resource('1.0'),
|
||||
collection={'detail': 'GET'})
|
||||
|
||||
mapper.resource("flavor", "flavors",
|
||||
controller=flavors.ControllerV10(),
|
||||
controller=flavors.create_resource('1.0'),
|
||||
collection={'detail': 'GET'})
|
||||
|
||||
mapper.resource("shared_ip_group", "shared_ip_groups",
|
||||
collection={'detail': 'GET'},
|
||||
controller=shared_ip_groups.Controller())
|
||||
controller=shared_ip_groups.create_resource())
|
||||
|
||||
mapper.resource("backup_schedule", "backup_schedule",
|
||||
controller=backup_schedules.Controller(),
|
||||
controller=backup_schedules.create_resource(),
|
||||
parent_resource=dict(member_name='server',
|
||||
collection_name='servers'))
|
||||
|
||||
mapper.resource("limit", "limits",
|
||||
controller=limits.LimitsControllerV10())
|
||||
controller=limits.create_resource('1.0'))
|
||||
|
||||
mapper.resource("ip", "ips", controller=ips.Controller(),
|
||||
mapper.resource("ip", "ips", controller=ips.create_resource(),
|
||||
collection=dict(public='GET', private='GET'),
|
||||
parent_resource=dict(member_name='server',
|
||||
collection_name='servers'))
|
||||
@@ -158,27 +161,27 @@ class APIRouterV11(APIRouter):
|
||||
def _setup_routes(self, mapper):
|
||||
super(APIRouterV11, self)._setup_routes(mapper)
|
||||
mapper.resource("server", "servers",
|
||||
controller=servers.ControllerV11(),
|
||||
controller=servers.create_resource('1.1'),
|
||||
collection={'detail': 'GET'},
|
||||
member=self.server_members)
|
||||
|
||||
mapper.resource("image", "images",
|
||||
controller=images.ControllerV11(),
|
||||
controller=images.create_resource('1.1'),
|
||||
collection={'detail': 'GET'})
|
||||
|
||||
mapper.resource("image_meta", "meta",
|
||||
controller=image_metadata.Controller(),
|
||||
controller=image_metadata.create_resource(),
|
||||
parent_resource=dict(member_name='image',
|
||||
collection_name='images'))
|
||||
|
||||
mapper.resource("server_meta", "meta",
|
||||
controller=server_metadata.Controller(),
|
||||
controller=server_metadata.create_resource(),
|
||||
parent_resource=dict(member_name='server',
|
||||
collection_name='servers'))
|
||||
|
||||
mapper.resource("flavor", "flavors",
|
||||
controller=flavors.ControllerV11(),
|
||||
controller=flavors.create_resource('1.1'),
|
||||
collection={'detail': 'GET'})
|
||||
|
||||
mapper.resource("limit", "limits",
|
||||
controller=limits.LimitsControllerV11())
|
||||
controller=limits.create_resource('1.1'))
|
||||
|
||||
@@ -20,8 +20,9 @@ from nova import flags
|
||||
from nova import log as logging
|
||||
|
||||
from nova.auth import manager
|
||||
from nova.api.openstack import common
|
||||
from nova.api.openstack import faults
|
||||
from nova.api.openstack import wsgi
|
||||
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
LOG = logging.getLogger('nova.api.openstack')
|
||||
@@ -34,12 +35,7 @@ def _translate_keys(account):
|
||||
manager=account.project_manager_id)
|
||||
|
||||
|
||||
class Controller(common.OpenstackController):
|
||||
|
||||
_serialization_metadata = {
|
||||
'application/xml': {
|
||||
"attributes": {
|
||||
"account": ["id", "name", "description", "manager"]}}}
|
||||
class Controller(object):
|
||||
|
||||
def __init__(self):
|
||||
self.manager = manager.AuthManager()
|
||||
@@ -66,20 +62,33 @@ class Controller(common.OpenstackController):
|
||||
self.manager.delete_project(id)
|
||||
return {}
|
||||
|
||||
def create(self, req):
|
||||
def create(self, req, body):
|
||||
"""We use update with create-or-update semantics
|
||||
because the id comes from an external source"""
|
||||
raise faults.Fault(webob.exc.HTTPNotImplemented())
|
||||
|
||||
def update(self, req, id):
|
||||
def update(self, req, id, body):
|
||||
"""This is really create or update."""
|
||||
self._check_admin(req.environ['nova.context'])
|
||||
env = self._deserialize(req.body, req.get_content_type())
|
||||
description = env['account'].get('description')
|
||||
manager = env['account'].get('manager')
|
||||
description = body['account'].get('description')
|
||||
manager = body['account'].get('manager')
|
||||
try:
|
||||
account = self.manager.get_project(id)
|
||||
self.manager.modify_project(id, manager, description)
|
||||
except exception.NotFound:
|
||||
account = self.manager.create_project(id, manager, description)
|
||||
return dict(account=_translate_keys(account))


def create_resource():
    metadata = {
        "attributes": {
            "account": ["id", "name", "description", "manager"],
        },
    }

    serializers = {
        'application/xml': wsgi.XMLDictSerializer(metadata=metadata),
    }

    return wsgi.Resource(Controller(), serializers=serializers)

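This accounts change shows the shape repeated for users, consoles, backup schedules, and the rest of the merge: the controller drops its wsgi.Controller base and _serialization_metadata attribute, and a module-level create_resource() factory wires the plain controller into a Resource that owns the serializers. A toy version of that wiring (Resource and the serializer class here are simplified stand-ins for the nova.api.openstack.wsgi ones):

    import json

    class JSONDictSerializer(object):
        def serialize(self, data):
            return json.dumps(data)

    class Resource(object):
        """Dispatches an action on a plain controller, then serializes the result."""

        def __init__(self, controller, serializers=None):
            self.controller = controller
            self.serializers = serializers or {'application/json': JSONDictSerializer()}

        def dispatch(self, action, content_type, **kwargs):
            handler = getattr(self.controller, action)
            result = handler(**kwargs)
            return self.serializers[content_type].serialize(result)

    class AccountController(object):
        """Plain object: no wsgi.Controller base, no _serialization_metadata."""

        def show(self, id):
            return {'account': {'id': id, 'name': 'demo', 'manager': 'alice'}}

    def create_resource():
        serializers = {'application/json': JSONDictSerializer()}
        return Resource(AccountController(), serializers=serializers)

    resource = create_resource()
    print(resource.dispatch('show', 'application/json', id='project1'))

Because the Resource owns dispatch and serialization, controller methods can return plain dicts and stay easy to unit-test.
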
@@ -19,9 +19,8 @@ import time
|
||||
|
||||
from webob import exc
|
||||
|
||||
from nova.api.openstack import common
|
||||
from nova.api.openstack import faults
|
||||
import nova.image.service
|
||||
from nova.api.openstack import wsgi
|
||||
|
||||
|
||||
def _translate_keys(inst):
|
||||
@@ -29,14 +28,9 @@ def _translate_keys(inst):
|
||||
return dict(backupSchedule=inst)
|
||||
|
||||
|
||||
class Controller(common.OpenstackController):
|
||||
class Controller(object):
|
||||
""" The backup schedule API controller for the Openstack API """
|
||||
|
||||
_serialization_metadata = {
|
||||
'application/xml': {
|
||||
'attributes': {
|
||||
'backupSchedule': []}}}
|
||||
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
@@ -48,7 +42,7 @@ class Controller(common.OpenstackController):
|
||||
""" Returns a single backup schedule for a given instance """
|
||||
return faults.Fault(exc.HTTPNotImplemented())
|
||||
|
||||
def create(self, req, server_id):
|
||||
def create(self, req, server_id, body):
|
||||
""" No actual update method required, since the existing API allows
|
||||
both create and update through a POST """
|
||||
return faults.Fault(exc.HTTPNotImplemented())
|
||||
@@ -56,3 +50,18 @@ class Controller(common.OpenstackController):
|
||||
def delete(self, req, server_id, id):
|
||||
""" Deletes an existing backup schedule """
|
||||
return faults.Fault(exc.HTTPNotImplemented())
|
||||
|
||||
|
||||
def create_resource():
|
||||
metadata = {
|
||||
'attributes': {
|
||||
'backupSchedule': [],
|
||||
},
|
||||
}
|
||||
|
||||
serializers = {
|
||||
'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V10,
|
||||
metadata=metadata),
|
||||
}
|
||||
|
||||
return wsgi.Resource(Controller(), serializers=serializers)
|
||||
|
||||
@@ -23,7 +23,6 @@ import webob
|
||||
from nova import exception
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
from nova import wsgi
|
||||
|
||||
|
||||
LOG = logging.getLogger('nova.api.openstack.common')
|
||||
@@ -146,9 +145,3 @@ def get_id_from_href(href):
|
||||
except:
|
||||
LOG.debug(_("Error extracting id from href: %s") % href)
|
||||
raise webob.exc.HTTPBadRequest(_('could not parse id from href'))
|
||||
|
||||
|
||||
class OpenstackController(wsgi.Controller):
|
||||
def get_default_xmlns(self, req):
|
||||
# Use V10 by default
|
||||
return XML_NS_V10
|
||||
|
||||
@@ -19,8 +19,8 @@ from webob import exc
|
||||
|
||||
from nova import console
|
||||
from nova import exception
|
||||
from nova.api.openstack import common
|
||||
from nova.api.openstack import faults
|
||||
from nova.api.openstack import wsgi
|
||||
|
||||
|
||||
def _translate_keys(cons):
|
||||
@@ -43,17 +43,11 @@ def _translate_detail_keys(cons):
|
||||
return dict(console=info)
|
||||
|
||||
|
||||
class Controller(common.OpenstackController):
|
||||
"""The Consoles Controller for the Openstack API"""
|
||||
|
||||
_serialization_metadata = {
|
||||
'application/xml': {
|
||||
'attributes': {
|
||||
'console': []}}}
|
||||
class Controller(object):
|
||||
"""The Consoles controller for the Openstack API"""
|
||||
|
||||
def __init__(self):
|
||||
self.console_api = console.API()
|
||||
super(Controller, self).__init__()
|
||||
|
||||
def index(self, req, server_id):
|
||||
"""Returns a list of consoles for this instance"""
|
||||
@@ -63,9 +57,8 @@ class Controller(common.OpenstackController):
|
||||
return dict(consoles=[_translate_keys(console)
|
||||
for console in consoles])
|
||||
|
||||
def create(self, req, server_id):
|
||||
def create(self, req, server_id, body):
|
||||
"""Creates a new console"""
|
||||
#info = self._deserialize(req.body, req.get_content_type())
|
||||
self.console_api.create_console(
|
||||
req.environ['nova.context'],
|
||||
int(server_id))
|
||||
@@ -94,3 +87,17 @@ class Controller(common.OpenstackController):
|
||||
except exception.NotFound:
|
||||
return faults.Fault(exc.HTTPNotFound())
|
||||
return exc.HTTPAccepted()
|
||||
|
||||
|
||||
def create_resource():
|
||||
metadata = {
|
||||
'attributes': {
|
||||
'console': [],
|
||||
},
|
||||
}
|
||||
|
||||
serializers = {
|
||||
'application/xml': wsgi.XMLDictSerializer(metadata=metadata),
|
||||
}
|
||||
|
||||
return wsgi.Resource(Controller(), serializers=serializers)
|
||||
|
||||
@@ -22,7 +22,6 @@ from nova import exception
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
from nova import volume
|
||||
from nova import wsgi
|
||||
from nova.api.openstack import common
|
||||
from nova.api.openstack import extensions
|
||||
from nova.api.openstack import faults
|
||||
@@ -64,7 +63,7 @@ def _translate_volume_summary_view(context, vol):
|
||||
return d
|
||||
|
||||
|
||||
class VolumeController(wsgi.Controller):
|
||||
class VolumeController(object):
|
||||
"""The Volumes API controller for the OpenStack API."""
|
||||
|
||||
_serialization_metadata = {
|
||||
@@ -124,15 +123,14 @@ class VolumeController(wsgi.Controller):
|
||||
res = [entity_maker(context, vol) for vol in limited_list]
|
||||
return {'volumes': res}
|
||||
|
||||
def create(self, req):
|
||||
def create(self, req, body):
|
||||
"""Creates a new volume."""
|
||||
context = req.environ['nova.context']
|
||||
|
||||
env = self._deserialize(req.body, req.get_content_type())
|
||||
if not env:
|
||||
if not body:
|
||||
return faults.Fault(exc.HTTPUnprocessableEntity())
|
||||
|
||||
vol = env['volume']
|
||||
vol = body['volume']
|
||||
size = vol['size']
|
||||
LOG.audit(_("Create volume of %s GB"), size, context=context)
|
||||
new_volume = self.volume_api.create(context, size, None,
|
||||
@@ -175,7 +173,7 @@ def _translate_attachment_summary_view(_context, vol):
|
||||
return d
|
||||
|
||||
|
||||
class VolumeAttachmentController(wsgi.Controller):
|
||||
class VolumeAttachmentController(object):
|
||||
"""The volume attachment API controller for the Openstack API.
|
||||
|
||||
A child resource of the server. Note that we use the volume id
|
||||
@@ -219,17 +217,16 @@ class VolumeAttachmentController(wsgi.Controller):
|
||||
return {'volumeAttachment': _translate_attachment_detail_view(context,
|
||||
vol)}
|
||||
|
||||
def create(self, req, server_id):
|
||||
def create(self, req, server_id, body):
|
||||
"""Attach a volume to an instance."""
|
||||
context = req.environ['nova.context']
|
||||
|
||||
env = self._deserialize(req.body, req.get_content_type())
|
||||
if not env:
|
||||
if not body:
|
||||
return faults.Fault(exc.HTTPUnprocessableEntity())
|
||||
|
||||
instance_id = server_id
|
||||
volume_id = env['volumeAttachment']['volumeId']
|
||||
device = env['volumeAttachment']['device']
|
||||
volume_id = body['volumeAttachment']['volumeId']
|
||||
device = body['volumeAttachment']['device']
|
||||
|
||||
msg = _("Attach volume %(volume_id)s to instance %(server_id)s"
|
||||
" at %(device)s") % locals()
|
||||
@@ -259,7 +256,7 @@ class VolumeAttachmentController(wsgi.Controller):
|
||||
# TODO(justinsb): How do I return "accepted" here?
|
||||
return {'volumeAttachment': attachment}
|
||||
|
||||
def update(self, _req, _server_id, _id):
|
||||
def update(self, req, server_id, id, body):
|
||||
"""Update a volume attachment. We don't currently support this."""
|
||||
return faults.Fault(exc.HTTPBadRequest())
|
||||
|
||||
|
||||
@@ -27,9 +27,10 @@ import webob.exc
|
||||
from nova import exception
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
from nova import wsgi
|
||||
from nova import wsgi as base_wsgi
|
||||
from nova.api.openstack import common
|
||||
from nova.api.openstack import faults
|
||||
from nova.api.openstack import wsgi
|
||||
|
||||
|
||||
LOG = logging.getLogger('extensions')
|
||||
@@ -115,28 +116,34 @@ class ExtensionDescriptor(object):
|
||||
return request_exts
|
||||
|
||||
|
||||
class ActionExtensionController(common.OpenstackController):
|
||||
|
||||
class ActionExtensionController(object):
|
||||
def __init__(self, application):
|
||||
|
||||
self.application = application
|
||||
self.action_handlers = {}
|
||||
|
||||
def add_action(self, action_name, handler):
|
||||
self.action_handlers[action_name] = handler
|
||||
|
||||
def action(self, req, id):
|
||||
|
||||
input_dict = self._deserialize(req.body, req.get_content_type())
|
||||
def action(self, req, id, body):
|
||||
for action_name, handler in self.action_handlers.iteritems():
|
||||
if action_name in input_dict:
|
||||
return handler(input_dict, req, id)
|
||||
if action_name in body:
|
||||
return handler(body, req, id)
|
||||
# no action handler found (bump to downstream application)
|
||||
res = self.application
|
||||
return res
|
||||
|
||||
|
||||
class RequestExtensionController(common.OpenstackController):
|
||||
class ActionExtensionResource(wsgi.Resource):
|
||||
|
||||
def __init__(self, application):
|
||||
controller = ActionExtensionController(application)
|
||||
super(ActionExtensionResource, self).__init__(controller)
|
||||
|
||||
def add_action(self, action_name, handler):
|
||||
self.controller.add_action(action_name, handler)
|
||||
|
||||
|
||||
class RequestExtensionController(object):
|
||||
|
||||
def __init__(self, application):
|
||||
self.application = application
|
||||
@@ -153,7 +160,17 @@ class RequestExtensionController(common.OpenstackController):
|
||||
return res
|
||||
|
||||
|
||||
class ExtensionController(common.OpenstackController):
|
||||
class RequestExtensionResource(wsgi.Resource):
|
||||
|
||||
def __init__(self, application):
|
||||
controller = RequestExtensionController(application)
|
||||
super(RequestExtensionResource, self).__init__(controller)
|
||||
|
||||
def add_handler(self, handler):
|
||||
self.controller.add_handler(handler)
|
||||
|
||||
|
||||
class ExtensionsResource(wsgi.Resource):
|
||||
|
||||
def __init__(self, extension_manager):
|
||||
self.extension_manager = extension_manager
|
||||
@@ -186,7 +203,7 @@ class ExtensionController(common.OpenstackController):
|
||||
raise faults.Fault(webob.exc.HTTPNotFound())
|
||||
|
||||
|
||||
class ExtensionMiddleware(wsgi.Middleware):
|
||||
class ExtensionMiddleware(base_wsgi.Middleware):
|
||||
"""Extensions middleware for WSGI."""
|
||||
@classmethod
|
||||
def factory(cls, global_config, **local_config):
|
||||
@@ -195,43 +212,43 @@ class ExtensionMiddleware(wsgi.Middleware):
|
||||
return cls(app, **local_config)
|
||||
return _factory
|
||||
|
||||
def _action_ext_controllers(self, application, ext_mgr, mapper):
|
||||
"""Return a dict of ActionExtensionController-s by collection."""
|
||||
action_controllers = {}
|
||||
def _action_ext_resources(self, application, ext_mgr, mapper):
|
||||
"""Return a dict of ActionExtensionResource-s by collection."""
|
||||
action_resources = {}
|
||||
for action in ext_mgr.get_actions():
|
||||
if not action.collection in action_controllers.keys():
|
||||
controller = ActionExtensionController(application)
|
||||
if not action.collection in action_resources.keys():
|
||||
resource = ActionExtensionResource(application)
|
||||
mapper.connect("/%s/:(id)/action.:(format)" %
|
||||
action.collection,
|
||||
action='action',
|
||||
controller=controller,
|
||||
controller=resource,
|
||||
conditions=dict(method=['POST']))
|
||||
mapper.connect("/%s/:(id)/action" % action.collection,
|
||||
action='action',
|
||||
controller=controller,
|
||||
controller=resource,
|
||||
conditions=dict(method=['POST']))
|
||||
action_controllers[action.collection] = controller
|
||||
action_resources[action.collection] = resource
|
||||
|
||||
return action_controllers
|
||||
return action_resources
|
||||
|
||||
def _request_ext_controllers(self, application, ext_mgr, mapper):
|
||||
"""Returns a dict of RequestExtensionController-s by collection."""
|
||||
request_ext_controllers = {}
|
||||
def _request_ext_resources(self, application, ext_mgr, mapper):
|
||||
"""Returns a dict of RequestExtensionResource-s by collection."""
|
||||
request_ext_resources = {}
|
||||
for req_ext in ext_mgr.get_request_extensions():
|
||||
if not req_ext.key in request_ext_controllers.keys():
|
||||
controller = RequestExtensionController(application)
|
||||
if not req_ext.key in request_ext_resources.keys():
|
||||
resource = RequestExtensionResource(application)
|
||||
mapper.connect(req_ext.url_route + '.:(format)',
|
||||
action='process',
|
||||
controller=controller,
|
||||
controller=resource,
|
||||
conditions=req_ext.conditions)
|
||||
|
||||
mapper.connect(req_ext.url_route,
|
||||
action='process',
|
||||
controller=controller,
|
||||
controller=resource,
|
||||
conditions=req_ext.conditions)
|
||||
request_ext_controllers[req_ext.key] = controller
|
||||
request_ext_resources[req_ext.key] = resource
|
||||
|
||||
return request_ext_controllers
|
||||
return request_ext_resources
|
||||
|
||||
def __init__(self, application, ext_mgr=None):
|
||||
|
||||
@@ -246,22 +263,22 @@ class ExtensionMiddleware(wsgi.Middleware):
|
||||
LOG.debug(_('Extended resource: %s'),
|
||||
resource.collection)
|
||||
mapper.resource(resource.collection, resource.collection,
|
||||
controller=resource.controller,
|
||||
controller=wsgi.Resource(resource.controller),
|
||||
collection=resource.collection_actions,
|
||||
member=resource.member_actions,
|
||||
parent_resource=resource.parent)
|
||||
|
||||
# extended actions
|
||||
action_controllers = self._action_ext_controllers(application, ext_mgr,
|
||||
action_resources = self._action_ext_resources(application, ext_mgr,
|
||||
mapper)
|
||||
for action in ext_mgr.get_actions():
|
||||
LOG.debug(_('Extended action: %s'), action.action_name)
|
||||
controller = action_controllers[action.collection]
|
||||
controller.add_action(action.action_name, action.handler)
|
||||
resource = action_resources[action.collection]
|
||||
resource.add_action(action.action_name, action.handler)
|
||||
|
||||
# extended requests
|
||||
req_controllers = self._request_ext_controllers(application, ext_mgr,
|
||||
mapper)
|
||||
req_controllers = self._request_ext_resources(application, ext_mgr,
|
||||
mapper)
|
||||
for request_ext in ext_mgr.get_request_extensions():
|
||||
LOG.debug(_('Extended request: %s'), request_ext.key)
|
||||
controller = req_controllers[request_ext.key]
|
||||
@@ -313,7 +330,7 @@ class ExtensionManager(object):
|
||||
"""Returns a list of ResourceExtension objects."""
|
||||
resources = []
|
||||
resources.append(ResourceExtension('extensions',
|
||||
ExtensionController(self)))
|
||||
ExtensionsResource(self)))
|
||||
for alias, ext in self.extensions.iteritems():
|
||||
try:
|
||||
resources.extend(ext.get_resources())
|
||||
@@ -410,7 +427,7 @@ class ExtensionManager(object):
|
||||
|
||||
|
||||
class RequestExtension(object):
|
||||
"""Extend requests and responses of core nova OpenStack API controllers.
|
||||
"""Extend requests and responses of core nova OpenStack API resources.
|
||||
|
||||
Provide a way to add data to responses and handle custom request data
|
||||
that is sent to core nova OpenStack API controllers.
|
||||
@@ -424,7 +441,7 @@ class RequestExtension(object):


class ActionExtension(object):
    """Add custom actions to core nova OpenStack API controllers."""
    """Add custom actions to core nova OpenStack API resources."""

    def __init__(self, collection, action_name, handler):
        self.collection = collection

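A recurring detail in the extensions changes above is the thin Resource subclass (ActionExtensionResource, RequestExtensionResource) that wraps a plain controller but re-exposes its registration method, so the routing layer keeps talking to a Resource while the handlers live on the controller. A stripped-down illustration of that wrapping (class names here are simplified stand-ins, not the nova classes):

    class Resource(object):
        """Minimal stand-in for the wsgi.Resource wrapper."""

        def __init__(self, controller):
            self.controller = controller

    class ActionController(object):
        def __init__(self):
            self.action_handlers = {}

        def add_action(self, action_name, handler):
            self.action_handlers[action_name] = handler

        def action(self, body, req_id):
            # Dispatch to the first registered handler named in the body.
            for action_name, handler in self.action_handlers.items():
                if action_name in body:
                    return handler(body, req_id)
            return 'not implemented'

    class ActionResource(Resource):
        """Wraps the controller but keeps add_action() callable on the resource."""

        def __init__(self):
            super(ActionResource, self).__init__(ActionController())

        def add_action(self, action_name, handler):
            self.controller.add_action(action_name, handler)

    resource = ActionResource()
    resource.add_action('reboot', lambda body, req_id: 'rebooting %s' % req_id)
    print(resource.controller.action({'reboot': {'type': 'HARD'}}, 'server-1'))
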
@@ -19,8 +19,7 @@
|
||||
import webob.dec
|
||||
import webob.exc
|
||||
|
||||
from nova import wsgi
|
||||
from nova.api.openstack import common
|
||||
from nova.api.openstack import wsgi
|
||||
|
||||
|
||||
class Fault(webob.exc.HTTPException):
|
||||
@@ -55,13 +54,21 @@ class Fault(webob.exc.HTTPException):
|
||||
if code == 413:
|
||||
retry = self.wrapped_exc.headers['Retry-After']
|
||||
fault_data[fault_name]['retryAfter'] = retry
|
||||
|
||||
# 'code' is an attribute on the fault tag itself
|
||||
metadata = {'application/xml': {'attributes': {fault_name: 'code'}}}
|
||||
default_xmlns = common.XML_NS_V10
|
||||
serializer = wsgi.Serializer(metadata, default_xmlns)
|
||||
metadata = {'attributes': {fault_name: 'code'}}
|
||||
|
||||
content_type = req.best_match_content_type()
|
||||
self.wrapped_exc.body = serializer.serialize(fault_data, content_type)
|
||||
|
||||
serializer = {
|
||||
'application/xml': wsgi.XMLDictSerializer(metadata=metadata,
|
||||
xmlns=wsgi.XMLNS_V10),
|
||||
'application/json': wsgi.JSONDictSerializer(),
|
||||
}[content_type]
|
||||
|
||||
self.wrapped_exc.body = serializer.serialize(fault_data)
|
||||
self.wrapped_exc.content_type = content_type
|
||||
|
||||
return self.wrapped_exc
|
||||
|
||||
|
||||
@@ -70,14 +77,6 @@ class OverLimitFault(webob.exc.HTTPException):
|
||||
Rate-limited request response.
|
||||
"""
|
||||
|
||||
_serialization_metadata = {
|
||||
"application/xml": {
|
||||
"attributes": {
|
||||
"overLimitFault": "code",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
def __init__(self, message, details, retry_time):
|
||||
"""
|
||||
Initialize new `OverLimitFault` with relevant information.
|
||||
@@ -97,8 +96,16 @@ class OverLimitFault(webob.exc.HTTPException):
|
||||
Return the wrapped exception with a serialized body conforming to our
|
||||
error format.
|
||||
"""
|
||||
serializer = wsgi.Serializer(self._serialization_metadata)
|
||||
content_type = request.best_match_content_type()
|
||||
content = serializer.serialize(self.content, content_type)
|
||||
metadata = {"attributes": {"overLimitFault": "code"}}
|
||||
|
||||
serializer = {
|
||||
'application/xml': wsgi.XMLDictSerializer(metadata=metadata,
|
||||
xmlns=wsgi.XMLNS_V10),
|
||||
'application/json': wsgi.JSONDictSerializer(),
|
||||
}[content_type]
|
||||
|
||||
content = serializer.serialize(self.content)
|
||||
self.wrapped_exc.body = content
|
||||
|
||||
return self.wrapped_exc
|
||||
|
||||
@@ -19,22 +19,13 @@ import webob
|
||||
|
||||
from nova import db
|
||||
from nova import exception
|
||||
from nova.api.openstack import common
|
||||
from nova.api.openstack import views
|
||||
from nova.api.openstack import wsgi
|
||||
|
||||
|
||||
class Controller(common.OpenstackController):
|
||||
class Controller(object):
|
||||
"""Flavor controller for the OpenStack API."""
|
||||
|
||||
_serialization_metadata = {
|
||||
'application/xml': {
|
||||
"attributes": {
|
||||
"flavor": ["id", "name", "ram", "disk"],
|
||||
"link": ["rel", "type", "href"],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
def index(self, req):
|
||||
"""Return all flavors in brief."""
|
||||
items = self._get_flavors(req, is_detail=False)
|
||||
@@ -71,14 +62,31 @@ class Controller(common.OpenstackController):
|
||||
|
||||
|
||||
class ControllerV10(Controller):
|
||||
|
||||
def _get_view_builder(self, req):
|
||||
return views.flavors.ViewBuilder()
|
||||
|
||||
|
||||
class ControllerV11(Controller):
|
||||
|
||||
def _get_view_builder(self, req):
|
||||
base_url = req.application_url
|
||||
return views.flavors.ViewBuilderV11(base_url)
|
||||
|
||||
def get_default_xmlns(self, req):
|
||||
return common.XML_NS_V11
|
||||
|
||||
def create_resource(version='1.0'):
    controller = {
        '1.0': ControllerV10,
        '1.1': ControllerV11,
    }[version]()

    xmlns = {
        '1.0': wsgi.XMLNS_V10,
        '1.1': wsgi.XMLNS_V11,
    }[version]

    serializers = {
        'application/xml': wsgi.XMLDictSerializer(xmlns=xmlns),
    }

    return wsgi.Resource(controller, serializers=serializers)

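For the versioned resources (servers, images, flavors, limits), create_resource() takes a version string and uses dict indexing to pick both the controller class and the XML namespace, so an unsupported version fails fast with a KeyError. A small illustration of the idiom, independent of nova (the namespace URNs below are placeholders, not the real XMLNS_V10/XMLNS_V11 values):

    class ControllerV10(object):
        def index(self):
            return {'flavors': [{'id': 1, 'name': 'm1.tiny'}]}

    class ControllerV11(object):
        def index(self):
            # The 1.1 variant adds links to each entity in this toy example.
            return {'flavors': [{'id': 1, 'name': 'm1.tiny', 'links': []}]}

    XMLNS = {
        '1.0': 'urn:example:compute:v1.0',  # placeholder namespaces
        '1.1': 'urn:example:compute:v1.1',
    }

    def create_resource(version='1.0'):
        # Dict indexing doubles as validation: an unknown version raises KeyError.
        controller = {
            '1.0': ControllerV10,
            '1.1': ControllerV11,
        }[version]()
        return controller, XMLNS[version]

    controller, xmlns = create_resource('1.1')
    print(controller.index(), xmlns)
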
@@ -20,20 +20,18 @@ from webob import exc
|
||||
from nova import flags
|
||||
from nova import quota
|
||||
from nova import utils
|
||||
from nova import wsgi
|
||||
from nova.api.openstack import common
|
||||
from nova.api.openstack import faults
|
||||
from nova.api.openstack import wsgi
|
||||
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
|
||||
|
||||
class Controller(common.OpenstackController):
|
||||
class Controller(object):
|
||||
"""The image metadata API controller for the Openstack API"""
|
||||
|
||||
def __init__(self):
|
||||
self.image_service = utils.import_object(FLAGS.image_service)
|
||||
super(Controller, self).__init__()
|
||||
|
||||
def _get_metadata(self, context, image_id, image=None):
|
||||
if not image:
|
||||
@@ -64,9 +62,8 @@ class Controller(common.OpenstackController):
|
||||
else:
|
||||
return faults.Fault(exc.HTTPNotFound())
|
||||
|
||||
def create(self, req, image_id):
|
||||
def create(self, req, image_id, body):
|
||||
context = req.environ['nova.context']
|
||||
body = self._deserialize(req.body, req.get_content_type())
|
||||
img = self.image_service.show(context, image_id)
|
||||
metadata = self._get_metadata(context, image_id, img)
|
||||
if 'metadata' in body:
|
||||
@@ -77,9 +74,8 @@ class Controller(common.OpenstackController):
|
||||
self.image_service.update(context, image_id, img, None)
|
||||
return dict(metadata=metadata)
|
||||
|
||||
def update(self, req, image_id, id):
|
||||
def update(self, req, image_id, id, body):
|
||||
context = req.environ['nova.context']
|
||||
body = self._deserialize(req.body, req.get_content_type())
|
||||
if not id in body:
|
||||
expl = _('Request body and URI mismatch')
|
||||
raise exc.HTTPBadRequest(explanation=expl)
|
||||
@@ -104,3 +100,11 @@ class Controller(common.OpenstackController):
|
||||
metadata.pop(id)
|
||||
img['properties'] = metadata
|
||||
self.image_service.update(context, image_id, img, None)
|
||||
|
||||
|
||||
def create_resource():
|
||||
serializers = {
|
||||
'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V11),
|
||||
}
|
||||
|
||||
return wsgi.Resource(Controller(), serializers=serializers)
|
||||
|
||||
@@ -23,6 +23,7 @@ from nova import utils
|
||||
from nova.api.openstack import common
|
||||
from nova.api.openstack import faults
|
||||
from nova.api.openstack.views import images as images_view
|
||||
from nova.api.openstack import wsgi
|
||||
|
||||
|
||||
LOG = log.getLogger('nova.api.openstack.images')
|
||||
@@ -31,18 +32,8 @@ FLAGS = flags.FLAGS
|
||||
SUPPORTED_FILTERS = ['name', 'status']
|
||||
|
||||
|
||||
class Controller(common.OpenstackController):
|
||||
"""Base `wsgi.Controller` for retrieving/displaying images."""
|
||||
|
||||
_serialization_metadata = {
|
||||
'application/xml': {
|
||||
"attributes": {
|
||||
"image": ["id", "name", "updated", "created", "status",
|
||||
"serverId", "progress"],
|
||||
"link": ["rel", "type", "href"],
|
||||
},
|
||||
},
|
||||
}
|
||||
class Controller(object):
|
||||
"""Base controller for retrieving/displaying images."""
|
||||
|
||||
def __init__(self, image_service=None, compute_service=None):
|
||||
"""Initialize new `ImageController`.
|
||||
@@ -126,21 +117,20 @@ class Controller(common.OpenstackController):
|
||||
self._image_service.delete(context, image_id)
|
||||
return webob.exc.HTTPNoContent()
|
||||
|
||||
def create(self, req):
|
||||
def create(self, req, body):
|
||||
"""Snapshot a server instance and save the image.
|
||||
|
||||
:param req: `wsgi.Request` object
|
||||
"""
|
||||
context = req.environ['nova.context']
|
||||
content_type = req.get_content_type()
|
||||
image = self._deserialize(req.body, content_type)
|
||||
|
||||
if not image:
|
||||
if not body:
|
||||
raise webob.exc.HTTPBadRequest()
|
||||
|
||||
try:
|
||||
server_id = image["image"]["serverId"]
|
||||
image_name = image["image"]["name"]
|
||||
server_id = body["image"]["serverId"]
|
||||
image_name = body["image"]["name"]
|
||||
except KeyError:
|
||||
raise webob.exc.HTTPBadRequest()
|
||||
|
||||
@@ -169,5 +159,29 @@ class ControllerV11(Controller):
|
||||
base_url = request.application_url
|
||||
return images_view.ViewBuilderV11(base_url)
|
||||
|
||||
def get_default_xmlns(self, req):
|
||||
return common.XML_NS_V11
|
||||
|
||||
def create_resource(version='1.0'):
|
||||
controller = {
|
||||
'1.0': ControllerV10,
|
||||
'1.1': ControllerV11,
|
||||
}[version]()
|
||||
|
||||
xmlns = {
|
||||
'1.0': wsgi.XMLNS_V10,
|
||||
'1.1': wsgi.XMLNS_V11,
|
||||
}[version]
|
||||
|
||||
metadata = {
|
||||
"attributes": {
|
||||
"image": ["id", "name", "updated", "created", "status",
|
||||
"serverId", "progress"],
|
||||
"link": ["rel", "type", "href"],
|
||||
},
|
||||
}
|
||||
|
||||
serializers = {
|
||||
'application/xml': wsgi.XMLDictSerializer(xmlns=xmlns,
|
||||
metadata=metadata),
|
||||
}
|
||||
|
||||
return wsgi.Resource(controller, serializers=serializers)
|
||||
|
||||
@@ -20,23 +20,14 @@ import time
|
||||
from webob import exc
|
||||
|
||||
import nova
|
||||
import nova.api.openstack.views.addresses
|
||||
from nova.api.openstack import common
|
||||
from nova.api.openstack import faults
|
||||
import nova.api.openstack.views.addresses
|
||||
from nova.api.openstack import wsgi
|
||||
|
||||
|
||||
class Controller(common.OpenstackController):
|
||||
class Controller(object):
|
||||
"""The servers addresses API controller for the Openstack API."""
|
||||
|
||||
_serialization_metadata = {
|
||||
'application/xml': {
|
||||
'list_collections': {
|
||||
'public': {'item_name': 'ip', 'item_key': 'addr'},
|
||||
'private': {'item_name': 'ip', 'item_key': 'addr'},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
def __init__(self):
|
||||
self.compute_api = nova.compute.API()
|
||||
self.builder = nova.api.openstack.views.addresses.ViewBuilderV10()
|
||||
@@ -65,8 +56,24 @@ class Controller(common.OpenstackController):
|
||||
def show(self, req, server_id, id):
|
||||
return faults.Fault(exc.HTTPNotImplemented())
|
||||
|
||||
def create(self, req, server_id):
|
||||
def create(self, req, server_id, body):
|
||||
return faults.Fault(exc.HTTPNotImplemented())
|
||||
|
||||
def delete(self, req, server_id, id):
|
||||
return faults.Fault(exc.HTTPNotImplemented())
|
||||
|
||||
|
||||
def create_resource():
|
||||
metadata = {
|
||||
'list_collections': {
|
||||
'public': {'item_name': 'ip', 'item_key': 'addr'},
|
||||
'private': {'item_name': 'ip', 'item_key': 'addr'},
|
||||
},
|
||||
}
|
||||
|
||||
serializers = {
|
||||
'application/xml': wsgi.XMLDictSerializer(metadata=metadata,
|
||||
xmlns=wsgi.XMLNS_V10),
|
||||
}
|
||||
|
||||
return wsgi.Resource(Controller(), serializers=serializers)
|
||||
|
||||
@@ -31,10 +31,12 @@ from collections import defaultdict
|
||||
from webob.dec import wsgify
|
||||
|
||||
from nova import quota
|
||||
from nova import wsgi as base_wsgi
|
||||
from nova import wsgi
|
||||
from nova.api.openstack import common
|
||||
from nova.api.openstack import faults
|
||||
from nova.api.openstack.views import limits as limits_views
|
||||
from nova.api.openstack import wsgi
|
||||
|
||||
|
||||
# Convenience constants for the limits dictionary passed to Limiter().
|
||||
@@ -44,23 +46,11 @@ PER_HOUR = 60 * 60
|
||||
PER_DAY = 60 * 60 * 24
|
||||
|
||||
|
||||
class LimitsController(common.OpenstackController):
|
||||
class LimitsController(object):
|
||||
"""
|
||||
Controller for accessing limits in the OpenStack API.
|
||||
"""
|
||||
|
||||
_serialization_metadata = {
|
||||
"application/xml": {
|
||||
"attributes": {
|
||||
"limit": ["verb", "URI", "uri", "regex", "value", "unit",
|
||||
"resetTime", "next-available", "remaining", "name"],
|
||||
},
|
||||
"plurals": {
|
||||
"rate": "limit",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
def index(self, req):
|
||||
"""
|
||||
Return all global and rate limit information.
|
||||
@@ -86,6 +76,35 @@ class LimitsControllerV11(LimitsController):
|
||||
return limits_views.ViewBuilderV11()
|
||||
|
||||
|
||||
def create_resource(version='1.0'):
|
||||
controller = {
|
||||
'1.0': LimitsControllerV10,
|
||||
'1.1': LimitsControllerV11,
|
||||
}[version]()
|
||||
|
||||
xmlns = {
|
||||
'1.0': wsgi.XMLNS_V10,
|
||||
'1.1': wsgi.XMLNS_V11,
|
||||
}[version]
|
||||
|
||||
metadata = {
|
||||
"attributes": {
|
||||
"limit": ["verb", "URI", "uri", "regex", "value", "unit",
|
||||
"resetTime", "next-available", "remaining", "name"],
|
||||
},
|
||||
"plurals": {
|
||||
"rate": "limit",
|
||||
},
|
||||
}
|
||||
|
||||
serializers = {
|
||||
'application/xml': wsgi.XMLDictSerializer(xmlns=xmlns,
|
||||
metadata=metadata)
|
||||
}
|
||||
|
||||
return wsgi.Resource(controller, serializers=serializers)
|
||||
|
||||
|
||||
class Limit(object):
|
||||
"""
|
||||
Stores information about a limit for HTTP requests.
|
||||
@@ -197,7 +216,7 @@ DEFAULT_LIMITS = [
|
||||
]
|
||||
|
||||
|
||||
class RateLimitingMiddleware(wsgi.Middleware):
|
||||
class RateLimitingMiddleware(base_wsgi.Middleware):
|
||||
"""
|
||||
Rate-limits requests passing through this middleware. All limit information
|
||||
is stored in memory for this implementation.
|
||||
@@ -211,7 +230,7 @@ class RateLimitingMiddleware(wsgi.Middleware):
|
||||
@param application: WSGI application to wrap
|
||||
@param limits: List of dictionaries describing limits
|
||||
"""
|
||||
wsgi.Middleware.__init__(self, application)
|
||||
base_wsgi.Middleware.__init__(self, application)
|
||||
self._limiter = Limiter(limits or DEFAULT_LIMITS)
|
||||
|
||||
@wsgify(RequestClass=wsgi.Request)
|
||||
|
||||
@@ -19,12 +19,11 @@ from webob import exc
|
||||
|
||||
from nova import compute
|
||||
from nova import quota
|
||||
from nova import wsgi
|
||||
from nova.api.openstack import common
|
||||
from nova.api.openstack import faults
|
||||
from nova.api.openstack import wsgi
|
||||
|
||||
|
||||
class Controller(common.OpenstackController):
|
||||
class Controller(object):
|
||||
""" The server metadata API controller for the Openstack API """
|
||||
|
||||
def __init__(self):
|
||||
@@ -43,10 +42,9 @@ class Controller(common.OpenstackController):
|
||||
context = req.environ['nova.context']
|
||||
return self._get_metadata(context, server_id)
|
||||
|
||||
def create(self, req, server_id):
|
||||
def create(self, req, server_id, body):
|
||||
context = req.environ['nova.context']
|
||||
data = self._deserialize(req.body, req.get_content_type())
|
||||
metadata = data.get('metadata')
|
||||
metadata = body.get('metadata')
|
||||
try:
|
||||
self.compute_api.update_or_create_instance_metadata(context,
|
||||
server_id,
|
||||
@@ -55,9 +53,8 @@ class Controller(common.OpenstackController):
|
||||
self._handle_quota_error(error)
|
||||
return req.body
|
||||
|
||||
def update(self, req, server_id, id):
|
||||
def update(self, req, server_id, id, body):
|
||||
context = req.environ['nova.context']
|
||||
body = self._deserialize(req.body, req.get_content_type())
|
||||
if not id in body:
|
||||
expl = _('Request body and URI mismatch')
|
||||
raise exc.HTTPBadRequest(explanation=expl)
|
||||
@@ -92,3 +89,11 @@ class Controller(common.OpenstackController):
|
||||
if error.code == "MetadataLimitExceeded":
|
||||
raise exc.HTTPBadRequest(explanation=error.message)
|
||||
raise error
|
||||
|
||||
|
||||
def create_resource():
|
||||
serializers = {
|
||||
'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V11),
|
||||
}
|
||||
|
||||
return wsgi.Resource(Controller(), serializers=serializers)
|
||||
|
||||
@@ -31,6 +31,7 @@ import nova.api.openstack.views.addresses
|
||||
import nova.api.openstack.views.flavors
|
||||
import nova.api.openstack.views.images
|
||||
import nova.api.openstack.views.servers
|
||||
from nova.api.openstack import wsgi
|
||||
from nova.auth import manager as auth_manager
|
||||
from nova.compute import instance_types
|
||||
import nova.api.openstack
|
||||
@@ -41,31 +42,12 @@ LOG = logging.getLogger('nova.api.openstack.servers')
|
||||
FLAGS = flags.FLAGS
|
||||
|
||||
|
||||
class Controller(common.OpenstackController):
|
||||
class Controller(object):
|
||||
""" The Server API controller for the OpenStack API """
|
||||
|
||||
_serialization_metadata = {
|
||||
"application/xml": {
|
||||
"attributes": {
|
||||
"server": ["id", "imageId", "name", "flavorId", "hostId",
|
||||
"status", "progress", "adminPass", "flavorRef",
|
||||
"imageRef"],
|
||||
"link": ["rel", "type", "href"],
|
||||
},
|
||||
"dict_collections": {
|
||||
"metadata": {"item_name": "meta", "item_key": "key"},
|
||||
},
|
||||
"list_collections": {
|
||||
"public": {"item_name": "ip", "item_key": "addr"},
|
||||
"private": {"item_name": "ip", "item_key": "addr"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
def __init__(self):
|
||||
self.compute_api = compute.API()
|
||||
self._image_service = utils.import_object(FLAGS.image_service)
|
||||
super(Controller, self).__init__()
|
||||
|
||||
def index(self, req):
|
||||
""" Returns a list of server names and ids for a given user """
|
||||
@@ -122,15 +104,14 @@ class Controller(common.OpenstackController):
|
||||
return faults.Fault(exc.HTTPNotFound())
|
||||
return exc.HTTPAccepted()
|
||||
|
||||
def create(self, req):
|
||||
def create(self, req, body):
|
||||
""" Creates a new server for a given user """
|
||||
env = self._deserialize_create(req)
|
||||
if not env:
|
||||
if not body:
|
||||
return faults.Fault(exc.HTTPUnprocessableEntity())
|
||||
|
||||
context = req.environ['nova.context']
|
||||
|
||||
password = self._get_server_admin_password(env['server'])
|
||||
password = self._get_server_admin_password(body['server'])
|
||||
|
||||
key_name = None
|
||||
key_data = None
|
||||
@@ -140,7 +121,7 @@ class Controller(common.OpenstackController):
|
||||
key_name = key_pair['name']
|
||||
key_data = key_pair['public_key']
|
||||
|
||||
requested_image_id = self._image_id_from_req_data(env)
|
||||
requested_image_id = self._image_id_from_req_data(body)
|
||||
try:
|
||||
image_id = common.get_image_id_from_image_hash(self._image_service,
|
||||
context, requested_image_id)
|
||||
@@ -151,18 +132,18 @@ class Controller(common.OpenstackController):
|
||||
kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image(
|
||||
req, image_id)
|
||||
|
||||
personality = env['server'].get('personality')
|
||||
personality = body['server'].get('personality')
|
||||
injected_files = []
|
||||
if personality:
|
||||
injected_files = self._get_injected_files(personality)
|
||||
|
||||
flavor_id = self._flavor_id_from_req_data(env)
|
||||
flavor_id = self._flavor_id_from_req_data(body)
|
||||
|
||||
if not 'name' in env['server']:
|
||||
if not 'name' in body['server']:
|
||||
msg = _("Server name is not defined")
|
||||
return exc.HTTPBadRequest(msg)
|
||||
|
||||
name = env['server']['name']
|
||||
name = body['server']['name']
|
||||
self._validate_server_name(name)
|
||||
name = name.strip()
|
||||
|
||||
@@ -179,7 +160,7 @@ class Controller(common.OpenstackController):
|
||||
display_description=name,
|
||||
key_name=key_name,
|
||||
key_data=key_data,
|
||||
metadata=env['server'].get('metadata', {}),
|
||||
metadata=body['server'].get('metadata', {}),
|
||||
injected_files=injected_files,
|
||||
admin_password=password)
|
||||
except quota.QuotaError as error:
|
||||
@@ -193,18 +174,6 @@ class Controller(common.OpenstackController):
|
||||
server['server']['adminPass'] = password
|
||||
return server
|
||||
|
||||
def _deserialize_create(self, request):
|
||||
"""
|
||||
Deserialize a create request
|
||||
|
||||
Overrides normal behavior in the case of xml content
|
||||
"""
|
||||
if request.content_type == "application/xml":
|
||||
deserializer = ServerCreateRequestXMLDeserializer()
|
||||
return deserializer.deserialize(request.body)
|
||||
else:
|
||||
return self._deserialize(request.body, request.get_content_type())
|
||||
|
||||
def _get_injected_files(self, personality):
|
||||
"""
|
||||
Create a list of injected files from the personality attribute
|
||||
@@ -254,24 +223,23 @@ class Controller(common.OpenstackController):
|
||||
return utils.generate_password(16)
|
||||
|
||||
@scheduler_api.redirect_handler
|
||||
def update(self, req, id):
|
||||
def update(self, req, id, body):
|
||||
""" Updates the server name or password """
|
||||
if len(req.body) == 0:
|
||||
raise exc.HTTPUnprocessableEntity()
|
||||
|
||||
inst_dict = self._deserialize(req.body, req.get_content_type())
|
||||
if not inst_dict:
|
||||
if not body:
|
||||
return faults.Fault(exc.HTTPUnprocessableEntity())
|
||||
|
||||
ctxt = req.environ['nova.context']
|
||||
update_dict = {}
|
||||
|
||||
if 'name' in inst_dict['server']:
|
||||
name = inst_dict['server']['name']
|
||||
if 'name' in body['server']:
|
||||
name = body['server']['name']
|
||||
self._validate_server_name(name)
|
||||
update_dict['display_name'] = name.strip()
|
||||
|
||||
self._parse_update(ctxt, id, inst_dict, update_dict)
|
||||
self._parse_update(ctxt, id, body, update_dict)
|
||||
|
||||
try:
|
||||
self.compute_api.update(ctxt, id, **update_dict)
|
||||
@@ -293,7 +261,7 @@ class Controller(common.OpenstackController):
|
||||
pass
|
||||
|
||||
@scheduler_api.redirect_handler
|
||||
def action(self, req, id):
|
||||
def action(self, req, id, body):
|
||||
"""Multi-purpose method used to reboot, rebuild, or
|
||||
resize a server"""
|
||||
|
||||
@@ -306,10 +274,9 @@ class Controller(common.OpenstackController):
|
||||
'rebuild': self._action_rebuild,
|
||||
}
|
||||
|
||||
input_dict = self._deserialize(req.body, req.get_content_type())
|
||||
for key in actions.keys():
|
||||
if key in input_dict:
|
||||
return actions[key](input_dict, req, id)
|
||||
if key in body:
|
||||
return actions[key](body, req, id)
|
||||
return faults.Fault(exc.HTTPNotImplemented())
|
||||
|
||||
def _action_change_password(self, input_dict, req, id):
|
||||
@@ -332,19 +299,7 @@ class Controller(common.OpenstackController):
|
||||
return exc.HTTPAccepted()
|
||||
|
||||
def _action_resize(self, input_dict, req, id):
|
||||
""" Resizes a given instance to the flavor size requested """
|
||||
try:
|
||||
if 'resize' in input_dict and 'flavorId' in input_dict['resize']:
|
||||
flavor_id = input_dict['resize']['flavorId']
|
||||
self.compute_api.resize(req.environ['nova.context'], id,
|
||||
flavor_id)
|
||||
else:
|
||||
LOG.exception(_("Missing arguments for resize"))
|
||||
return faults.Fault(exc.HTTPUnprocessableEntity())
|
||||
except Exception, e:
|
||||
LOG.exception(_("Error in resize %s"), e)
|
||||
return faults.Fault(exc.HTTPBadRequest())
|
||||
return exc.HTTPAccepted()
|
||||
return exc.HTTPNotImplemented()
|
||||
|
||||
def _action_reboot(self, input_dict, req, id):
|
||||
if 'reboot' in input_dict and 'type' in input_dict['reboot']:
|
||||
@@ -409,7 +364,7 @@ class Controller(common.OpenstackController):
|
||||
return exc.HTTPAccepted()
|
||||
|
||||
@scheduler_api.redirect_handler
|
||||
def reset_network(self, req, id):
|
||||
def reset_network(self, req, id, body):
|
||||
"""
|
||||
Reset networking on an instance (admin only).
|
||||
|
||||
@@ -424,7 +379,7 @@ class Controller(common.OpenstackController):
|
||||
return exc.HTTPAccepted()
|
||||
|
||||
@scheduler_api.redirect_handler
|
||||
def inject_network_info(self, req, id):
|
||||
def inject_network_info(self, req, id, body):
|
||||
"""
|
||||
Inject network info for an instance (admin only).
|
||||
|
||||
@@ -439,7 +394,7 @@ class Controller(common.OpenstackController):
|
||||
return exc.HTTPAccepted()
|
||||
|
||||
@scheduler_api.redirect_handler
|
||||
def pause(self, req, id):
|
||||
def pause(self, req, id, body):
|
||||
""" Permit Admins to Pause the server. """
|
||||
ctxt = req.environ['nova.context']
|
||||
try:
|
||||
@@ -451,7 +406,7 @@ class Controller(common.OpenstackController):
|
||||
return exc.HTTPAccepted()
|
||||
|
||||
@scheduler_api.redirect_handler
|
||||
def unpause(self, req, id):
|
||||
def unpause(self, req, id, body):
|
||||
""" Permit Admins to Unpause the server. """
|
||||
ctxt = req.environ['nova.context']
|
||||
try:
|
||||
@@ -463,7 +418,7 @@ class Controller(common.OpenstackController):
|
||||
return exc.HTTPAccepted()
|
||||
|
||||
@scheduler_api.redirect_handler
|
||||
def suspend(self, req, id):
|
||||
def suspend(self, req, id, body):
|
||||
"""permit admins to suspend the server"""
|
||||
context = req.environ['nova.context']
|
||||
try:
|
||||
@@ -475,7 +430,7 @@ class Controller(common.OpenstackController):
|
||||
return exc.HTTPAccepted()
|
||||
|
||||
@scheduler_api.redirect_handler
|
||||
def resume(self, req, id):
|
||||
def resume(self, req, id, body):
|
||||
"""permit admins to resume the server from suspend"""
|
||||
context = req.environ['nova.context']
|
||||
try:
|
||||
@@ -610,6 +565,21 @@ class ControllerV10(Controller):
|
||||
self.compute_api.set_admin_password(context, server_id,
|
||||
inst_dict['server']['adminPass'])
|
||||
|
||||
def _action_resize(self, input_dict, req, id):
|
||||
""" Resizes a given instance to the flavor size requested """
|
||||
try:
|
||||
if 'resize' in input_dict and 'flavorId' in input_dict['resize']:
|
||||
flavor_id = input_dict['resize']['flavorId']
|
||||
self.compute_api.resize(req.environ['nova.context'], id,
|
||||
flavor_id)
|
||||
else:
|
||||
LOG.exception(_("Missing 'flavorId' argument for resize"))
|
||||
return faults.Fault(exc.HTTPUnprocessableEntity())
|
||||
except Exception, e:
|
||||
LOG.exception(_("Error in resize %s"), e)
|
||||
return faults.Fault(exc.HTTPBadRequest())
|
||||
return exc.HTTPAccepted()
|
||||
|
||||
def _action_rebuild(self, info, request, instance_id):
|
||||
context = request.environ['nova.context']
|
||||
instance_id = int(instance_id)
|
||||
@@ -695,6 +665,22 @@ class ControllerV11(Controller):
|
||||
LOG.info(msg)
|
||||
raise faults.Fault(exc.HTTPBadRequest(explanation=msg))
|
||||
|
||||
def _action_resize(self, input_dict, req, id):
|
||||
""" Resizes a given instance to the flavor size requested """
|
||||
try:
|
||||
if 'resize' in input_dict and 'flavorRef' in input_dict['resize']:
|
||||
flavor_ref = input_dict['resize']['flavorRef']
|
||||
flavor_id = common.get_id_from_href(flavor_ref)
|
||||
self.compute_api.resize(req.environ['nova.context'], id,
|
||||
flavor_id)
|
||||
else:
|
||||
LOG.exception(_("Missing 'flavorRef' argument for resize"))
|
||||
return faults.Fault(exc.HTTPUnprocessableEntity())
|
||||
except Exception, e:
|
||||
LOG.exception(_("Error in resize %s"), e)
|
||||
return faults.Fault(exc.HTTPBadRequest())
|
||||
return exc.HTTPAccepted()
|
||||
|
||||
def _action_rebuild(self, info, request, instance_id):
|
||||
context = request.environ['nova.context']
|
||||
instance_id = int(instance_id)
|
||||
@@ -737,11 +723,8 @@ class ControllerV11(Controller):
|
||||
raise exc.HTTPBadRequest(msg)
|
||||
return password
|
||||
|
||||
def get_default_xmlns(self, req):
|
||||
return common.XML_NS_V11
|
||||
|
||||
|
||||
class ServerCreateRequestXMLDeserializer(object):
|
||||
class ServerXMLDeserializer(wsgi.XMLDeserializer):
|
||||
"""
|
||||
Deserializer to handle xml-formatted server create requests.
|
||||
|
||||
@@ -749,7 +732,7 @@ class ServerCreateRequestXMLDeserializer(object):
|
||||
and personality attributes
|
||||
"""
|
||||
|
||||
def deserialize(self, string):
|
||||
def create(self, string):
|
||||
"""Deserialize an xml-formatted server create request"""
|
||||
dom = minidom.parseString(string)
|
||||
server = self._extract_server(dom)
|
||||
@@ -816,3 +799,43 @@ class ServerCreateRequestXMLDeserializer(object):
|
||||
if child.nodeType == child.TEXT_NODE:
|
||||
return child.nodeValue
|
||||
return ""
|
||||
|
||||
|
||||
def create_resource(version='1.0'):
|
||||
controller = {
|
||||
'1.0': ControllerV10,
|
||||
'1.1': ControllerV11,
|
||||
}[version]()
|
||||
|
||||
metadata = {
|
||||
"attributes": {
|
||||
"server": ["id", "imageId", "name", "flavorId", "hostId",
|
||||
"status", "progress", "adminPass", "flavorRef",
|
||||
"imageRef"],
|
||||
"link": ["rel", "type", "href"],
|
||||
},
|
||||
"dict_collections": {
|
||||
"metadata": {"item_name": "meta", "item_key": "key"},
|
||||
},
|
||||
"list_collections": {
|
||||
"public": {"item_name": "ip", "item_key": "addr"},
|
||||
"private": {"item_name": "ip", "item_key": "addr"},
|
||||
},
|
||||
}
|
||||
|
||||
xmlns = {
|
||||
'1.0': wsgi.XMLNS_V10,
|
||||
'1.1': wsgi.XMLNS_V11,
|
||||
}[version]
|
||||
|
||||
    serializers = {
        'application/xml': wsgi.XMLDictSerializer(metadata=metadata,
                                                  xmlns=xmlns),
    }

    deserializers = {
        'application/xml': ServerXMLDeserializer(),
    }

    return wsgi.Resource(controller, serializers=serializers,
                         deserializers=deserializers)

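The servers resource is the one place a deserializer is registered as well, which is the other half of the refactoring: the Resource decodes the request body by content type before invoking the action, so controller methods now receive an explicit body argument instead of calling self._deserialize(req.body, ...) themselves. A compact sketch of that flow (again with stand-in classes rather than the nova ones):

    import json
    from xml.dom import minidom

    class JSONDeserializer(object):
        def deserialize(self, text):
            return json.loads(text)

    class ServerXMLDeserializer(object):
        """Tiny stand-in: only reads the server element's name attribute."""
        def deserialize(self, text):
            node = minidom.parseString(text).getElementsByTagName('server')[0]
            return {'server': {'name': node.getAttribute('name')}}

    deserializers = {
        'application/json': JSONDeserializer(),
        'application/xml': ServerXMLDeserializer(),
    }

    def call_action(controller_method, content_type, raw_body):
        # The Resource decodes the body first, then hands the dict to the action.
        body = deserializers[content_type].deserialize(raw_body)
        return controller_method(body)

    def create(body):
        # Controller action: the body arrives already parsed.
        return {'server': {'name': body['server']['name'], 'status': 'BUILD'}}

    print(call_action(create, 'application/json', '{"server": {"name": "vm-1"}}'))
    print(call_action(create, 'application/xml', '<server name="vm-2"/>'))
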
@@ -17,29 +17,13 @@
|
||||
|
||||
from webob import exc
|
||||
|
||||
from nova.api.openstack import common
|
||||
from nova.api.openstack import faults
|
||||
from nova.api.openstack import wsgi
|
||||
|
||||
|
||||
def _translate_keys(inst):
|
||||
""" Coerces a shared IP group instance into proper dictionary format """
|
||||
return dict(sharedIpGroup=inst)
|
||||
|
||||
|
||||
def _translate_detail_keys(inst):
|
||||
""" Coerces a shared IP group instance into proper dictionary format with
|
||||
correctly mapped attributes """
|
||||
return dict(sharedIpGroups=inst)
|
||||
|
||||
|
||||
class Controller(common.OpenstackController):
|
||||
class Controller(object):
|
||||
""" The Shared IP Groups Controller for the Openstack API """
|
||||
|
||||
_serialization_metadata = {
|
||||
'application/xml': {
|
||||
'attributes': {
|
||||
'sharedIpGroup': []}}}
|
||||
|
||||
def index(self, req):
|
||||
""" Returns a list of Shared IP Groups for the user """
|
||||
raise faults.Fault(exc.HTTPNotImplemented())
|
||||
@@ -48,7 +32,7 @@ class Controller(common.OpenstackController):
|
||||
""" Shows in-depth information on a specific Shared IP Group """
|
||||
raise faults.Fault(exc.HTTPNotImplemented())
|
||||
|
||||
def update(self, req, id):
|
||||
def update(self, req, id, body):
|
||||
""" You can't update a Shared IP Group """
|
||||
raise faults.Fault(exc.HTTPNotImplemented())
|
||||
|
||||
@@ -60,6 +44,10 @@ class Controller(common.OpenstackController):
|
||||
""" Returns a complete list of Shared IP Groups """
|
||||
raise faults.Fault(exc.HTTPNotImplemented())
|
||||
|
||||
def create(self, req):
|
||||
def create(self, req, body):
|
||||
""" Creates a new Shared IP group """
|
||||
raise faults.Fault(exc.HTTPNotImplemented())
|
||||
|
||||
|
||||
def create_resource():
|
||||
return wsgi.Resource(Controller())
|
||||
|
||||
@@ -20,8 +20,10 @@ from nova import flags
|
||||
from nova import log as logging
|
||||
from nova.api.openstack import common
|
||||
from nova.api.openstack import faults
|
||||
from nova.api.openstack import wsgi
|
||||
from nova.auth import manager
|
||||
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
LOG = logging.getLogger('nova.api.openstack')
|
||||
|
||||
@@ -34,12 +36,7 @@ def _translate_keys(user):
|
||||
admin=user.admin)
|
||||
|
||||
|
||||
class Controller(common.OpenstackController):
|
||||
|
||||
_serialization_metadata = {
|
||||
'application/xml': {
|
||||
"attributes": {
|
||||
"user": ["id", "name", "access", "secret", "admin"]}}}
|
||||
class Controller(object):
|
||||
|
||||
def __init__(self):
|
||||
self.manager = manager.AuthManager()
|
||||
@@ -81,23 +78,35 @@ class Controller(common.OpenstackController):
|
||||
self.manager.delete_user(id)
|
||||
return {}
|
||||
|
||||
def create(self, req):
|
||||
def create(self, req, body):
|
||||
self._check_admin(req.environ['nova.context'])
|
||||
env = self._deserialize(req.body, req.get_content_type())
|
||||
is_admin = env['user'].get('admin') in ('T', 'True', True)
|
||||
name = env['user'].get('name')
|
||||
access = env['user'].get('access')
|
||||
secret = env['user'].get('secret')
|
||||
is_admin = body['user'].get('admin') in ('T', 'True', True)
|
||||
name = body['user'].get('name')
|
||||
access = body['user'].get('access')
|
||||
secret = body['user'].get('secret')
|
||||
user = self.manager.create_user(name, access, secret, is_admin)
|
||||
return dict(user=_translate_keys(user))
|
||||
|
||||
def update(self, req, id):
|
||||
def update(self, req, id, body):
|
||||
self._check_admin(req.environ['nova.context'])
|
||||
env = self._deserialize(req.body, req.get_content_type())
|
||||
is_admin = env['user'].get('admin')
|
||||
is_admin = body['user'].get('admin')
|
||||
if is_admin is not None:
|
||||
is_admin = is_admin in ('T', 'True', True)
|
||||
access = env['user'].get('access')
|
||||
secret = env['user'].get('secret')
|
||||
access = body['user'].get('access')
|
||||
secret = body['user'].get('secret')
|
||||
self.manager.modify_user(id, access, secret, is_admin)
|
||||
return dict(user=_translate_keys(self.manager.get_user(id)))
|
||||
|
||||
|
||||
def create_resource():
|
||||
metadata = {
|
||||
"attributes": {
|
||||
"user": ["id", "name", "access", "secret", "admin"],
|
||||
},
|
||||
}
|
||||
|
||||
serializers = {
|
||||
'application/xml': wsgi.XMLDictSerializer(metadata=metadata),
|
||||
}
|
||||
|
||||
return wsgi.Resource(Controller(), serializers=serializers)
|
||||
|
||||
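The practical effect of the new signatures above: create() and update() no longer call self._deserialize() on req.body; wsgi.Resource hands them the already-deserialized body. A hypothetical request illustrating this (all names and values invented):

    import json
    import webob

    req = webob.Request.blank('/v1.0/users')
    req.method = 'POST'
    req.content_type = 'application/json'
    req.body = json.dumps({'user': {'name': 'alice', 'access': 'ak',
                                    'secret': 'sk', 'admin': 'True'}})
    # The Resource built by create_resource() would end up calling
    # Controller().create(req=req, body={'user': {'name': 'alice', ...}})
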
@@ -18,13 +18,26 @@
|
||||
import webob
|
||||
import webob.dec
|
||||
|
||||
from nova import wsgi
|
||||
import nova.api.openstack.views.versions
|
||||
from nova.api.openstack import wsgi
|
||||
|
||||
|
||||
class Versions(wsgi.Application):
|
||||
@webob.dec.wsgify(RequestClass=wsgi.Request)
|
||||
def __call__(self, req):
|
||||
class Versions(wsgi.Resource):
|
||||
def __init__(self):
|
||||
metadata = {
|
||||
"attributes": {
|
||||
"version": ["status", "id"],
|
||||
"link": ["rel", "href"],
|
||||
}
|
||||
}
|
||||
|
||||
serializers = {
|
||||
'application/xml': wsgi.XMLDictSerializer(metadata=metadata),
|
||||
}
|
||||
|
||||
super(Versions, self).__init__(None, serializers=serializers)
|
||||
|
||||
def dispatch(self, request, *args):
|
||||
"""Respond to a request for all OpenStack API versions."""
|
||||
version_objs = [
|
||||
{
|
||||
@@ -37,24 +50,6 @@ class Versions(wsgi.Application):
|
||||
},
|
||||
]
|
||||
|
||||
builder = nova.api.openstack.views.versions.get_view_builder(req)
|
||||
builder = nova.api.openstack.views.versions.get_view_builder(request)
|
||||
versions = [builder.build(version) for version in version_objs]
|
||||
response = dict(versions=versions)
|
||||
|
||||
metadata = {
|
||||
"application/xml": {
|
||||
"attributes": {
|
||||
"version": ["status", "id"],
|
||||
"link": ["rel", "href"],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
content_type = req.best_match_content_type()
|
||||
body = wsgi.Serializer(metadata).serialize(response, content_type)
|
||||
|
||||
response = webob.Response()
|
||||
response.content_type = content_type
|
||||
response.body = body
|
||||
|
||||
return response
|
||||
return dict(versions=versions)
|
||||
|
||||
nova/api/openstack/wsgi.py (new file, 380 lines)
@@ -0,0 +1,380 @@
|
||||
|
||||
import json
|
||||
import webob
|
||||
from xml.dom import minidom
|
||||
|
||||
from nova import exception
|
||||
from nova import log as logging
|
||||
from nova import utils
|
||||
from nova import wsgi
|
||||
|
||||
|
||||
XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0'
|
||||
XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1'
|
||||
|
||||
LOG = logging.getLogger('nova.api.openstack.wsgi')
|
||||
|
||||
|
||||
class Request(webob.Request):
|
||||
"""Add some Openstack API-specific logic to the base webob.Request."""
|
||||
|
||||
def best_match_content_type(self):
|
||||
"""Determine the requested response content-type.
|
||||
|
||||
Based on the query extension then the Accept header.
|
||||
|
||||
"""
|
||||
supported = ('application/json', 'application/xml')
|
||||
|
||||
parts = self.path.rsplit('.', 1)
|
||||
if len(parts) > 1:
|
||||
ctype = 'application/{0}'.format(parts[1])
|
||||
if ctype in supported:
|
||||
return ctype
|
||||
|
||||
bm = self.accept.best_match(supported)
|
||||
|
||||
# default to application/json if we don't find a preference
|
||||
return bm or 'application/json'
|
||||
|
||||
def get_content_type(self):
|
||||
"""Determine content type of the request body.
|
||||
|
||||
Does not do any body introspection, only checks header
|
||||
|
||||
"""
|
||||
if not "Content-Type" in self.headers:
|
||||
raise exception.InvalidContentType(content_type=None)
|
||||
|
||||
allowed_types = ("application/xml", "application/json")
|
||||
content_type = self.content_type
|
||||
|
||||
if content_type not in allowed_types:
|
||||
raise exception.InvalidContentType(content_type=content_type)
|
||||
else:
|
||||
return content_type
|
||||
|
||||
|
||||
class TextDeserializer(object):
|
||||
"""Custom request body deserialization based on controller action name."""
|
||||
|
||||
def deserialize(self, datastring, action='default'):
|
||||
"""Find local deserialization method and parse request body."""
|
||||
action_method = getattr(self, action, self.default)
|
||||
return action_method(datastring)
|
||||
|
||||
def default(self, datastring):
|
||||
"""Default deserialization code should live here"""
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
class JSONDeserializer(TextDeserializer):
|
||||
|
||||
def default(self, datastring):
|
||||
return utils.loads(datastring)
|
||||
|
||||
|
||||
class XMLDeserializer(TextDeserializer):
|
||||
|
||||
def __init__(self, metadata=None):
|
||||
"""
|
||||
:param metadata: information needed to deserialize xml into
|
||||
a dictionary.
|
||||
"""
|
||||
super(XMLDeserializer, self).__init__()
|
||||
self.metadata = metadata or {}
|
||||
|
||||
def default(self, datastring):
|
||||
plurals = set(self.metadata.get('plurals', {}))
|
||||
node = minidom.parseString(datastring).childNodes[0]
|
||||
return {node.nodeName: self._from_xml_node(node, plurals)}
|
||||
|
||||
def _from_xml_node(self, node, listnames):
|
||||
"""Convert a minidom node to a simple Python type.
|
||||
|
||||
:param listnames: list of XML node names whose subnodes should
|
||||
be considered list items.
|
||||
|
||||
"""
|
||||
if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3:
|
||||
return node.childNodes[0].nodeValue
|
||||
elif node.nodeName in listnames:
|
||||
return [self._from_xml_node(n, listnames) for n in node.childNodes]
|
||||
else:
|
||||
result = dict()
|
||||
for attr in node.attributes.keys():
|
||||
result[attr] = node.attributes[attr].nodeValue
|
||||
for child in node.childNodes:
|
||||
if child.nodeType != node.TEXT_NODE:
|
||||
result[child.nodeName] = self._from_xml_node(child,
|
||||
listnames)
|
||||
return result
|
||||
|
||||
|
||||
class RequestDeserializer(object):
|
||||
"""Break up a Request object into more useful pieces."""
|
||||
|
||||
def __init__(self, deserializers=None):
|
||||
"""
|
||||
:param deserializers: dictionary of content-type-specific deserializers
|
||||
|
||||
"""
|
||||
self.deserializers = {
|
||||
'application/xml': XMLDeserializer(),
|
||||
'application/json': JSONDeserializer(),
|
||||
}
|
||||
|
||||
self.deserializers.update(deserializers or {})
|
||||
|
||||
def deserialize(self, request):
|
||||
"""Extract necessary pieces of the request.
|
||||
|
||||
:param request: Request object
|
||||
:returns: tuple of expected controller action name, dictionary of
|
||||
keyword arguments to pass to the controller, the expected
|
||||
content type of the response
|
||||
|
||||
"""
|
||||
action_args = self.get_action_args(request.environ)
|
||||
action = action_args.pop('action', None)
|
||||
|
||||
if request.method.lower() in ('post', 'put'):
|
||||
if len(request.body) == 0:
|
||||
action_args['body'] = None
|
||||
else:
|
||||
content_type = request.get_content_type()
|
||||
deserializer = self.get_deserializer(content_type)
|
||||
|
||||
try:
|
||||
body = deserializer.deserialize(request.body, action)
|
||||
action_args['body'] = body
|
||||
except exception.InvalidContentType:
|
||||
action_args['body'] = None
|
||||
|
||||
accept = self.get_expected_content_type(request)
|
||||
|
||||
return (action, action_args, accept)
|
||||
|
||||
def get_deserializer(self, content_type):
|
||||
try:
|
||||
return self.deserializers[content_type]
|
||||
except (KeyError, TypeError):
|
||||
raise exception.InvalidContentType(content_type=content_type)
|
||||
|
||||
def get_expected_content_type(self, request):
|
||||
return request.best_match_content_type()
|
||||
|
||||
def get_action_args(self, request_environment):
|
||||
"""Parse dictionary created by routes library."""
|
||||
try:
|
||||
args = request_environment['wsgiorg.routing_args'][1].copy()
|
||||
except Exception:
|
||||
return {}
|
||||
|
||||
try:
|
||||
del args['controller']
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
try:
|
||||
del args['format']
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
return args
|
||||
|
||||
|
||||
class DictSerializer(object):
|
||||
"""Custom response body serialization based on controller action name."""
|
||||
|
||||
def serialize(self, data, action='default'):
|
||||
"""Find local serialization method and encode response body."""
|
||||
action_method = getattr(self, action, self.default)
|
||||
return action_method(data)
|
||||
|
||||
def default(self, data):
|
||||
"""Default serialization code should live here"""
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
class JSONDictSerializer(DictSerializer):
|
||||
|
||||
def default(self, data):
|
||||
return utils.dumps(data)
|
||||
|
||||
|
||||
class XMLDictSerializer(DictSerializer):
|
||||
|
||||
def __init__(self, metadata=None, xmlns=None):
|
||||
"""
|
||||
:param metadata: information needed to deserialize xml into
|
||||
a dictionary.
|
||||
:param xmlns: XML namespace to include with serialized xml
|
||||
"""
|
||||
super(XMLDictSerializer, self).__init__()
|
||||
self.metadata = metadata or {}
|
||||
self.xmlns = xmlns
|
||||
|
||||
def default(self, data):
|
||||
# We expect data to contain a single key which is the XML root.
|
||||
root_key = data.keys()[0]
|
||||
doc = minidom.Document()
|
||||
node = self._to_xml_node(doc, self.metadata, root_key, data[root_key])
|
||||
|
||||
xmlns = node.getAttribute('xmlns')
|
||||
if not xmlns and self.xmlns:
|
||||
node.setAttribute('xmlns', self.xmlns)
|
||||
|
||||
return node.toprettyxml(indent=' ')
|
||||
|
||||
def _to_xml_node(self, doc, metadata, nodename, data):
|
||||
"""Recursive method to convert data members to XML nodes."""
|
||||
result = doc.createElement(nodename)
|
||||
|
||||
# Set the xml namespace if one is specified
|
||||
# TODO(justinsb): We could also use prefixes on the keys
|
||||
xmlns = metadata.get('xmlns', None)
|
||||
if xmlns:
|
||||
result.setAttribute('xmlns', xmlns)
|
||||
|
||||
#TODO(bcwaldon): accomplish this without a type-check
|
||||
if type(data) is list:
|
||||
collections = metadata.get('list_collections', {})
|
||||
if nodename in collections:
|
||||
metadata = collections[nodename]
|
||||
for item in data:
|
||||
node = doc.createElement(metadata['item_name'])
|
||||
node.setAttribute(metadata['item_key'], str(item))
|
||||
result.appendChild(node)
|
||||
return result
|
||||
singular = metadata.get('plurals', {}).get(nodename, None)
|
||||
if singular is None:
|
||||
if nodename.endswith('s'):
|
||||
singular = nodename[:-1]
|
||||
else:
|
||||
singular = 'item'
|
||||
for item in data:
|
||||
node = self._to_xml_node(doc, metadata, singular, item)
|
||||
result.appendChild(node)
|
||||
#TODO(bcwaldon): accomplish this without a type-check
|
||||
elif type(data) is dict:
|
||||
collections = metadata.get('dict_collections', {})
|
||||
if nodename in collections:
|
||||
metadata = collections[nodename]
|
||||
for k, v in data.items():
|
||||
node = doc.createElement(metadata['item_name'])
|
||||
node.setAttribute(metadata['item_key'], str(k))
|
||||
text = doc.createTextNode(str(v))
|
||||
node.appendChild(text)
|
||||
result.appendChild(node)
|
||||
return result
|
||||
attrs = metadata.get('attributes', {}).get(nodename, {})
|
||||
for k, v in data.items():
|
||||
if k in attrs:
|
||||
result.setAttribute(k, str(v))
|
||||
else:
|
||||
node = self._to_xml_node(doc, metadata, k, v)
|
||||
result.appendChild(node)
|
||||
else:
|
||||
# Type is atom
|
||||
node = doc.createTextNode(str(data))
|
||||
result.appendChild(node)
|
||||
return result
|
||||
|
||||
|
||||
class ResponseSerializer(object):
|
||||
"""Encode the necessary pieces into a response object"""
|
||||
|
||||
def __init__(self, serializers=None):
|
||||
"""
|
||||
:param serializers: dictionary of content-type-specific serializers
|
||||
|
||||
"""
|
||||
self.serializers = {
|
||||
'application/xml': XMLDictSerializer(),
|
||||
'application/json': JSONDictSerializer(),
|
||||
}
|
||||
self.serializers.update(serializers or {})
|
||||
|
||||
def serialize(self, response_data, content_type):
|
||||
"""Serialize a dict into a string and wrap in a wsgi.Request object.
|
||||
|
||||
:param response_data: dict produced by the Controller
|
||||
:param content_type: expected mimetype of serialized response body
|
||||
|
||||
"""
|
||||
response = webob.Response()
|
||||
response.headers['Content-Type'] = content_type
|
||||
|
||||
serializer = self.get_serializer(content_type)
|
||||
response.body = serializer.serialize(response_data)
|
||||
|
||||
return response
|
||||
|
||||
def get_serializer(self, content_type):
|
||||
try:
|
||||
return self.serializers[content_type]
|
||||
except (KeyError, TypeError):
|
||||
raise exception.InvalidContentType(content_type=content_type)
|
||||
|
||||
|
||||
class Resource(wsgi.Application):
|
||||
"""WSGI app that handles (de)serialization and controller dispatch.
|
||||
|
||||
WSGI app that reads routing information supplied by RoutesMiddleware
|
||||
and calls the requested action method upon its controller. All
|
||||
controller action methods must accept a 'req' argument, which is the
|
||||
incoming wsgi.Request. If the operation is a PUT or POST, the controller
|
||||
method must also accept a 'body' argument (the deserialized request body).
|
||||
They may raise a webob.exc exception or return a dict, which will be
|
||||
serialized by requested content type.
|
||||
|
||||
"""
|
||||
def __init__(self, controller, serializers=None, deserializers=None):
|
||||
"""
|
||||
:param controller: object that implements methods created by routes lib
|
||||
:param serializers: dict of content-type specific text serializers
|
||||
:param deserializers: dict of content-type specific text deserializers
|
||||
|
||||
"""
|
||||
self.controller = controller
|
||||
self.serializer = ResponseSerializer(serializers)
|
||||
self.deserializer = RequestDeserializer(deserializers)
|
||||
|
||||
@webob.dec.wsgify(RequestClass=Request)
|
||||
def __call__(self, request):
|
||||
"""WSGI method that controls (de)serialization and method dispatch."""
|
||||
|
||||
LOG.debug("%(method)s %(url)s" % {"method": request.method,
|
||||
"url": request.url})
|
||||
|
||||
try:
|
||||
action, action_args, accept = self.deserializer.deserialize(
|
||||
request)
|
||||
except exception.InvalidContentType:
|
||||
return webob.exc.HTTPBadRequest(_("Unsupported Content-Type"))
|
||||
|
||||
action_result = self.dispatch(request, action, action_args)
|
||||
|
||||
#TODO(bcwaldon): find a more elegant way to pass through non-dict types
|
||||
if type(action_result) is dict:
|
||||
response = self.serializer.serialize(action_result, accept)
|
||||
else:
|
||||
response = action_result
|
||||
|
||||
try:
|
||||
msg_dict = dict(url=request.url, status=response.status_int)
|
||||
msg = _("%(url)s returned with HTTP %(status)d") % msg_dict
|
||||
except AttributeError:
|
||||
msg_dict = dict(url=request.url)
|
||||
msg = _("%(url)s returned a fault")
|
||||
|
||||
LOG.debug(msg)
|
||||
|
||||
return response
|
||||
|
||||
def dispatch(self, request, action, action_args):
|
||||
"""Find action-spefic method on controller and call it."""
|
||||
|
||||
controller_method = getattr(self.controller, action)
|
||||
return controller_method(req=request, **action_args)
|
||||
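A short usage sketch of the new Resource class (not part of the file above; the toy controller and routing arguments are invented), showing the dispatch contract end to end:

    class EchoController(object):
        def create(self, req, body):
            # 'body' arrives already deserialized; returning a dict lets
            # Resource serialize it to JSON or XML per the Accept header.
            return {'echo': body}

    resource = Resource(EchoController())

    req = Request.blank('/echo')
    req.method = 'POST'
    req.content_type = 'application/json'
    req.body = '{"hello": "world"}'
    req.environ['wsgiorg.routing_args'] = (None, {'action': 'create'})
    response = req.get_response(resource)
    # response.body is the JSON encoding of {'echo': {'hello': 'world'}}
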
@@ -22,6 +22,7 @@ from nova import exception
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
from nova.api.openstack import common
|
||||
from nova.api.openstack import wsgi
|
||||
from nova.scheduler import api
|
||||
|
||||
|
||||
@@ -52,12 +53,7 @@ def _scrub_zone(zone):
|
||||
'deleted', 'deleted_at', 'updated_at'))
|
||||
|
||||
|
||||
class Controller(common.OpenstackController):
|
||||
|
||||
_serialization_metadata = {
|
||||
'application/xml': {
|
||||
"attributes": {
|
||||
"zone": ["id", "api_url", "name", "capabilities"]}}}
|
||||
class Controller(object):
|
||||
|
||||
def index(self, req):
|
||||
"""Return all zones in brief"""
|
||||
@@ -96,17 +92,15 @@ class Controller(common.OpenstackController):
|
||||
api.zone_delete(req.environ['nova.context'], zone_id)
|
||||
return {}
|
||||
|
||||
def create(self, req):
|
||||
def create(self, req, body):
|
||||
context = req.environ['nova.context']
|
||||
env = self._deserialize(req.body, req.get_content_type())
|
||||
zone = api.zone_create(context, env["zone"])
|
||||
zone = api.zone_create(context, body["zone"])
|
||||
return dict(zone=_scrub_zone(zone))
|
||||
|
||||
def update(self, req, id):
|
||||
def update(self, req, id, body):
|
||||
context = req.environ['nova.context']
|
||||
env = self._deserialize(req.body, req.get_content_type())
|
||||
zone_id = int(id)
|
||||
zone = api.zone_update(context, zone_id, env["zone"])
|
||||
zone = api.zone_update(context, zone_id, body["zone"])
|
||||
return dict(zone=_scrub_zone(zone))
|
||||
|
||||
def select(self, req):
|
||||
@@ -140,3 +134,18 @@ class Controller(common.OpenstackController):
|
||||
            cooked.append(dict(weight=entry['weight'],
                               blob=cipher_text))
        return cooked


def create_resource():
    metadata = {
        "attributes": {
            "zone": ["id", "api_url", "name", "capabilities"],
        },
    }

    serializers = {
        'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V10,
                                                  metadata=metadata),
    }

    return wsgi.Resource(Controller(), serializers=serializers)

@@ -91,7 +91,6 @@ class API(base.Base):
|
||||
"""Enforce quota limits on injected files.
|
||||
|
||||
Raises a QuotaError if any limit is exceeded.
|
||||
|
||||
"""
|
||||
if injected_files is None:
|
||||
return
|
||||
@@ -140,7 +139,6 @@ class API(base.Base):
|
||||
"""Create the number and type of instances requested.
|
||||
|
||||
Verifies that quota and other arguments are valid.
|
||||
|
||||
"""
|
||||
if not instance_type:
|
||||
instance_type = instance_types.get_default_instance_type()
|
||||
@@ -268,10 +266,17 @@ class API(base.Base):
|
||||
{"method": "run_instance",
|
||||
"args": {"topic": FLAGS.compute_topic,
|
||||
"instance_id": instance_id,
|
||||
"instance_type": instance_type,
|
||||
"request_spec": {
|
||||
'instance_type': instance_type,
|
||||
'filter':
|
||||
'nova.scheduler.host_filter.'
|
||||
'InstanceTypeFilter',
|
||||
},
|
||||
"availability_zone": availability_zone,
|
||||
"injected_files": injected_files,
|
||||
"admin_password": admin_password}})
|
||||
"admin_password": admin_password,
|
||||
},
|
||||
})
|
||||
|
||||
for group_id in security_groups:
|
||||
self.trigger_security_group_members_refresh(elevated, group_id)
|
||||
@@ -294,7 +299,6 @@ class API(base.Base):
|
||||
already exist.
|
||||
|
||||
:param context: the security context
|
||||
|
||||
"""
|
||||
try:
|
||||
db.security_group_get_by_name(context, context.project_id,
|
||||
@@ -327,7 +331,6 @@ class API(base.Base):
|
||||
|
||||
Sends an update request to each compute node for whom this is
|
||||
relevant.
|
||||
|
||||
"""
|
||||
# First, we get the security group rules that reference this group as
|
||||
# the grantee..
|
||||
@@ -384,7 +387,6 @@ class API(base.Base):
|
||||
updated
|
||||
|
||||
:returns: None
|
||||
|
||||
"""
|
||||
rv = self.db.instance_update(context, instance_id, kwargs)
|
||||
return dict(rv.iteritems())
|
||||
@@ -434,7 +436,6 @@ class API(base.Base):
|
||||
Use this method instead of get() if this is the only operation you
|
||||
intend to do. It will route to novaclient.get if the instance is not
|
||||
found.
|
||||
|
||||
"""
|
||||
return self.get(context, instance_id)
|
||||
|
||||
@@ -444,7 +445,6 @@ class API(base.Base):
|
||||
|
||||
If there is no filter and the context is an admin, it will retrieve
|
||||
all instances in the system.
|
||||
|
||||
"""
|
||||
if reservation_id is not None:
|
||||
return self.db.instance_get_all_by_reservation(
|
||||
@@ -474,7 +474,6 @@ class API(base.Base):
|
||||
compute worker
|
||||
|
||||
:returns: None
|
||||
|
||||
"""
|
||||
if not params:
|
||||
params = {}
|
||||
@@ -524,7 +523,6 @@ class API(base.Base):
|
||||
"""Snapshot the given instance.
|
||||
|
||||
:returns: A dict containing image metadata
|
||||
|
||||
"""
|
||||
properties = {'instance_id': str(instance_id),
|
||||
'user_id': str(context.user_id)}
|
||||
|
||||
@@ -160,7 +160,7 @@ def convert_backward(migrate_engine, old_quotas, new_quotas):
|
||||
'project_id': quota.project_id,
|
||||
'created_at': quota.created_at,
|
||||
'updated_at': quota.updated_at,
|
||||
quota.resource: quota.hard_limit
|
||||
quota.resource: quota.hard_limit,
|
||||
}
|
||||
else:
|
||||
quotas[quota.project_id]['created_at'] = earliest(
|
||||
|
||||
@@ -14,23 +14,10 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from sqlalchemy import Column, Integer, MetaData, String, Table
|
||||
#from nova import log as logging
|
||||
from sqlalchemy import MetaData, Table
|
||||
|
||||
meta = MetaData()
|
||||
|
||||
c_manageent = Column('server_manageent_url',
|
||||
String(length=255, convert_unicode=False,
|
||||
assert_unicode=None, unicode_error=None,
|
||||
_warn_on_bytestring=False),
|
||||
nullable=True)
|
||||
|
||||
c_management = Column('server_management_url',
|
||||
String(length=255, convert_unicode=False,
|
||||
assert_unicode=None, unicode_error=None,
|
||||
_warn_on_bytestring=False),
|
||||
nullable=True)
|
||||
|
||||
|
||||
def upgrade(migrate_engine):
|
||||
# Upgrade operations go here. Don't create your own engine;
|
||||
@@ -40,11 +27,8 @@ def upgrade(migrate_engine):
|
||||
tokens = Table('auth_tokens', meta, autoload=True,
|
||||
autoload_with=migrate_engine)
|
||||
|
||||
tokens.create_column(c_management)
|
||||
migrate_engine.execute(tokens.update()
|
||||
.values(server_management_url=tokens.c.server_manageent_url))
|
||||
|
||||
tokens.c.server_manageent_url.drop()
|
||||
c_manageent = tokens.c.server_manageent_url
|
||||
c_manageent.alter(name='server_management_url')
|
||||
|
||||
|
||||
def downgrade(migrate_engine):
|
||||
@@ -53,8 +37,5 @@ def downgrade(migrate_engine):
|
||||
tokens = Table('auth_tokens', meta, autoload=True,
|
||||
autoload_with=migrate_engine)
|
||||
|
||||
tokens.create_column(c_manageent)
|
||||
migrate_engine.execute(tokens.update()
|
||||
.values(server_manageent_url=tokens.c.server_management_url))
|
||||
|
||||
tokens.c.server_management_url.drop()
|
||||
c_management = tokens.c.server_management_url
|
||||
c_management.alter(name='server_manageent_url')
|
||||
|
||||
@@ -473,9 +473,8 @@ class ZoneNotFound(NotFound):
|
||||
message = _("Zone %(zone_id)s could not be found.")
|
||||
|
||||
|
||||
class SchedulerHostFilterDriverNotFound(NotFound):
|
||||
message = _("Scheduler Host Filter Driver %(driver_name)s could"
|
||||
" not be found.")
|
||||
class SchedulerHostFilterNotFound(NotFound):
|
||||
message = _("Scheduler Host Filter %(filter_name)s could not be found.")
|
||||
|
||||
|
||||
class InstanceMetadataNotFound(NotFound):
|
||||
|
||||
@@ -296,6 +296,7 @@ DEFINE_bool('fake_network', False,
|
||||
'should we use fake network devices and addresses')
|
||||
DEFINE_string('rabbit_host', 'localhost', 'rabbit host')
|
||||
DEFINE_integer('rabbit_port', 5672, 'rabbit port')
|
||||
DEFINE_bool('rabbit_use_ssl', False, 'connect over SSL')
|
||||
DEFINE_string('rabbit_userid', 'guest', 'rabbit userid')
|
||||
DEFINE_string('rabbit_password', 'guest', 'rabbit password')
|
||||
DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host')
|
||||
|
||||
@@ -31,12 +31,14 @@ import eventlet
|
||||
from nova import crypto
|
||||
from nova import exception
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
from nova import utils
|
||||
from nova.auth import manager
|
||||
from nova.image import service
|
||||
from nova.api.ec2 import ec2utils
|
||||
|
||||
|
||||
LOG = logging.getLogger("nova.image.s3")
|
||||
FLAGS = flags.FLAGS
|
||||
flags.DEFINE_string('image_decryption_dir', '/tmp',
|
||||
'parent dir for tempdir used for image decryption')
|
||||
@@ -161,43 +163,83 @@ class S3ImageService(service.BaseImageService):
|
||||
|
||||
def delayed_create():
|
||||
"""This handles the fetching and decrypting of the part files."""
|
||||
parts = []
|
||||
for fn_element in manifest.find('image').getiterator('filename'):
|
||||
part = self._download_file(bucket, fn_element.text, image_path)
|
||||
parts.append(part)
|
||||
metadata['properties']['image_state'] = 'downloading'
|
||||
self.service.update(context, image_id, metadata)
|
||||
|
||||
# NOTE(vish): this may be suboptimal, should we use cat?
|
||||
encrypted_filename = os.path.join(image_path, 'image.encrypted')
|
||||
with open(encrypted_filename, 'w') as combined:
|
||||
for filename in parts:
|
||||
with open(filename) as part:
|
||||
shutil.copyfileobj(part, combined)
|
||||
try:
|
||||
parts = []
|
||||
elements = manifest.find('image').getiterator('filename')
|
||||
for fn_element in elements:
|
||||
part = self._download_file(bucket,
|
||||
fn_element.text,
|
||||
image_path)
|
||||
parts.append(part)
|
||||
|
||||
# NOTE(vish): this may be suboptimal, should we use cat?
|
||||
enc_filename = os.path.join(image_path, 'image.encrypted')
|
||||
with open(enc_filename, 'w') as combined:
|
||||
for filename in parts:
|
||||
with open(filename) as part:
|
||||
shutil.copyfileobj(part, combined)
|
||||
|
||||
except Exception:
|
||||
LOG.error(_("Failed to download %(image_location)s "
|
||||
"to %(image_path)s"), locals())
|
||||
metadata['properties']['image_state'] = 'failed_download'
|
||||
self.service.update(context, image_id, metadata)
|
||||
raise
|
||||
|
||||
metadata['properties']['image_state'] = 'decrypting'
|
||||
self.service.update(context, image_id, metadata)
|
||||
|
||||
hex_key = manifest.find('image/ec2_encrypted_key').text
|
||||
encrypted_key = binascii.a2b_hex(hex_key)
|
||||
hex_iv = manifest.find('image/ec2_encrypted_iv').text
|
||||
encrypted_iv = binascii.a2b_hex(hex_iv)
|
||||
try:
|
||||
hex_key = manifest.find('image/ec2_encrypted_key').text
|
||||
encrypted_key = binascii.a2b_hex(hex_key)
|
||||
hex_iv = manifest.find('image/ec2_encrypted_iv').text
|
||||
encrypted_iv = binascii.a2b_hex(hex_iv)
|
||||
|
||||
# FIXME(vish): grab key from common service so this can run on
|
||||
# any host.
|
||||
cloud_pk = crypto.key_path(context.project_id)
|
||||
# FIXME(vish): grab key from common service so this can run on
|
||||
# any host.
|
||||
cloud_pk = crypto.key_path(context.project_id)
|
||||
|
||||
decrypted_filename = os.path.join(image_path, 'image.tar.gz')
|
||||
self._decrypt_image(encrypted_filename, encrypted_key,
|
||||
encrypted_iv, cloud_pk, decrypted_filename)
|
||||
dec_filename = os.path.join(image_path, 'image.tar.gz')
|
||||
self._decrypt_image(enc_filename, encrypted_key,
|
||||
encrypted_iv, cloud_pk,
|
||||
dec_filename)
|
||||
except Exception:
|
||||
LOG.error(_("Failed to decrypt %(image_location)s "
|
||||
"to %(image_path)s"), locals())
|
||||
metadata['properties']['image_state'] = 'failed_decrypt'
|
||||
self.service.update(context, image_id, metadata)
|
||||
raise
|
||||
|
||||
metadata['properties']['image_state'] = 'untarring'
|
||||
self.service.update(context, image_id, metadata)
|
||||
|
||||
unz_filename = self._untarzip_image(image_path, decrypted_filename)
|
||||
try:
|
||||
unz_filename = self._untarzip_image(image_path, dec_filename)
|
||||
except Exception:
|
||||
LOG.error(_("Failed to untar %(image_location)s "
|
||||
"to %(image_path)s"), locals())
|
||||
metadata['properties']['image_state'] = 'failed_untar'
|
||||
self.service.update(context, image_id, metadata)
|
||||
raise
|
||||
|
||||
metadata['properties']['image_state'] = 'uploading'
|
||||
with open(unz_filename) as image_file:
|
||||
self.service.update(context, image_id, metadata, image_file)
|
||||
self.service.update(context, image_id, metadata)
|
||||
try:
|
||||
with open(unz_filename) as image_file:
|
||||
self.service.update(context, image_id,
|
||||
metadata, image_file)
|
||||
except Exception:
|
||||
LOG.error(_("Failed to upload %(image_location)s "
|
||||
"to %(image_path)s"), locals())
|
||||
metadata['properties']['image_state'] = 'failed_upload'
|
||||
self.service.update(context, image_id, metadata)
|
||||
raise
|
||||
|
||||
metadata['properties']['image_state'] = 'available'
|
||||
metadata['status'] = 'active'
|
||||
self.service.update(context, image_id, metadata)
|
||||
|
||||
shutil.rmtree(image_path)
|
||||
|
||||
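The restructured delayed_create() above records a distinct image_state (failed_download, failed_decrypt, failed_untar, failed_upload) before re-raising at each stage, so a stuck registration can be diagnosed from the image metadata alone. Roughly the guard pattern being repeated, factored out as a sketch (illustrative only, not code from this commit):

    def _guarded_stage(service, context, image_id, metadata, fail_state, work):
        try:
            return work()
        except Exception:
            # Record which stage failed, then let the error propagate.
            metadata['properties']['image_state'] = fail_state
            service.update(context, image_id, metadata)
            raise
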
@@ -81,7 +81,7 @@ class S3Application(wsgi.Router):
|
||||
super(S3Application, self).__init__(mapper)
|
||||
|
||||
|
||||
class BaseRequestHandler(wsgi.Controller):
|
||||
class BaseRequestHandler(object):
|
||||
"""Base class emulating Tornado's web framework pattern in WSGI.
|
||||
|
||||
This is a direct port of Tornado's implementation, so some key decisions
|
||||
|
||||
@@ -65,6 +65,7 @@ class Connection(carrot_connection.BrokerConnection):
|
||||
if new or not hasattr(cls, '_instance'):
|
||||
params = dict(hostname=FLAGS.rabbit_host,
|
||||
port=FLAGS.rabbit_port,
|
||||
ssl=FLAGS.rabbit_use_ssl,
|
||||
userid=FLAGS.rabbit_userid,
|
||||
password=FLAGS.rabbit_password,
|
||||
virtual_host=FLAGS.rabbit_virtual_host)
|
||||
|
||||
@@ -14,8 +14,8 @@
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Host Filter is a driver mechanism for requesting instance resources.
|
||||
Three drivers are included: AllHosts, Flavor & JSON. AllHosts just
|
||||
Host Filter is a mechanism for requesting instance resources.
|
||||
Three filters are included: AllHosts, Flavor & JSON. AllHosts just
|
||||
returns the full, unfiltered list of hosts. Flavor is a hard coded
|
||||
matching mechanism based on flavor criteria and JSON is an ad-hoc
|
||||
filter grammar.
|
||||
@@ -42,17 +42,18 @@ from nova import exception
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
from nova import utils
|
||||
from nova.scheduler import zone_aware_scheduler
|
||||
|
||||
LOG = logging.getLogger('nova.scheduler.host_filter')
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
flags.DEFINE_string('default_host_filter_driver',
|
||||
flags.DEFINE_string('default_host_filter',
|
||||
'nova.scheduler.host_filter.AllHostsFilter',
|
||||
'Which driver to use for filtering hosts.')
|
||||
'Which filter to use for filtering hosts.')
|
||||
|
||||
|
||||
class HostFilter(object):
|
||||
"""Base class for host filter drivers."""
|
||||
"""Base class for host filters."""
|
||||
|
||||
def instance_type_to_filter(self, instance_type):
|
||||
"""Convert instance_type into a filter for most common use-case."""
|
||||
@@ -63,14 +64,15 @@ class HostFilter(object):
|
||||
raise NotImplementedError()
|
||||
|
||||
def _full_name(self):
|
||||
"""module.classname of the filter driver"""
|
||||
"""module.classname of the filter."""
|
||||
return "%s.%s" % (self.__module__, self.__class__.__name__)
|
||||
|
||||
|
||||
class AllHostsFilter(HostFilter):
|
||||
"""NOP host filter driver. Returns all hosts in ZoneManager.
|
||||
""" NOP host filter. Returns all hosts in ZoneManager.
|
||||
This essentially does what the old Scheduler+Chance used
|
||||
to give us."""
|
||||
to give us.
|
||||
"""
|
||||
|
||||
def instance_type_to_filter(self, instance_type):
|
||||
"""Return anything to prevent base-class from raising
|
||||
@@ -83,8 +85,8 @@ class AllHostsFilter(HostFilter):
|
||||
for host, services in zone_manager.service_states.iteritems()]
|
||||
|
||||
|
||||
class FlavorFilter(HostFilter):
|
||||
"""HostFilter driver hard-coded to work with flavors."""
|
||||
class InstanceTypeFilter(HostFilter):
|
||||
"""HostFilter hard-coded to work with InstanceType records."""
|
||||
|
||||
def instance_type_to_filter(self, instance_type):
|
||||
"""Use instance_type to filter hosts."""
|
||||
@@ -98,9 +100,10 @@ class FlavorFilter(HostFilter):
|
||||
capabilities = services.get('compute', {})
|
||||
host_ram_mb = capabilities['host_memory_free']
|
||||
disk_bytes = capabilities['disk_available']
|
||||
if host_ram_mb >= instance_type['memory_mb'] and \
|
||||
disk_bytes >= instance_type['local_gb']:
|
||||
selected_hosts.append((host, capabilities))
|
||||
spec_ram = instance_type['memory_mb']
|
||||
spec_disk = instance_type['local_gb']
|
||||
if host_ram_mb >= spec_ram and disk_bytes >= spec_disk:
|
||||
selected_hosts.append((host, capabilities))
|
||||
return selected_hosts
|
||||
|
||||
#host entries (currently) are like:
|
||||
@@ -109,15 +112,15 @@ class FlavorFilter(HostFilter):
|
||||
# 'host_memory_total': 8244539392,
|
||||
# 'host_memory_overhead': 184225792,
|
||||
# 'host_memory_free': 3868327936,
|
||||
# 'host_memory_free_computed': 3840843776},
|
||||
# 'host_other-config': {},
|
||||
# 'host_memory_free_computed': 3840843776,
|
||||
# 'host_other_config': {},
|
||||
# 'host_ip_address': '192.168.1.109',
|
||||
# 'host_cpu_info': {},
|
||||
# 'disk_available': 32954957824,
|
||||
# 'disk_total': 50394562560,
|
||||
# 'disk_used': 17439604736},
|
||||
# 'disk_used': 17439604736,
|
||||
# 'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f',
|
||||
# 'host_name-label': 'xs-mini'}
|
||||
# 'host_name_label': 'xs-mini'}
|
||||
|
||||
# instance_type table has:
|
||||
#name = Column(String(255), unique=True)
|
||||
@@ -131,8 +134,9 @@ class FlavorFilter(HostFilter):
|
||||
|
||||
|
||||
class JsonFilter(HostFilter):
|
||||
"""Host Filter driver to allow simple JSON-based grammar for
|
||||
selecting hosts."""
|
||||
"""Host Filter to allow simple JSON-based grammar for
|
||||
selecting hosts.
|
||||
"""
|
||||
|
||||
def _equals(self, args):
|
||||
"""First term is == all the other terms."""
|
||||
@@ -222,13 +226,14 @@ class JsonFilter(HostFilter):
|
||||
required_disk = instance_type['local_gb']
|
||||
query = ['and',
|
||||
['>=', '$compute.host_memory_free', required_ram],
|
||||
['>=', '$compute.disk_available', required_disk]
|
||||
['>=', '$compute.disk_available', required_disk],
|
||||
]
|
||||
return (self._full_name(), json.dumps(query))
|
||||
|
||||
def _parse_string(self, string, host, services):
|
||||
"""Strings prefixed with $ are capability lookups in the
|
||||
form '$service.capability[.subcap*]'"""
|
||||
form '$service.capability[.subcap*]'
|
||||
"""
|
||||
if not string:
|
||||
return None
|
||||
if string[0] != '$':
|
||||
@@ -271,18 +276,48 @@ class JsonFilter(HostFilter):
|
||||
return hosts
|
||||
|
||||
|
||||
DRIVERS = [AllHostsFilter, FlavorFilter, JsonFilter]
|
||||
FILTERS = [AllHostsFilter, InstanceTypeFilter, JsonFilter]
|
||||
|
||||
|
||||
def choose_driver(driver_name=None):
|
||||
"""Since the caller may specify which driver to use we need
|
||||
to have an authoritative list of what is permissible. This
|
||||
function checks the driver name against a predefined set
|
||||
of acceptable drivers."""
|
||||
def choose_host_filter(filter_name=None):
|
||||
"""Since the caller may specify which filter to use we need
|
||||
to have an authoritative list of what is permissible. This
|
||||
function checks the filter name against a predefined set
|
||||
of acceptable filters.
|
||||
"""
|
||||
|
||||
if not driver_name:
|
||||
driver_name = FLAGS.default_host_filter_driver
|
||||
for driver in DRIVERS:
|
||||
if "%s.%s" % (driver.__module__, driver.__name__) == driver_name:
|
||||
return driver()
|
||||
raise exception.SchedulerHostFilterDriverNotFound(driver_name=driver_name)
|
||||
if not filter_name:
|
||||
filter_name = FLAGS.default_host_filter
|
||||
for filter_class in FILTERS:
|
||||
host_match = "%s.%s" % (filter_class.__module__, filter_class.__name__)
|
||||
if host_match == filter_name:
|
||||
return filter_class()
|
||||
raise exception.SchedulerHostFilterNotFound(filter_name=filter_name)
|
||||
|
||||
|
||||
class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler):
|
||||
"""The HostFilterScheduler uses the HostFilter to filter
|
||||
hosts for weighing. The particular filter used may be passed in
|
||||
as an argument or the default will be used.
|
||||
|
||||
request_spec = {'filter': <Filter name>,
|
||||
'instance_type': <InstanceType dict>}
|
||||
"""
|
||||
|
||||
def filter_hosts(self, num, request_spec):
|
||||
"""Filter the full host list (from the ZoneManager)"""
|
||||
filter_name = request_spec.get('filter', None)
|
||||
host_filter = choose_host_filter(filter_name)
|
||||
|
||||
# TODO(sandy): We're only using InstanceType-based specs
|
||||
# currently. Later we'll need to snoop for more detailed
|
||||
# host filter requests.
|
||||
instance_type = request_spec['instance_type']
|
||||
name, query = host_filter.instance_type_to_filter(instance_type)
|
||||
return host_filter.filter_hosts(self.zone_manager, query)
|
||||
|
||||
def weigh_hosts(self, num, request_spec, hosts):
|
||||
"""Derived classes must override this method and return
|
||||
a list of hosts in [{weight, hostname}] format.
|
||||
"""
|
||||
return [dict(weight=1, hostname=host) for host, caps in hosts]
|
||||
|
||||
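To make the renamed filter API concrete, a hypothetical use of the classes above (the instance_type values are invented):

    instance_type = {'memory_mb': 2048, 'local_gb': 20}

    host_filter = choose_host_filter('nova.scheduler.host_filter.JsonFilter')
    name, query = host_filter.instance_type_to_filter(instance_type)
    # name  -> 'nova.scheduler.host_filter.JsonFilter'
    # query -> '["and", [">=", "$compute.host_memory_free", 2048],
    #                   [">=", "$compute.disk_available", 20]]'
    # hosts = host_filter.filter_hosts(zone_manager, query)  # needs a ZoneManager
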
@@ -83,11 +83,16 @@ class SchedulerManager(manager.Manager):
|
||||
except AttributeError:
|
||||
host = self.driver.schedule(elevated, topic, *args, **kwargs)
|
||||
|
||||
if not host:
|
||||
LOG.debug(_("%(topic)s %(method)s handled in Scheduler")
|
||||
% locals())
|
||||
return
|
||||
|
||||
rpc.cast(context,
|
||||
db.queue_get_for(context, topic, host),
|
||||
{"method": method,
|
||||
"args": kwargs})
|
||||
LOG.debug(_("Casting to %(topic)s %(host)s for %(method)s") % locals())
|
||||
LOG.debug(_("Casted to %(topic)s %(host)s for %(method)s") % locals())
|
||||
|
||||
# NOTE (masumotok) : This method should be moved to nova.api.ec2.admin.
|
||||
# Based on bexar design summit discussion,
|
||||
|
||||
@@ -22,7 +22,9 @@ across zones. There are two expansion points to this class for:
|
||||
|
||||
import operator
|
||||
|
||||
from nova import db
|
||||
from nova import log as logging
|
||||
from nova import rpc
|
||||
from nova.scheduler import api
|
||||
from nova.scheduler import driver
|
||||
|
||||
@@ -36,7 +38,7 @@ class ZoneAwareScheduler(driver.Scheduler):
|
||||
"""Call novaclient zone method. Broken out for testing."""
|
||||
return api.call_zone_method(context, method, specs=specs)
|
||||
|
||||
def schedule_run_instance(self, context, topic='compute', specs={},
|
||||
def schedule_run_instance(self, context, instance_id, request_spec,
|
||||
*args, **kwargs):
|
||||
"""This method is called from nova.compute.api to provision
|
||||
an instance. However we need to look at the parameters being
|
||||
@@ -44,56 +46,83 @@ class ZoneAwareScheduler(driver.Scheduler):
|
||||
1. Create a Build Plan and then provision, or
|
||||
2. Use the Build Plan information in the request parameters
|
||||
to simply create the instance (either in this zone or
|
||||
a child zone)."""
|
||||
a child zone).
|
||||
"""
|
||||
|
||||
if 'blob' in specs:
|
||||
return self.provision_instance(context, topic, specs)
|
||||
# TODO(sandy): We'll have to look for richer specs at some point.
|
||||
|
||||
if 'blob' in request_spec:
|
||||
self.provision_resource(context, request_spec, instance_id, kwargs)
|
||||
return None
|
||||
|
||||
# Create build plan and provision ...
|
||||
build_plan = self.select(context, specs)
|
||||
build_plan = self.select(context, request_spec)
|
||||
if not build_plan:
|
||||
raise driver.NoValidHost(_('No hosts were available'))
|
||||
|
||||
for item in build_plan:
|
||||
self.provision_instance(context, topic, item)
|
||||
self.provision_resource(context, item, instance_id, kwargs)
|
||||
|
||||
def provision_instance(context, topic, item):
|
||||
"""Create the requested instance in this Zone or a child zone."""
|
||||
pass
|
||||
# Returning None short-circuits the routing to Compute (since
|
||||
# we've already done it here)
|
||||
return None
|
||||
|
||||
def select(self, context, *args, **kwargs):
|
||||
def provision_resource(self, context, item, instance_id, kwargs):
|
||||
"""Create the requested resource in this Zone or a child zone."""
|
||||
if "hostname" in item:
|
||||
host = item['hostname']
|
||||
kwargs['instance_id'] = instance_id
|
||||
rpc.cast(context,
|
||||
db.queue_get_for(context, "compute", host),
|
||||
{"method": "run_instance",
|
||||
"args": kwargs})
|
||||
LOG.debug(_("Casted to compute %(host)s for run_instance")
|
||||
% locals())
|
||||
else:
|
||||
# TODO(sandy) Provision in child zone ...
|
||||
LOG.warning(_("Provision to Child Zone not supported (yet)"))
|
||||
pass
|
||||
|
||||
def select(self, context, request_spec, *args, **kwargs):
|
||||
"""Select returns a list of weights and zone/host information
|
||||
corresponding to the best hosts to service the request. Any
|
||||
child zone information has been encrypted so as not to reveal
|
||||
anything about the children."""
|
||||
return self._schedule(context, "compute", *args, **kwargs)
|
||||
anything about the children.
|
||||
"""
|
||||
return self._schedule(context, "compute", request_spec,
|
||||
*args, **kwargs)
|
||||
|
||||
def schedule(self, context, topic, *args, **kwargs):
|
||||
# TODO(sandy): We're only focused on compute instances right now,
|
||||
# so we don't implement the default "schedule()" method required
|
||||
# of Schedulers.
|
||||
def schedule(self, context, topic, request_spec, *args, **kwargs):
|
||||
"""The schedule() contract requires we return the one
|
||||
best-suited host for this request.
|
||||
"""
|
||||
res = self._schedule(context, topic, *args, **kwargs)
|
||||
# TODO(sirp): should this be a host object rather than a weight-dict?
|
||||
if not res:
|
||||
raise driver.NoValidHost(_('No hosts were available'))
|
||||
return res[0]
|
||||
raise driver.NoValidHost(_('No hosts were available'))
|
||||
|
||||
def _schedule(self, context, topic, *args, **kwargs):
|
||||
def _schedule(self, context, topic, request_spec, *args, **kwargs):
|
||||
"""Returns a list of hosts that meet the required specs,
|
||||
ordered by their fitness.
|
||||
"""
|
||||
|
||||
#TODO(sandy): extract these from args.
|
||||
if topic != "compute":
|
||||
raise NotImplementedError(_("Zone Aware Scheduler only understands "
|
||||
"Compute nodes (for now)"))
|
||||
|
||||
#TODO(sandy): how to infer this from OS API params?
|
||||
num_instances = 1
|
||||
specs = {}
|
||||
|
||||
# Filter local hosts based on requirements ...
|
||||
host_list = self.filter_hosts(num_instances, specs)
|
||||
host_list = self.filter_hosts(num_instances, request_spec)
|
||||
|
||||
# then weigh the selected hosts.
|
||||
# weighted = [{weight=weight, name=hostname}, ...]
|
||||
weighted = self.weigh_hosts(num_instances, specs, host_list)
|
||||
weighted = self.weigh_hosts(num_instances, request_spec, host_list)
|
||||
|
||||
# Next, tack on the best weights from the child zones ...
|
||||
child_results = self._call_zone_method(context, "select",
|
||||
specs=specs)
|
||||
specs=request_spec)
|
||||
for child_zone, result in child_results:
|
||||
for weighting in result:
|
||||
# Remember the child_zone so we can get back to
|
||||
@@ -108,12 +137,14 @@ class ZoneAwareScheduler(driver.Scheduler):
|
||||
weighted.sort(key=operator.itemgetter('weight'))
|
||||
return weighted
|
||||
|
||||
def filter_hosts(self, num, specs):
|
||||
def filter_hosts(self, num, request_spec):
|
||||
"""Derived classes must override this method and return
|
||||
a list of hosts in [(hostname, capability_dict)] format."""
|
||||
a list of hosts in [(hostname, capability_dict)] format.
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def weigh_hosts(self, num, specs, hosts):
|
||||
def weigh_hosts(self, num, request_spec, hosts):
|
||||
"""Derived classes must override this method and return
|
||||
a lists of hosts in [{weight, hostname}] format."""
|
||||
a list of hosts in [{weight, hostname}] format.
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
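Putting the scheduler pieces together (an illustrative trace, not code from the diff): compute.api now packs a request_spec into the scheduler cast, and ZoneAwareScheduler._schedule() filters and weighs hosts against it:

    request_spec = {
        'instance_type': {'memory_mb': 2048, 'local_gb': 20},
        'filter': 'nova.scheduler.host_filter.InstanceTypeFilter',
    }
    # scheduler = HostFilterScheduler()
    # weighted = scheduler.select(context, request_spec)
    # weighted -> [{'weight': 1, 'hostname': 'host1'}, ...], sorted so the
    # lightest (best) entry comes first.
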
@@ -17,12 +17,10 @@
|
||||
|
||||
import json
|
||||
|
||||
from nova import wsgi
|
||||
|
||||
from nova.api.openstack import extensions
|
||||
|
||||
|
||||
class FoxInSocksController(wsgi.Controller):
|
||||
class FoxInSocksController(object):
|
||||
|
||||
def index(self, req):
|
||||
return "Try to say this Mr. Knox, sir..."
|
||||
|
||||
@@ -26,15 +26,15 @@ from nova import flags
|
||||
from nova.api import openstack
|
||||
from nova.api.openstack import extensions
|
||||
from nova.api.openstack import flavors
|
||||
from nova.api.openstack import wsgi
|
||||
from nova.tests.api.openstack import fakes
|
||||
import nova.wsgi
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
|
||||
response_body = "Try to say this Mr. Knox, sir..."
|
||||
|
||||
|
||||
class StubController(nova.wsgi.Controller):
|
||||
class StubController(object):
|
||||
|
||||
def __init__(self, body):
|
||||
self.body = body
|
||||
|
||||
@@ -73,7 +73,7 @@ class LimitsControllerV10Test(BaseLimitTestSuite):
|
||||
def setUp(self):
|
||||
"""Run before each test."""
|
||||
BaseLimitTestSuite.setUp(self)
|
||||
self.controller = limits.LimitsControllerV10()
|
||||
self.controller = limits.create_resource('1.0')
|
||||
|
||||
def _get_index_request(self, accept_header="application/json"):
|
||||
"""Helper to set routing arguments."""
|
||||
@@ -209,7 +209,7 @@ class LimitsControllerV11Test(BaseLimitTestSuite):
|
||||
def setUp(self):
|
||||
"""Run before each test."""
|
||||
BaseLimitTestSuite.setUp(self)
|
||||
self.controller = limits.LimitsControllerV11()
|
||||
self.controller = limits.create_resource('1.1')
|
||||
|
||||
def _get_index_request(self, accept_header="application/json"):
|
||||
"""Helper to set routing arguments."""
|
||||
|
||||
@@ -217,7 +217,6 @@ class ServersTest(test.TestCase):
|
||||
},
|
||||
]
|
||||
|
||||
print res_dict['server']
|
||||
self.assertEqual(res_dict['server']['links'], expected_links)
|
||||
|
||||
def test_get_server_by_id_with_addresses_xml(self):
|
||||
@@ -773,9 +772,7 @@ class ServersTest(test.TestCase):
|
||||
self.body = json.dumps(dict(server=inst_dict))
|
||||
|
||||
def server_update(context, id, params):
|
||||
filtered_dict = dict(
|
||||
display_name='server_test'
|
||||
)
|
||||
filtered_dict = dict(display_name='server_test')
|
||||
self.assertEqual(params, filtered_dict)
|
||||
return filtered_dict
|
||||
|
||||
@@ -844,7 +841,6 @@ class ServersTest(test.TestCase):
|
||||
req = webob.Request.blank('/v1.0/servers/detail')
|
||||
req.headers['Accept'] = 'application/xml'
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
print res.body
|
||||
dom = minidom.parseString(res.body)
|
||||
for i, server in enumerate(dom.getElementsByTagName('server')):
|
||||
self.assertEqual(server.getAttribute('id'), str(i))
|
||||
@@ -1008,6 +1004,14 @@ class ServersTest(test.TestCase):
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
self.assertEqual(res.status_int, 501)
|
||||
|
||||
def test_server_change_password_xml(self):
|
||||
req = webob.Request.blank('/v1.0/servers/1/action')
|
||||
req.method = 'POST'
|
||||
req.content_type = 'application/xml'
|
||||
req.body = '<changePassword adminPass="1234pass">'
|
||||
# res = req.get_response(fakes.wsgi_app())
|
||||
# self.assertEqual(res.status_int, 501)
|
||||
|
||||
def test_server_change_password_v1_1(self):
|
||||
mock_method = MockSetAdminPassword()
|
||||
self.stubs.Set(nova.compute.api.API, 'set_admin_password', mock_method)
|
||||
@@ -1267,6 +1271,25 @@ class ServersTest(test.TestCase):
|
||||
self.assertEqual(res.status_int, 202)
|
||||
self.assertEqual(self.resize_called, True)
|
||||
|
||||
def test_resize_server_v11(self):
|
||||
|
||||
req = webob.Request.blank('/v1.1/servers/1/action')
|
||||
req.content_type = 'application/json'
|
||||
req.method = 'POST'
|
||||
body_dict = dict(resize=dict(flavorRef="http://localhost/3"))
|
||||
req.body = json.dumps(body_dict)
|
||||
|
||||
self.resize_called = False
|
||||
|
||||
def resize_mock(*args):
|
||||
self.resize_called = True
|
||||
|
||||
self.stubs.Set(nova.compute.api.API, 'resize', resize_mock)
|
||||
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
self.assertEqual(res.status_int, 202)
|
||||
self.assertEqual(self.resize_called, True)
|
||||
|
||||
def test_resize_bad_flavor_fails(self):
|
||||
req = self.webreq('/1/action', 'POST', dict(resize=dict(derp=3)))
|
||||
|
||||
@@ -1380,13 +1403,13 @@ class ServersTest(test.TestCase):
|
||||
class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.deserializer = servers.ServerCreateRequestXMLDeserializer()
|
||||
self.deserializer = servers.ServerXMLDeserializer()
|
||||
|
||||
def test_minimal_request(self):
|
||||
serial_request = """
|
||||
<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
|
||||
name="new-server-test" imageId="1" flavorId="1"/>"""
|
||||
request = self.deserializer.deserialize(serial_request)
|
||||
request = self.deserializer.deserialize(serial_request, 'create')
|
||||
expected = {"server": {
|
||||
"name": "new-server-test",
|
||||
"imageId": "1",
|
||||
@@ -1400,7 +1423,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
|
||||
name="new-server-test" imageId="1" flavorId="1">
|
||||
<metadata/>
|
||||
</server>"""
|
||||
request = self.deserializer.deserialize(serial_request)
|
||||
request = self.deserializer.deserialize(serial_request, 'create')
|
||||
expected = {"server": {
|
||||
"name": "new-server-test",
|
||||
"imageId": "1",
|
||||
@@ -1415,7 +1438,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
|
||||
name="new-server-test" imageId="1" flavorId="1">
|
||||
<personality/>
|
||||
</server>"""
|
||||
request = self.deserializer.deserialize(serial_request)
|
||||
request = self.deserializer.deserialize(serial_request, 'create')
|
||||
expected = {"server": {
|
||||
"name": "new-server-test",
|
||||
"imageId": "1",
|
||||
@@ -1431,7 +1454,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
|
||||
<metadata/>
|
||||
<personality/>
|
||||
</server>"""
|
||||
request = self.deserializer.deserialize(serial_request)
|
||||
request = self.deserializer.deserialize(serial_request, 'create')
|
||||
expected = {"server": {
|
||||
"name": "new-server-test",
|
||||
"imageId": "1",
|
||||
@@ -1448,7 +1471,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
|
||||
<personality/>
|
||||
<metadata/>
|
||||
</server>"""
|
||||
request = self.deserializer.deserialize(serial_request)
|
||||
request = self.deserializer.deserialize(serial_request, 'create')
|
||||
expected = {"server": {
|
||||
"name": "new-server-test",
|
||||
"imageId": "1",
|
||||
@@ -1466,7 +1489,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
|
||||
<file path="/etc/conf">aabbccdd</file>
|
||||
</personality>
|
||||
</server>"""
|
||||
request = self.deserializer.deserialize(serial_request)
|
||||
request = self.deserializer.deserialize(serial_request, 'create')
|
||||
expected = [{"path": "/etc/conf", "contents": "aabbccdd"}]
|
||||
self.assertEquals(request["server"]["personality"], expected)
|
||||
|
||||
@@ -1476,7 +1499,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
|
||||
name="new-server-test" imageId="1" flavorId="1">
|
||||
<personality><file path="/etc/conf">aabbccdd</file>
|
||||
<file path="/etc/sudoers">abcd</file></personality></server>"""
|
||||
request = self.deserializer.deserialize(serial_request)
|
||||
request = self.deserializer.deserialize(serial_request, 'create')
|
||||
expected = [{"path": "/etc/conf", "contents": "aabbccdd"},
|
||||
{"path": "/etc/sudoers", "contents": "abcd"}]
|
||||
self.assertEquals(request["server"]["personality"], expected)
|
||||
@@ -1492,7 +1515,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
|
||||
<file path="/etc/ignoreme">anything</file>
|
||||
</personality>
|
||||
</server>"""
|
||||
request = self.deserializer.deserialize(serial_request)
|
||||
request = self.deserializer.deserialize(serial_request, 'create')
|
||||
expected = [{"path": "/etc/conf", "contents": "aabbccdd"}]
|
||||
self.assertEquals(request["server"]["personality"], expected)
|
||||
|
||||
@@ -1501,7 +1524,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
|
||||
<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
|
||||
name="new-server-test" imageId="1" flavorId="1">
|
||||
<personality><file>aabbccdd</file></personality></server>"""
|
||||
request = self.deserializer.deserialize(serial_request)
|
||||
request = self.deserializer.deserialize(serial_request, 'create')
|
||||
expected = [{"contents": "aabbccdd"}]
|
||||
self.assertEquals(request["server"]["personality"], expected)
|
||||
|
||||
@@ -1510,7 +1533,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
|
||||
<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
|
||||
name="new-server-test" imageId="1" flavorId="1">
|
||||
<personality><file path="/etc/conf"></file></personality></server>"""
|
||||
request = self.deserializer.deserialize(serial_request)
|
||||
request = self.deserializer.deserialize(serial_request, 'create')
|
||||
expected = [{"path": "/etc/conf", "contents": ""}]
|
||||
self.assertEquals(request["server"]["personality"], expected)
|
||||
|
||||
@@ -1519,7 +1542,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
|
||||
<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
|
||||
name="new-server-test" imageId="1" flavorId="1">
|
||||
<personality><file path="/etc/conf"/></personality></server>"""
|
||||
request = self.deserializer.deserialize(serial_request)
|
||||
request = self.deserializer.deserialize(serial_request, 'create')
|
||||
expected = [{"path": "/etc/conf", "contents": ""}]
|
||||
self.assertEquals(request["server"]["personality"], expected)
|
||||
|
||||
@@ -1531,7 +1554,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
|
||||
<meta key="alpha">beta</meta>
|
||||
</metadata>
|
||||
</server>"""
|
||||
request = self.deserializer.deserialize(serial_request)
|
||||
request = self.deserializer.deserialize(serial_request, 'create')
|
||||
expected = {"alpha": "beta"}
|
||||
self.assertEquals(request["server"]["metadata"], expected)
|
||||
|
||||
@@ -1544,7 +1567,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
|
||||
<meta key="foo">bar</meta>
|
||||
</metadata>
|
||||
</server>"""
|
||||
request = self.deserializer.deserialize(serial_request)
|
||||
request = self.deserializer.deserialize(serial_request, 'create')
|
||||
expected = {"alpha": "beta", "foo": "bar"}
|
||||
self.assertEquals(request["server"]["metadata"], expected)
|
||||
|
||||
@@ -1556,7 +1579,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
|
||||
<meta key="alpha"></meta>
|
||||
</metadata>
|
||||
</server>"""
|
||||
request = self.deserializer.deserialize(serial_request)
|
||||
request = self.deserializer.deserialize(serial_request, 'create')
|
||||
expected = {"alpha": ""}
|
||||
self.assertEquals(request["server"]["metadata"], expected)
|
||||
|
||||
@@ -1569,7 +1592,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
|
||||
<meta key="delta"/>
|
||||
</metadata>
|
||||
</server>"""
|
||||
request = self.deserializer.deserialize(serial_request)
|
||||
request = self.deserializer.deserialize(serial_request, 'create')
|
||||
expected = {"alpha": "", "delta": ""}
|
||||
self.assertEquals(request["server"]["metadata"], expected)
|
||||
|
||||
@@ -1581,7 +1604,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
|
||||
<meta>beta</meta>
|
||||
</metadata>
|
||||
</server>"""
|
||||
request = self.deserializer.deserialize(serial_request)
|
||||
request = self.deserializer.deserialize(serial_request, 'create')
|
||||
expected = {"": "beta"}
|
||||
self.assertEquals(request["server"]["metadata"], expected)
|
||||
|
||||
@@ -1594,7 +1617,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
|
||||
<meta>gamma</meta>
|
||||
</metadata>
|
||||
</server>"""
|
||||
request = self.deserializer.deserialize(serial_request)
|
||||
request = self.deserializer.deserialize(serial_request, 'create')
|
||||
expected = {"": "gamma"}
|
||||
self.assertEquals(request["server"]["metadata"], expected)
|
||||
|
||||
@@ -1607,7 +1630,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
|
||||
<meta key="foo">baz</meta>
|
||||
</metadata>
|
||||
</server>"""
|
||||
request = self.deserializer.deserialize(serial_request)
|
||||
request = self.deserializer.deserialize(serial_request, 'create')
|
||||
expected = {"foo": "baz"}
|
||||
self.assertEquals(request["server"]["metadata"], expected)
|
||||
|
||||
@@ -1654,7 +1677,7 @@ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==""",
|
||||
},
|
||||
],
|
||||
}}
|
||||
request = self.deserializer.deserialize(serial_request)
|
||||
request = self.deserializer.deserialize(serial_request, 'create')
|
||||
self.assertEqual(request, expected)
|
||||
|
||||
def test_request_xmlser_with_flavor_image_ref(self):
|
||||
@@ -1664,7 +1687,7 @@ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==""",
|
||||
imageRef="http://localhost:8774/v1.1/images/1"
|
||||
flavorRef="http://localhost:8774/v1.1/flavors/1">
|
||||
</server>"""
|
||||
request = self.deserializer.deserialize(serial_request)
|
||||
request = self.deserializer.deserialize(serial_request, 'create')
|
||||
self.assertEquals(request["server"]["flavorRef"],
|
||||
"http://localhost:8774/v1.1/flavors/1")
|
||||
self.assertEquals(request["server"]["imageRef"],
|
||||
|
||||
nova/tests/api/openstack/test_wsgi.py (new file, 293 lines)
@@ -0,0 +1,293 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
import json
|
||||
import webob
|
||||
|
||||
from nova import exception
|
||||
from nova import test
|
||||
from nova.api.openstack import wsgi
|
||||
|
||||
|
||||
class RequestTest(test.TestCase):
|
||||
def test_content_type_missing(self):
|
||||
request = wsgi.Request.blank('/tests/123')
|
||||
request.body = "<body />"
|
||||
self.assertRaises(exception.InvalidContentType,
|
||||
request.get_content_type)
|
||||
|
||||
def test_content_type_unsupported(self):
|
||||
request = wsgi.Request.blank('/tests/123')
|
||||
request.headers["Content-Type"] = "text/html"
|
||||
request.body = "asdf<br />"
|
||||
self.assertRaises(exception.InvalidContentType,
|
||||
request.get_content_type)
|
||||
|
||||
def test_content_type_with_charset(self):
|
||||
request = wsgi.Request.blank('/tests/123')
|
||||
request.headers["Content-Type"] = "application/json; charset=UTF-8"
|
||||
result = request.get_content_type()
|
||||
self.assertEqual(result, "application/json")
|
||||
|
||||
def test_content_type_from_accept_xml(self):
|
||||
request = wsgi.Request.blank('/tests/123')
|
||||
request.headers["Accept"] = "application/xml"
|
||||
result = request.best_match_content_type()
|
||||
self.assertEqual(result, "application/xml")
|
||||
|
||||
request = wsgi.Request.blank('/tests/123')
|
||||
request.headers["Accept"] = "application/json"
|
||||
result = request.best_match_content_type()
|
||||
self.assertEqual(result, "application/json")
|
||||
|
||||
request = wsgi.Request.blank('/tests/123')
|
||||
request.headers["Accept"] = "application/xml, application/json"
|
||||
result = request.best_match_content_type()
|
||||
self.assertEqual(result, "application/json")
|
||||
|
||||
request = wsgi.Request.blank('/tests/123')
|
||||
request.headers["Accept"] = \
|
||||
"application/json; q=0.3, application/xml; q=0.9"
|
||||
result = request.best_match_content_type()
|
||||
self.assertEqual(result, "application/xml")
|
||||
|
||||
def test_content_type_from_query_extension(self):
|
||||
request = wsgi.Request.blank('/tests/123.xml')
|
||||
result = request.best_match_content_type()
|
||||
self.assertEqual(result, "application/xml")
|
||||
|
||||
request = wsgi.Request.blank('/tests/123.json')
|
||||
result = request.best_match_content_type()
|
||||
self.assertEqual(result, "application/json")
|
||||
|
||||
request = wsgi.Request.blank('/tests/123.invalid')
|
||||
result = request.best_match_content_type()
|
||||
self.assertEqual(result, "application/json")
|
||||
|
||||
def test_content_type_accept_and_query_extension(self):
|
||||
request = wsgi.Request.blank('/tests/123.xml')
|
||||
request.headers["Accept"] = "application/json"
|
||||
result = request.best_match_content_type()
|
||||
self.assertEqual(result, "application/xml")
|
||||
|
||||
def test_content_type_accept_default(self):
|
||||
request = wsgi.Request.blank('/tests/123.unsupported')
|
||||
request.headers["Accept"] = "application/unsupported1"
|
||||
result = request.best_match_content_type()
|
||||
self.assertEqual(result, "application/json")
|
||||
|
||||
|
||||
class DictSerializerTest(test.TestCase):
|
||||
def test_dispatch(self):
|
||||
serializer = wsgi.DictSerializer()
|
||||
serializer.create = lambda x: 'pants'
|
||||
serializer.default = lambda x: 'trousers'
|
||||
self.assertEqual(serializer.serialize({}, 'create'), 'pants')
|
||||
|
||||
def test_dispatch_default(self):
|
||||
serializer = wsgi.DictSerializer()
|
||||
serializer.create = lambda x: 'pants'
|
||||
serializer.default = lambda x: 'trousers'
|
||||
self.assertEqual(serializer.serialize({}, 'update'), 'trousers')
|
||||
|
||||
|
||||
class XMLDictSerializerTest(test.TestCase):
|
||||
def test_xml(self):
|
||||
input_dict = dict(servers=dict(a=(2, 3)))
|
||||
expected_xml = '<serversxmlns="asdf"><a>(2,3)</a></servers>'
|
||||
serializer = wsgi.XMLDictSerializer(xmlns="asdf")
|
||||
result = serializer.serialize(input_dict)
|
||||
result = result.replace('\n', '').replace(' ', '')
|
||||
self.assertEqual(result, expected_xml)
|
||||
|
||||
|
||||
class JSONDictSerializerTest(test.TestCase):
|
||||
def test_json(self):
|
||||
input_dict = dict(servers=dict(a=(2, 3)))
|
||||
expected_json = '{"servers":{"a":[2,3]}}'
|
||||
serializer = wsgi.JSONDictSerializer()
|
||||
result = serializer.serialize(input_dict)
|
||||
result = result.replace('\n', '').replace(' ', '')
|
||||
self.assertEqual(result, expected_json)
|
||||
|
||||
|
||||
class TextDeserializerTest(test.TestCase):
|
||||
def test_dispatch(self):
|
||||
deserializer = wsgi.TextDeserializer()
|
||||
deserializer.create = lambda x: 'pants'
|
||||
deserializer.default = lambda x: 'trousers'
|
||||
self.assertEqual(deserializer.deserialize({}, 'create'), 'pants')
|
||||
|
||||
def test_dispatch_default(self):
|
||||
deserializer = wsgi.TextDeserializer()
|
||||
deserializer.create = lambda x: 'pants'
|
||||
deserializer.default = lambda x: 'trousers'
|
||||
self.assertEqual(deserializer.deserialize({}, 'update'), 'trousers')
|
||||
|
||||
|
||||
class JSONDeserializerTest(test.TestCase):
|
||||
def test_json(self):
|
||||
data = """{"a": {
|
||||
"a1": "1",
|
||||
"a2": "2",
|
||||
"bs": ["1", "2", "3", {"c": {"c1": "1"}}],
|
||||
"d": {"e": "1"},
|
||||
"f": "1"}}"""
|
||||
as_dict = dict(a={
|
||||
'a1': '1',
|
||||
'a2': '2',
|
||||
'bs': ['1', '2', '3', {'c': dict(c1='1')}],
|
||||
'd': {'e': '1'},
|
||||
'f': '1'})
|
||||
deserializer = wsgi.JSONDeserializer()
|
||||
self.assertEqual(deserializer.deserialize(data), as_dict)
|
||||
|
||||
|
||||
class XMLDeserializerTest(test.TestCase):
|
||||
def test_xml(self):
|
||||
xml = """
|
||||
<a a1="1" a2="2">
|
||||
<bs><b>1</b><b>2</b><b>3</b><b><c c1="1"/></b></bs>
|
||||
<d><e>1</e></d>
|
||||
<f>1</f>
|
||||
</a>
|
||||
""".strip()
|
||||
as_dict = dict(a={
|
||||
'a1': '1',
|
||||
'a2': '2',
|
||||
'bs': ['1', '2', '3', {'c': dict(c1='1')}],
|
||||
'd': {'e': '1'},
|
||||
'f': '1'})
|
||||
metadata = {'plurals': {'bs': 'b', 'ts': 't'}}
|
||||
deserializer = wsgi.XMLDeserializer(metadata=metadata)
|
||||
self.assertEqual(deserializer.deserialize(xml), as_dict)
|
||||
|
||||
def test_xml_empty(self):
|
||||
xml = """<a></a>"""
|
||||
as_dict = {"a": {}}
|
||||
deserializer = wsgi.XMLDeserializer()
|
||||
self.assertEqual(deserializer.deserialize(xml), as_dict)
|
||||
|
||||
|
||||
class ResponseSerializerTest(test.TestCase):
|
||||
def setUp(self):
|
||||
class JSONSerializer(object):
|
||||
def serialize(self, data):
|
||||
return 'pew_json'
|
||||
|
||||
class XMLSerializer(object):
|
||||
def serialize(self, data):
|
||||
return 'pew_xml'
|
||||
|
||||
self.serializers = {
|
||||
'application/json': JSONSerializer(),
|
||||
'application/XML': XMLSerializer(),
|
||||
}
|
||||
|
||||
self.serializer = wsgi.ResponseSerializer(serializers=self.serializers)
|
||||
|
||||
def tearDown(self):
|
||||
pass
|
||||
|
||||
def test_get_serializer(self):
|
||||
self.assertEqual(self.serializer.get_serializer('application/json'),
|
||||
self.serializers['application/json'])
|
||||
|
||||
def test_get_serializer_unknown_content_type(self):
|
||||
self.assertRaises(exception.InvalidContentType,
|
||||
self.serializer.get_serializer,
|
||||
'application/unknown')
|
||||
|
||||
def test_serialize_response(self):
|
||||
response = self.serializer.serialize({}, 'application/json')
|
||||
self.assertEqual(response.headers['Content-Type'], 'application/json')
|
||||
self.assertEqual(response.body, 'pew_json')
|
||||
|
||||
def test_serialize_response_dict_to_unknown_content_type(self):
|
||||
self.assertRaises(exception.InvalidContentType,
|
||||
self.serializer.serialize,
|
||||
{}, 'application/unknown')
|
||||
|
||||
|
||||
class RequestDeserializerTest(test.TestCase):
|
||||
def setUp(self):
|
||||
class JSONDeserializer(object):
|
||||
def deserialize(self, data):
|
||||
return 'pew_json'
|
||||
|
||||
class XMLDeserializer(object):
|
||||
def deserialize(self, data):
|
||||
return 'pew_xml'
|
||||
|
||||
self.deserializers = {
|
||||
'application/json': JSONDeserializer(),
|
||||
'application/XML': XMLDeserializer(),
|
||||
}
|
||||
|
||||
self.deserializer = wsgi.RequestDeserializer(
|
||||
deserializers=self.deserializers)
|
||||
|
||||
def tearDown(self):
|
||||
pass
|
||||
|
||||
def test_get_deserializer(self):
|
||||
expected = self.deserializer.get_deserializer('application/json')
|
||||
self.assertEqual(expected, self.deserializers['application/json'])
|
||||
|
||||
def test_get_deserializer_unknown_content_type(self):
|
||||
self.assertRaises(exception.InvalidContentType,
|
||||
self.deserializer.get_deserializer,
|
||||
'application/unknown')
|
||||
|
||||
def test_get_expected_content_type(self):
|
||||
request = wsgi.Request.blank('/')
|
||||
request.headers['Accept'] = 'application/json'
|
||||
self.assertEqual(self.deserializer.get_expected_content_type(request),
|
||||
'application/json')
|
||||
|
||||
def test_get_action_args(self):
|
||||
env = {
|
||||
'wsgiorg.routing_args': [None, {
|
||||
'controller': None,
|
||||
'format': None,
|
||||
'action': 'update',
|
||||
'id': 12,
|
||||
}],
|
||||
}
|
||||
|
||||
expected = {'action': 'update', 'id': 12}
|
||||
|
||||
self.assertEqual(self.deserializer.get_action_args(env), expected)
|
||||
|
||||
def test_deserialize(self):
|
||||
def fake_get_routing_args(request):
|
||||
return {'action': 'create'}
|
||||
self.deserializer.get_action_args = fake_get_routing_args
|
||||
|
||||
request = wsgi.Request.blank('/')
|
||||
request.headers['Accept'] = 'application/xml'
|
||||
|
||||
deserialized = self.deserializer.deserialize(request)
|
||||
expected = ('create', {}, 'application/xml')
|
||||
|
||||
self.assertEqual(expected, deserialized)
|
||||
|
||||
|
||||
class ResourceTest(test.TestCase):
|
||||
def test_dispatch(self):
|
||||
class Controller(object):
|
||||
def index(self, req, pants=None):
|
||||
return pants
|
||||
|
||||
resource = wsgi.Resource(Controller())
|
||||
actual = resource.dispatch(None, 'index', {'pants': 'off'})
|
||||
expected = 'off'
|
||||
self.assertEqual(actual, expected)
|
||||
|
||||
def test_dispatch_unknown_controller_action(self):
|
||||
class Controller(object):
|
||||
def index(self, req, pants=None):
|
||||
return pants
|
||||
|
||||
resource = wsgi.Resource(Controller())
|
||||
self.assertRaises(AttributeError, resource.dispatch,
|
||||
None, 'create', {})
|
||||
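Taken together, the tests in this new file describe how the serializer and deserializer helpers in nova.api.openstack.wsgi are meant to be combined. A minimal usage sketch, based only on the behaviour asserted above (the sample payload and xmlns value are illustrative):

    from nova.api.openstack import wsgi

    serializers = {
        'application/json': wsgi.JSONDictSerializer(),
        'application/xml': wsgi.XMLDictSerializer(xmlns='asdf'),
    }
    response = wsgi.ResponseSerializer(serializers=serializers).serialize(
        {'servers': {'a': [2, 3]}}, 'application/json')
    print response.headers['Content-Type']   # application/json
    print response.body                      # '{"servers": {"a": [2, 3]}}', whitespace may differ

    # Incoming bodies go the other way, through the deserializers.
    print wsgi.JSONDeserializer().deserialize('{"a": {"a1": "1"}}')
    # {'a': {'a1': '1'}} (strings may come back as unicode)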
@@ -67,192 +67,3 @@ class Test(test.TestCase):
|
||||
self.assertEqual(result.body, "Router result")
|
||||
result = webob.Request.blank('/bad').get_response(Router())
|
||||
self.assertNotEqual(result.body, "Router result")
|
||||
|
||||
|
||||
class ControllerTest(test.TestCase):
|
||||
|
||||
class TestRouter(wsgi.Router):
|
||||
|
||||
class TestController(wsgi.Controller):
|
||||
|
||||
_serialization_metadata = {
|
||||
'application/xml': {
|
||||
"attributes": {
|
||||
"test": ["id"]}}}
|
||||
|
||||
def show(self, req, id): # pylint: disable=W0622,C0103
|
||||
return {"test": {"id": id}}
|
||||
|
||||
def __init__(self):
|
||||
mapper = routes.Mapper()
|
||||
mapper.resource("test", "tests", controller=self.TestController())
|
||||
wsgi.Router.__init__(self, mapper)
|
||||
|
||||
def test_show(self):
|
||||
request = wsgi.Request.blank('/tests/123')
|
||||
result = request.get_response(self.TestRouter())
|
||||
self.assertEqual(json.loads(result.body), {"test": {"id": "123"}})
|
||||
|
||||
def test_response_content_type_from_accept_xml(self):
|
||||
request = webob.Request.blank('/tests/123')
|
||||
request.headers["Accept"] = "application/xml"
|
||||
result = request.get_response(self.TestRouter())
|
||||
self.assertEqual(result.headers["Content-Type"], "application/xml")
|
||||
|
||||
def test_response_content_type_from_accept_json(self):
|
||||
request = wsgi.Request.blank('/tests/123')
|
||||
request.headers["Accept"] = "application/json"
|
||||
result = request.get_response(self.TestRouter())
|
||||
self.assertEqual(result.headers["Content-Type"], "application/json")
|
||||
|
||||
def test_response_content_type_from_query_extension_xml(self):
|
||||
request = wsgi.Request.blank('/tests/123.xml')
|
||||
result = request.get_response(self.TestRouter())
|
||||
self.assertEqual(result.headers["Content-Type"], "application/xml")
|
||||
|
||||
def test_response_content_type_from_query_extension_json(self):
|
||||
request = wsgi.Request.blank('/tests/123.json')
|
||||
result = request.get_response(self.TestRouter())
|
||||
self.assertEqual(result.headers["Content-Type"], "application/json")
|
||||
|
||||
def test_response_content_type_default_when_unsupported(self):
|
||||
request = wsgi.Request.blank('/tests/123.unsupported')
|
||||
request.headers["Accept"] = "application/unsupported1"
|
||||
result = request.get_response(self.TestRouter())
|
||||
self.assertEqual(result.status_int, 200)
|
||||
self.assertEqual(result.headers["Content-Type"], "application/json")
|
||||
|
||||
|
||||
class RequestTest(test.TestCase):
|
||||
|
||||
def test_request_content_type_missing(self):
|
||||
request = wsgi.Request.blank('/tests/123')
|
||||
request.body = "<body />"
|
||||
self.assertRaises(webob.exc.HTTPBadRequest, request.get_content_type)
|
||||
|
||||
def test_request_content_type_unsupported(self):
|
||||
request = wsgi.Request.blank('/tests/123')
|
||||
request.headers["Content-Type"] = "text/html"
|
||||
request.body = "asdf<br />"
|
||||
self.assertRaises(webob.exc.HTTPBadRequest, request.get_content_type)
|
||||
|
||||
def test_request_content_type_with_charset(self):
|
||||
request = wsgi.Request.blank('/tests/123')
|
||||
request.headers["Content-Type"] = "application/json; charset=UTF-8"
|
||||
result = request.get_content_type()
|
||||
self.assertEqual(result, "application/json")
|
||||
|
||||
def test_content_type_from_accept_xml(self):
|
||||
request = wsgi.Request.blank('/tests/123')
|
||||
request.headers["Accept"] = "application/xml"
|
||||
result = request.best_match_content_type()
|
||||
self.assertEqual(result, "application/xml")
|
||||
|
||||
request = wsgi.Request.blank('/tests/123')
|
||||
request.headers["Accept"] = "application/json"
|
||||
result = request.best_match_content_type()
|
||||
self.assertEqual(result, "application/json")
|
||||
|
||||
request = wsgi.Request.blank('/tests/123')
|
||||
request.headers["Accept"] = "application/xml, application/json"
|
||||
result = request.best_match_content_type()
|
||||
self.assertEqual(result, "application/json")
|
||||
|
||||
request = wsgi.Request.blank('/tests/123')
|
||||
request.headers["Accept"] = \
|
||||
"application/json; q=0.3, application/xml; q=0.9"
|
||||
result = request.best_match_content_type()
|
||||
self.assertEqual(result, "application/xml")
|
||||
|
||||
def test_content_type_from_query_extension(self):
|
||||
request = wsgi.Request.blank('/tests/123.xml')
|
||||
result = request.best_match_content_type()
|
||||
self.assertEqual(result, "application/xml")
|
||||
|
||||
request = wsgi.Request.blank('/tests/123.json')
|
||||
result = request.best_match_content_type()
|
||||
self.assertEqual(result, "application/json")
|
||||
|
||||
request = wsgi.Request.blank('/tests/123.invalid')
|
||||
result = request.best_match_content_type()
|
||||
self.assertEqual(result, "application/json")
|
||||
|
||||
def test_content_type_accept_and_query_extension(self):
|
||||
request = wsgi.Request.blank('/tests/123.xml')
|
||||
request.headers["Accept"] = "application/json"
|
||||
result = request.best_match_content_type()
|
||||
self.assertEqual(result, "application/xml")
|
||||
|
||||
def test_content_type_accept_default(self):
|
||||
request = wsgi.Request.blank('/tests/123.unsupported')
|
||||
request.headers["Accept"] = "application/unsupported1"
|
||||
result = request.best_match_content_type()
|
||||
self.assertEqual(result, "application/json")
|
||||
|
||||
|
||||
class SerializerTest(test.TestCase):
|
||||
|
||||
def test_xml(self):
|
||||
input_dict = dict(servers=dict(a=(2, 3)))
|
||||
expected_xml = '<servers><a>(2,3)</a></servers>'
|
||||
serializer = wsgi.Serializer()
|
||||
result = serializer.serialize(input_dict, "application/xml")
|
||||
result = result.replace('\n', '').replace(' ', '')
|
||||
self.assertEqual(result, expected_xml)
|
||||
|
||||
def test_json(self):
|
||||
input_dict = dict(servers=dict(a=(2, 3)))
|
||||
expected_json = '{"servers":{"a":[2,3]}}'
|
||||
serializer = wsgi.Serializer()
|
||||
result = serializer.serialize(input_dict, "application/json")
|
||||
result = result.replace('\n', '').replace(' ', '')
|
||||
self.assertEqual(result, expected_json)
|
||||
|
||||
def test_unsupported_content_type(self):
|
||||
serializer = wsgi.Serializer()
|
||||
self.assertRaises(exception.InvalidContentType, serializer.serialize,
|
||||
{}, "text/null")
|
||||
|
||||
def test_deserialize_json(self):
|
||||
data = """{"a": {
|
||||
"a1": "1",
|
||||
"a2": "2",
|
||||
"bs": ["1", "2", "3", {"c": {"c1": "1"}}],
|
||||
"d": {"e": "1"},
|
||||
"f": "1"}}"""
|
||||
as_dict = dict(a={
|
||||
'a1': '1',
|
||||
'a2': '2',
|
||||
'bs': ['1', '2', '3', {'c': dict(c1='1')}],
|
||||
'd': {'e': '1'},
|
||||
'f': '1'})
|
||||
metadata = {}
|
||||
serializer = wsgi.Serializer(metadata)
|
||||
self.assertEqual(serializer.deserialize(data, "application/json"),
|
||||
as_dict)
|
||||
|
||||
def test_deserialize_xml(self):
|
||||
xml = """
|
||||
<a a1="1" a2="2">
|
||||
<bs><b>1</b><b>2</b><b>3</b><b><c c1="1"/></b></bs>
|
||||
<d><e>1</e></d>
|
||||
<f>1</f>
|
||||
</a>
|
||||
""".strip()
|
||||
as_dict = dict(a={
|
||||
'a1': '1',
|
||||
'a2': '2',
|
||||
'bs': ['1', '2', '3', {'c': dict(c1='1')}],
|
||||
'd': {'e': '1'},
|
||||
'f': '1'})
|
||||
metadata = {'application/xml': dict(plurals={'bs': 'b', 'ts': 't'})}
|
||||
serializer = wsgi.Serializer(metadata)
|
||||
self.assertEqual(serializer.deserialize(xml, "application/xml"),
|
||||
as_dict)
|
||||
|
||||
def test_deserialize_empty_xml(self):
|
||||
xml = """<a></a>"""
|
||||
as_dict = {"a": {}}
|
||||
serializer = wsgi.Serializer()
|
||||
self.assertEqual(serializer.deserialize(xml, "application/xml"),
|
||||
as_dict)
|
||||
|
||||
@@ -194,7 +194,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
post = {}
post['rebuild'] = {
"imageRef": "https://localhost/v1.1/32278/images/2",
"name": "blah"
"name": "blah",
}

self.api.post_server_action(created_server_id, post)
@@ -224,7 +224,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
post = {}
post['rebuild'] = {
"imageRef": "https://localhost/v1.1/32278/images/2",
"name": "blah"
"name": "blah",
}

metadata = {}
@@ -267,7 +267,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
post = {}
post['rebuild'] = {
"imageRef": "https://localhost/v1.1/32278/images/2",
"name": "blah"
"name": "blah",
}

metadata = {}

@@ -32,7 +32,7 @@ class XmlTests(integrated_helpers._IntegratedTestBase):
""""Some basic XML sanity checks."""

def test_namespace_limits(self):
"""/limits should have v1.0 namespace (hasn't changed in 1.1)."""
"""/limits should have v1.1 namespace (has changed in 1.1)."""
headers = {}
headers['Accept'] = 'application/xml'

@@ -40,7 +40,7 @@ class XmlTests(integrated_helpers._IntegratedTestBase):
data = response.read()
LOG.debug("data: %s" % data)

prefix = '<limits xmlns="%s"' % common.XML_NS_V10
prefix = '<limits xmlns="%s"' % common.XML_NS_V11
self.assertTrue(data.startswith(prefix))

def test_namespace_servers(self):

@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler Host Filter Drivers.
Tests For Scheduler Host Filters.
"""

import json
@@ -31,7 +31,7 @@ class FakeZoneManager:


class HostFilterTestCase(test.TestCase):
"""Test case for host filter drivers."""
"""Test case for host filters."""

def _host_caps(self, multiplier):
# Returns host capabilities in the following way:
@@ -57,8 +57,8 @@ class HostFilterTestCase(test.TestCase):
'host_name-label': 'xs-%s' % multiplier}

def setUp(self):
self.old_flag = FLAGS.default_host_filter_driver
FLAGS.default_host_filter_driver = \
self.old_flag = FLAGS.default_host_filter
FLAGS.default_host_filter = \
'nova.scheduler.host_filter.AllHostsFilter'
self.instance_type = dict(name='tiny',
memory_mb=50,
@@ -76,51 +76,52 @@ class HostFilterTestCase(test.TestCase):
|
||||
self.zone_manager.service_states = states
|
||||
|
||||
def tearDown(self):
|
||||
FLAGS.default_host_filter_driver = self.old_flag
|
||||
FLAGS.default_host_filter = self.old_flag
|
||||
|
||||
def test_choose_driver(self):
|
||||
# Test default driver ...
|
||||
driver = host_filter.choose_driver()
|
||||
self.assertEquals(driver._full_name(),
|
||||
def test_choose_filter(self):
|
||||
# Test default filter ...
|
||||
hf = host_filter.choose_host_filter()
|
||||
self.assertEquals(hf._full_name(),
|
||||
'nova.scheduler.host_filter.AllHostsFilter')
|
||||
# Test valid driver ...
|
||||
driver = host_filter.choose_driver(
|
||||
'nova.scheduler.host_filter.FlavorFilter')
|
||||
self.assertEquals(driver._full_name(),
|
||||
'nova.scheduler.host_filter.FlavorFilter')
|
||||
# Test invalid driver ...
|
||||
# Test valid filter ...
|
||||
hf = host_filter.choose_host_filter(
|
||||
'nova.scheduler.host_filter.InstanceTypeFilter')
|
||||
self.assertEquals(hf._full_name(),
|
||||
'nova.scheduler.host_filter.InstanceTypeFilter')
|
||||
# Test invalid filter ...
|
||||
try:
|
||||
host_filter.choose_driver('does not exist')
|
||||
self.fail("Should not find driver")
|
||||
except exception.SchedulerHostFilterDriverNotFound:
|
||||
host_filter.choose_host_filter('does not exist')
|
||||
self.fail("Should not find host filter.")
|
||||
except exception.SchedulerHostFilterNotFound:
|
||||
pass
|
||||
|
||||
def test_all_host_driver(self):
|
||||
driver = host_filter.AllHostsFilter()
|
||||
cooked = driver.instance_type_to_filter(self.instance_type)
|
||||
hosts = driver.filter_hosts(self.zone_manager, cooked)
|
||||
def test_all_host_filter(self):
|
||||
hf = host_filter.AllHostsFilter()
|
||||
cooked = hf.instance_type_to_filter(self.instance_type)
|
||||
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||
self.assertEquals(10, len(hosts))
|
||||
for host, capabilities in hosts:
|
||||
self.assertTrue(host.startswith('host'))
|
||||
|
||||
def test_flavor_driver(self):
|
||||
driver = host_filter.FlavorFilter()
|
||||
def test_instance_type_filter(self):
|
||||
hf = host_filter.InstanceTypeFilter()
|
||||
# filter all hosts that can support 50 ram and 500 disk
|
||||
name, cooked = driver.instance_type_to_filter(self.instance_type)
|
||||
self.assertEquals('nova.scheduler.host_filter.FlavorFilter', name)
|
||||
hosts = driver.filter_hosts(self.zone_manager, cooked)
|
||||
name, cooked = hf.instance_type_to_filter(self.instance_type)
|
||||
self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter',
|
||||
name)
|
||||
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||
self.assertEquals(6, len(hosts))
|
||||
just_hosts = [host for host, caps in hosts]
|
||||
just_hosts.sort()
|
||||
self.assertEquals('host05', just_hosts[0])
|
||||
self.assertEquals('host10', just_hosts[5])
|
||||
|
||||
def test_json_driver(self):
|
||||
driver = host_filter.JsonFilter()
|
||||
def test_json_filter(self):
|
||||
hf = host_filter.JsonFilter()
|
||||
# filter all hosts that can support 50 ram and 500 disk
|
||||
name, cooked = driver.instance_type_to_filter(self.instance_type)
|
||||
name, cooked = hf.instance_type_to_filter(self.instance_type)
|
||||
self.assertEquals('nova.scheduler.host_filter.JsonFilter', name)
|
||||
hosts = driver.filter_hosts(self.zone_manager, cooked)
|
||||
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||
self.assertEquals(6, len(hosts))
|
||||
just_hosts = [host for host, caps in hosts]
|
||||
just_hosts.sort()
|
||||
@@ -132,15 +133,16 @@ class HostFilterTestCase(test.TestCase):
|
||||
raw = ['or',
|
||||
['and',
|
||||
['<', '$compute.host_memory_free', 30],
|
||||
['<', '$compute.disk_available', 300]
|
||||
['<', '$compute.disk_available', 300],
|
||||
],
|
||||
['and',
|
||||
['>', '$compute.host_memory_free', 70],
|
||||
['>', '$compute.disk_available', 700]
|
||||
]
|
||||
['>', '$compute.disk_available', 700],
|
||||
],
|
||||
]
|
||||
|
||||
cooked = json.dumps(raw)
|
||||
hosts = driver.filter_hosts(self.zone_manager, cooked)
|
||||
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||
|
||||
self.assertEquals(5, len(hosts))
|
||||
just_hosts = [host for host, caps in hosts]
|
||||
@@ -152,7 +154,7 @@ class HostFilterTestCase(test.TestCase):
|
||||
['=', '$compute.host_memory_free', 30],
|
||||
]
|
||||
cooked = json.dumps(raw)
|
||||
hosts = driver.filter_hosts(self.zone_manager, cooked)
|
||||
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||
|
||||
self.assertEquals(9, len(hosts))
|
||||
just_hosts = [host for host, caps in hosts]
|
||||
@@ -162,7 +164,7 @@ class HostFilterTestCase(test.TestCase):
|
||||
|
||||
raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100]
|
||||
cooked = json.dumps(raw)
|
||||
hosts = driver.filter_hosts(self.zone_manager, cooked)
|
||||
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||
|
||||
self.assertEquals(5, len(hosts))
|
||||
just_hosts = [host for host, caps in hosts]
|
||||
@@ -174,35 +176,30 @@ class HostFilterTestCase(test.TestCase):
|
||||
raw = ['unknown command', ]
|
||||
cooked = json.dumps(raw)
|
||||
try:
|
||||
driver.filter_hosts(self.zone_manager, cooked)
|
||||
hf.filter_hosts(self.zone_manager, cooked)
|
||||
self.fail("Should give KeyError")
|
||||
except KeyError, e:
|
||||
pass
|
||||
|
||||
self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps([])))
|
||||
self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps({})))
|
||||
self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps(
|
||||
['not', True, False, True, False]
|
||||
)))
|
||||
self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps([])))
|
||||
self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps({})))
|
||||
self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps(
|
||||
['not', True, False, True, False])))
|
||||
|
||||
try:
|
||||
driver.filter_hosts(self.zone_manager, json.dumps(
|
||||
'not', True, False, True, False
|
||||
))
|
||||
hf.filter_hosts(self.zone_manager, json.dumps(
|
||||
'not', True, False, True, False))
|
||||
self.fail("Should give KeyError")
|
||||
except KeyError, e:
|
||||
pass
|
||||
|
||||
self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps(
|
||||
['=', '$foo', 100]
|
||||
)))
|
||||
self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps(
|
||||
['=', '$.....', 100]
|
||||
)))
|
||||
self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps(
|
||||
['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]]
|
||||
)))
|
||||
self.assertFalse(hf.filter_hosts(self.zone_manager,
|
||||
json.dumps(['=', '$foo', 100])))
|
||||
self.assertFalse(hf.filter_hosts(self.zone_manager,
|
||||
json.dumps(['=', '$.....', 100])))
|
||||
self.assertFalse(hf.filter_hosts(self.zone_manager,
|
||||
json.dumps(
|
||||
['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]])))
|
||||
|
||||
self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps(
|
||||
['=', {}, ['>', '$missing....foo']]
|
||||
)))
|
||||
self.assertFalse(hf.filter_hosts(self.zone_manager,
|
||||
json.dumps(['=', {}, ['>', '$missing....foo']])))
|
||||
|
||||
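The JsonFilter queries used above are plain nested lists, where the first element is an operator and '$compute....' strings are resolved against each host's reported capabilities before comparison. A small illustrative query in the same style (the thresholds are made up, and zm stands for a ZoneManager populated the way setUp does above):

    import json

    from nova.scheduler import host_filter

    hf = host_filter.JsonFilter()
    # hosts with more than 70 units of free memory AND more than 700 of disk
    query = ['and',
             ['>', '$compute.host_memory_free', 70],
             ['>', '$compute.disk_available', 700]]
    for hostname, capabilities in hf.filter_hosts(zm, json.dumps(query)):
        print hostname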
@@ -18,6 +18,7 @@ import eventlet
import mox
import os
import re
import shutil
import sys

from xml.etree.ElementTree import fromstring as xml_to_tree
@@ -645,6 +646,8 @@ class LibvirtConnTestCase(test.TestCase):
except Exception, e:
count = (0 <= str(e.message).find('Unexpected method call'))

shutil.rmtree(os.path.join(FLAGS.instances_path, instance.name))

self.assertTrue(count)

def test_get_host_ip_addr(self):

@@ -38,16 +38,16 @@ class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler):
class FakeZoneManager(zone_manager.ZoneManager):
def __init__(self):
self.service_states = {
'host1': {
'compute': {'ram': 1000}
},
'host2': {
'compute': {'ram': 2000}
},
'host3': {
'compute': {'ram': 3000}
}
}
'host1': {
'compute': {'ram': 1000},
},
'host2': {
'compute': {'ram': 2000},
},
'host3': {
'compute': {'ram': 3000},
},
}


class FakeEmptyZoneManager(zone_manager.ZoneManager):
@@ -116,4 +116,6 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
sched.set_zone_manager(zm)

fake_context = {}
self.assertRaises(driver.NoValidHost, sched.schedule, fake_context, {})
self.assertRaises(driver.NoValidHost, sched.schedule_run_instance,
fake_context, 1,
dict(host_filter=None, instance_type={}))

@@ -82,6 +82,21 @@ class FakeConnection(driver.ComputeDriver):

def __init__(self):
self.instances = {}
self.host_status = {
'host_name-description': 'Fake Host',
'host_hostname': 'fake-mini',
'host_memory_total': 8000000000,
'host_memory_overhead': 10000000,
'host_memory_free': 7900000000,
'host_memory_free_computed': 7900000000,
'host_other_config': {},
'host_ip_address': '192.168.1.109',
'host_cpu_info': {},
'disk_available': 500000000000,
'disk_total': 600000000000,
'disk_used': 100000000000,
'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f',
'host_name_label': 'fake-mini'}

@classmethod
def instance(cls):
@@ -472,3 +487,11 @@ class FakeConnection(driver.ComputeDriver):
def test_remove_vm(self, instance_name):
""" Removes the named VM, as if it crashed. For testing"""
self.instances.pop(instance_name)

def update_host_status(self):
"""Return fake Host Status of ram, disk, network."""
return self.host_status

def get_host_stats(self, refresh=False):
"""Return fake Host Status of ram, disk, network."""
return self.host_status

@@ -488,19 +488,27 @@ class LibvirtConnection(driver.ComputeDriver):

@exception.wrap_exception
def pause(self, instance, callback):
raise exception.ApiError("pause not supported for libvirt.")
"""Pause VM instance"""
dom = self._lookup_by_name(instance.name)
dom.suspend()

@exception.wrap_exception
def unpause(self, instance, callback):
raise exception.ApiError("unpause not supported for libvirt.")
"""Unpause paused VM instance"""
dom = self._lookup_by_name(instance.name)
dom.resume()

@exception.wrap_exception
def suspend(self, instance, callback):
raise exception.ApiError("suspend not supported for libvirt")
"""Suspend the specified instance"""
dom = self._lookup_by_name(instance.name)
dom.managedSave(0)

@exception.wrap_exception
def resume(self, instance, callback):
raise exception.ApiError("resume not supported for libvirt")
"""resume the specified instance"""
dom = self._lookup_by_name(instance.name)
dom.create()

@exception.wrap_exception
def rescue(self, instance):
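The new pause/unpause/suspend/resume bodies above are thin wrappers over the libvirt domain API. For reference, the same calls can be exercised directly with libvirt-python; the connection URI and domain name below are only placeholders:

    import libvirt

    conn = libvirt.open('qemu:///system')          # URI is illustrative
    dom = conn.lookupByName('instance-00000001')   # as _lookup_by_name does

    dom.suspend()        # pause: vcpus stop, the domain stays in memory
    dom.resume()         # unpause
    dom.managedSave(0)   # suspend: save state to disk and stop the domain
    dom.create()         # resume: boot the domain, restoring the saved state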
nova/wsgi.py (252 lines)
@@ -85,36 +85,7 @@ class Server(object):


class Request(webob.Request):

def best_match_content_type(self):
"""Determine the most acceptable content-type.

Based on the query extension then the Accept header.

"""
parts = self.path.rsplit('.', 1)

if len(parts) > 1:
format = parts[1]
if format in ['json', 'xml']:
return 'application/{0}'.format(parts[1])

ctypes = ['application/json', 'application/xml']
bm = self.accept.best_match(ctypes)

return bm or 'application/json'

def get_content_type(self):
allowed_types = ("application/xml", "application/json")
if not "Content-Type" in self.headers:
msg = _("Missing Content-Type")
LOG.debug(msg)
raise webob.exc.HTTPBadRequest(msg)
type = self.content_type
if type in allowed_types:
return type
LOG.debug(_("Wrong Content-Type: %s") % type)
raise webob.exc.HTTPBadRequest("Invalid content type")
pass


class Application(object):
@@ -289,8 +260,8 @@ class Router(object):

Each route in `mapper` must specify a 'controller', which is a
WSGI app to call. You'll probably want to specify an 'action' as
well and have your controller be a wsgi.Controller, who will route
the request to the action method.
well and have your controller be an object that can route
the request to the action-specific method.

Examples:
mapper = routes.Mapper()
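With wsgi.Controller removed (see the block deleted below), a route's 'controller' is just any WSGI app that knows how to reach its own action methods. A hedged sketch of what that wiring might look like, assuming nova.api.openstack.wsgi.Resource (whose dispatch behaviour is covered by the new tests earlier in this change) is the object used for that purpose; the ServerController here is made up:

    import routes

    from nova import wsgi as base_wsgi
    import nova.api.openstack.wsgi as os_wsgi


    class ServerController(object):
        def index(self, req):
            return {'servers': []}


    class APIRouter(base_wsgi.Router):
        def __init__(self):
            mapper = routes.Mapper()
            # The mapped 'controller' no longer subclasses wsgi.Controller;
            # Resource wraps it and routes requests to the action methods.
            mapper.resource('server', 'servers',
                            controller=os_wsgi.Resource(ServerController()))
            base_wsgi.Router.__init__(self, mapper)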
@@ -338,223 +309,6 @@ class Router(object):
|
||||
return app
|
||||
|
||||
|
||||
class Controller(object):
|
||||
"""WSGI app that dispatched to methods.
|
||||
|
||||
WSGI app that reads routing information supplied by RoutesMiddleware
|
||||
and calls the requested action method upon itself. All action methods
|
||||
must, in addition to their normal parameters, accept a 'req' argument
|
||||
which is the incoming wsgi.Request. They raise a webob.exc exception,
|
||||
or return a dict which will be serialized by requested content type.
|
||||
|
||||
"""
|
||||
|
||||
@webob.dec.wsgify(RequestClass=Request)
|
||||
def __call__(self, req):
|
||||
"""Call the method specified in req.environ by RoutesMiddleware."""
|
||||
arg_dict = req.environ['wsgiorg.routing_args'][1]
|
||||
action = arg_dict['action']
|
||||
method = getattr(self, action)
|
||||
LOG.debug("%s %s" % (req.method, req.url))
|
||||
del arg_dict['controller']
|
||||
del arg_dict['action']
|
||||
if 'format' in arg_dict:
|
||||
del arg_dict['format']
|
||||
arg_dict['req'] = req
|
||||
result = method(**arg_dict)
|
||||
|
||||
if type(result) is dict:
|
||||
content_type = req.best_match_content_type()
|
||||
default_xmlns = self.get_default_xmlns(req)
|
||||
body = self._serialize(result, content_type, default_xmlns)
|
||||
|
||||
response = webob.Response()
|
||||
response.headers['Content-Type'] = content_type
|
||||
response.body = body
|
||||
msg_dict = dict(url=req.url, status=response.status_int)
|
||||
msg = _("%(url)s returned with HTTP %(status)d") % msg_dict
|
||||
LOG.debug(msg)
|
||||
return response
|
||||
else:
|
||||
return result
|
||||
|
||||
def _serialize(self, data, content_type, default_xmlns):
|
||||
"""Serialize the given dict to the provided content_type.
|
||||
|
||||
Uses self._serialization_metadata if it exists, which is a dict mapping
|
||||
MIME types to information needed to serialize to that type.
|
||||
|
||||
"""
|
||||
_metadata = getattr(type(self), '_serialization_metadata', {})
|
||||
|
||||
serializer = Serializer(_metadata, default_xmlns)
|
||||
try:
|
||||
return serializer.serialize(data, content_type)
|
||||
except exception.InvalidContentType:
|
||||
raise webob.exc.HTTPNotAcceptable()
|
||||
|
||||
def _deserialize(self, data, content_type):
|
||||
"""Deserialize the request body to the specefied content type.
|
||||
|
||||
Uses self._serialization_metadata if it exists, which is a dict mapping
|
||||
MIME types to information needed to serialize to that type.
|
||||
|
||||
"""
|
||||
_metadata = getattr(type(self), '_serialization_metadata', {})
|
||||
serializer = Serializer(_metadata)
|
||||
return serializer.deserialize(data, content_type)
|
||||
|
||||
def get_default_xmlns(self, req):
|
||||
"""Provide the XML namespace to use if none is otherwise specified."""
|
||||
return None
|
||||
|
||||
|
||||
class Serializer(object):
|
||||
"""Serializes and deserializes dictionaries to certain MIME types."""
|
||||
|
||||
def __init__(self, metadata=None, default_xmlns=None):
|
||||
"""Create a serializer based on the given WSGI environment.
|
||||
|
||||
'metadata' is an optional dict mapping MIME types to information
|
||||
needed to serialize a dictionary to that type.
|
||||
|
||||
"""
|
||||
self.metadata = metadata or {}
|
||||
self.default_xmlns = default_xmlns
|
||||
|
||||
def _get_serialize_handler(self, content_type):
|
||||
handlers = {
|
||||
'application/json': self._to_json,
|
||||
'application/xml': self._to_xml,
|
||||
}
|
||||
|
||||
try:
|
||||
return handlers[content_type]
|
||||
except Exception:
|
||||
raise exception.InvalidContentType(content_type=content_type)
|
||||
|
||||
def serialize(self, data, content_type):
|
||||
"""Serialize a dictionary into the specified content type."""
|
||||
return self._get_serialize_handler(content_type)(data)
|
||||
|
||||
def deserialize(self, datastring, content_type):
|
||||
"""Deserialize a string to a dictionary.
|
||||
|
||||
The string must be in the format of a supported MIME type.
|
||||
|
||||
"""
|
||||
return self.get_deserialize_handler(content_type)(datastring)
|
||||
|
||||
def get_deserialize_handler(self, content_type):
|
||||
handlers = {
|
||||
'application/json': self._from_json,
|
||||
'application/xml': self._from_xml,
|
||||
}
|
||||
|
||||
try:
|
||||
return handlers[content_type]
|
||||
except Exception:
|
||||
raise exception.InvalidContentType(content_type=content_type)
|
||||
|
||||
def _from_json(self, datastring):
|
||||
return utils.loads(datastring)
|
||||
|
||||
def _from_xml(self, datastring):
|
||||
xmldata = self.metadata.get('application/xml', {})
|
||||
plurals = set(xmldata.get('plurals', {}))
|
||||
node = minidom.parseString(datastring).childNodes[0]
|
||||
return {node.nodeName: self._from_xml_node(node, plurals)}
|
||||
|
||||
def _from_xml_node(self, node, listnames):
|
||||
"""Convert a minidom node to a simple Python type.
|
||||
|
||||
listnames is a collection of names of XML nodes whose subnodes should
|
||||
be considered list items.
|
||||
|
||||
"""
|
||||
if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3:
|
||||
return node.childNodes[0].nodeValue
|
||||
elif node.nodeName in listnames:
|
||||
return [self._from_xml_node(n, listnames) for n in node.childNodes]
|
||||
else:
|
||||
result = dict()
|
||||
for attr in node.attributes.keys():
|
||||
result[attr] = node.attributes[attr].nodeValue
|
||||
for child in node.childNodes:
|
||||
if child.nodeType != node.TEXT_NODE:
|
||||
result[child.nodeName] = self._from_xml_node(child,
|
||||
listnames)
|
||||
return result
|
||||
|
||||
def _to_json(self, data):
|
||||
return utils.dumps(data)
|
||||
|
||||
def _to_xml(self, data):
|
||||
metadata = self.metadata.get('application/xml', {})
|
||||
# We expect data to contain a single key which is the XML root.
|
||||
root_key = data.keys()[0]
|
||||
doc = minidom.Document()
|
||||
node = self._to_xml_node(doc, metadata, root_key, data[root_key])
|
||||
|
||||
xmlns = node.getAttribute('xmlns')
|
||||
if not xmlns and self.default_xmlns:
|
||||
node.setAttribute('xmlns', self.default_xmlns)
|
||||
|
||||
return node.toprettyxml(indent=' ')
|
||||
|
||||
def _to_xml_node(self, doc, metadata, nodename, data):
|
||||
"""Recursive method to convert data members to XML nodes."""
|
||||
result = doc.createElement(nodename)
|
||||
|
||||
# Set the xml namespace if one is specified
|
||||
# TODO(justinsb): We could also use prefixes on the keys
|
||||
xmlns = metadata.get('xmlns', None)
|
||||
if xmlns:
|
||||
result.setAttribute('xmlns', xmlns)
|
||||
|
||||
if type(data) is list:
|
||||
collections = metadata.get('list_collections', {})
|
||||
if nodename in collections:
|
||||
metadata = collections[nodename]
|
||||
for item in data:
|
||||
node = doc.createElement(metadata['item_name'])
|
||||
node.setAttribute(metadata['item_key'], str(item))
|
||||
result.appendChild(node)
|
||||
return result
|
||||
singular = metadata.get('plurals', {}).get(nodename, None)
|
||||
if singular is None:
|
||||
if nodename.endswith('s'):
|
||||
singular = nodename[:-1]
|
||||
else:
|
||||
singular = 'item'
|
||||
for item in data:
|
||||
node = self._to_xml_node(doc, metadata, singular, item)
|
||||
result.appendChild(node)
|
||||
elif type(data) is dict:
|
||||
collections = metadata.get('dict_collections', {})
|
||||
if nodename in collections:
|
||||
metadata = collections[nodename]
|
||||
for k, v in data.items():
|
||||
node = doc.createElement(metadata['item_name'])
|
||||
node.setAttribute(metadata['item_key'], str(k))
|
||||
text = doc.createTextNode(str(v))
|
||||
node.appendChild(text)
|
||||
result.appendChild(node)
|
||||
return result
|
||||
attrs = metadata.get('attributes', {}).get(nodename, {})
|
||||
for k, v in data.items():
|
||||
if k in attrs:
|
||||
result.setAttribute(k, str(v))
|
||||
else:
|
||||
node = self._to_xml_node(doc, metadata, k, v)
|
||||
result.appendChild(node)
|
||||
else:
|
||||
# Type is atom
|
||||
node = doc.createTextNode(str(data))
|
||||
result.appendChild(node)
|
||||
return result
|
||||
|
||||
|
||||
def paste_config_file(basename):
|
||||
"""Find the best location in the system for a paste config file.
|
||||
|
||||
|
||||
plugins/xenserver/networking/etc/init.d/openvswitch-nova (new executable file, 96 lines)
@@ -0,0 +1,96 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# openvswitch-nova
|
||||
#
|
||||
# chkconfig: 2345 96 89
|
||||
# description: Apply initial OVS flows for Nova
|
||||
|
||||
# Copyright 2011 OpenStack LLC.
|
||||
# Copyright (C) 2009, 2010, 2011 Nicira Networks, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
# source function library
|
||||
if [ -f /etc/init.d/functions ]; then
|
||||
. /etc/init.d/functions
|
||||
elif [ -f /etc/rc.d/init.d/functions ]; then
|
||||
. /etc/rc.d/init.d/functions
|
||||
elif [ -f /lib/lsb/init-functions ]; then
|
||||
. /lib/lsb/init-functions
|
||||
else
|
||||
echo "$0: missing LSB shell function library" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
OVS_CONFIGURE_BASE_FLOWS=/etc/xensource/scripts/ovs_configure_base_flows.py
|
||||
|
||||
if test -e /etc/sysconfig/openvswitch-nova; then
|
||||
. /etc/sysconfig/openvswitch-nova
|
||||
else
|
||||
echo "$0: missing configuration file: /etc/sysconfig/openvswitch-nova"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if test -e /etc/xensource/network.conf; then
|
||||
NETWORK_MODE=$(cat /etc/xensource/network.conf)
|
||||
fi
|
||||
|
||||
case ${NETWORK_MODE:=openvswitch} in
|
||||
vswitch|openvswitch)
|
||||
;;
|
||||
bridge)
|
||||
exit 0
|
||||
;;
|
||||
*)
|
||||
echo "Open vSwitch disabled (/etc/xensource/network.conf is invalid)" >&2
|
||||
exit 0
|
||||
;;
|
||||
esac
|
||||
|
||||
function run_ovs_conf_base_flows {
|
||||
# expected format: DEVICE_BRIDGES="eth0:xenbr0 eth1:xenbr1"
|
||||
for pair in $DEVICE_BRIDGES; do
|
||||
# below in $info, physical device is [0], bridge name is [1]
|
||||
info=${pair//:/ }
|
||||
/usr/bin/python $OVS_CONFIGURE_BASE_FLOWS $1 ${info[0]} ${info[1]}
|
||||
done
|
||||
}
|
||||
|
||||
function start {
|
||||
run_ovs_conf_base_flows online
|
||||
}
|
||||
|
||||
function stop {
|
||||
run_ovs_conf_base_flows offline
|
||||
}
|
||||
|
||||
function restart {
|
||||
run_ovs_conf_base_flows reset
|
||||
}
|
||||
|
||||
case "$1" in
|
||||
start)
|
||||
start
|
||||
;;
|
||||
stop)
|
||||
stop
|
||||
;;
|
||||
restart)
|
||||
restart
|
||||
;;
|
||||
*)
|
||||
echo "usage: openvswitch-nova [start|stop|restart]"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
@@ -0,0 +1 @@
|
||||
#DEVICE_BRIDGES="eth0:xenbr0 eth1:xenbr1"
|
||||
@@ -0,0 +1,3 @@
|
||||
SUBSYSTEM=="xen-backend", KERNEL=="vif*", RUN+="/etc/xensource/scripts/ovs_configure_vif_flows.py $env{ACTION} %k all"
|
||||
# is this one needed?
|
||||
#SUBSYSTEM=="net", KERNEL=="tap*", RUN+="/etc/xensource/scripts/ovs_configure_vif_flows.py $env{ACTION} %k all"
|
||||
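The udev rule hands the kernel device name (%k) straight to ovs_configure_vif_flows.py, which expects the vif-domid-idx form named in its usage string. A small worked example of the parsing its main() performs, assuming a kernel name of 'vif-5-0' (the domain id is made up):

    vif_raw = 'vif-5-0'                    # what %k is assumed to expand to
    vif_name, dom_id, vif_index = vif_raw.split('-')
    vif = "%s%s.%s" % (vif_name, dom_id, vif_index)
    bridge = "xenbr%s" % vif_index
    print vif, bridge                      # vif5.0 xenbr0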
@@ -0,0 +1,40 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import os
import subprocess


def execute_get_output(*command):
"""Execute and return stdout"""
devnull = open(os.devnull, 'w')
command = map(str, command)
proc = subprocess.Popen(command, close_fds=True,
stdout=subprocess.PIPE, stderr=devnull)
devnull.close()
return proc.stdout.read().strip()


def execute(*command):
"""Execute without returning stdout"""
devnull = open(os.devnull, 'w')
command = map(str, command)
proc = subprocess.Popen(command, close_fds=True,
stdout=subprocess.PIPE, stderr=devnull)
devnull.close()
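These two helpers are what the other scripts in this change use to shell out to the Open vSwitch tools. A short illustrative use (the interface and bridge names are made up):

    from novalib import execute, execute_get_output

    # Read the OpenFlow port number assigned to a vif, as the flow scripts do.
    ofport = execute_get_output('/usr/bin/ovs-vsctl', 'get',
                                'Interface', 'vif5.0', 'ofport')

    # Commands whose output is not needed go through execute().
    execute('/usr/bin/ovs-ofctl', 'del-flows', 'xenbr0', 'in_port=%s' % ofport)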
@@ -0,0 +1,62 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
This script is used to configure base openvswitch flows for XenServer hosts.
"""

import os
import sys


from novalib import execute, execute_get_output


def main(command, phys_dev_name, bridge_name):
ovs_ofctl = lambda *rule: execute('/usr/bin/ovs-ofctl', *rule)

# always clear all flows first
ovs_ofctl('del-flows', bridge_name)

if command in ('online', 'reset'):
pnic_ofport = execute_get_output('/usr/bin/ovs-vsctl', 'get',
'Interface', phys_dev_name, 'ofport')

# these flows are lower priority than all VM-specific flows.

# allow all traffic from the physical NIC, as it is trusted (i.e.,
# from a filtered vif, or from the physical infrastructure)
ovs_ofctl('add-flow', bridge_name,
"priority=2,in_port=%s,actions=normal" % pnic_ofport)

# default drop
ovs_ofctl('add-flow', bridge_name, 'priority=1,actions=drop')


if __name__ == "__main__":
if len(sys.argv) != 4 or sys.argv[1] not in ('online', 'offline', 'reset'):
print sys.argv
script_name = os.path.basename(sys.argv[0])
print "This script configures base ovs flows."
print "usage: %s [online|offline|reset] phys-dev-name bridge-name" \
% script_name
print "    ex: %s online eth0 xenbr0" % script_name
sys.exit(1)
else:
command, phys_dev_name, bridge_name = sys.argv[1:4]
main(command, phys_dev_name, bridge_name)
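Concretely, main('online', 'eth0', 'xenbr0') clears the bridge and installs two low-priority flows. Assuming ovs-vsctl reports ofport 1 for eth0 (a made-up value), the script runs the equivalent of:

    from novalib import execute

    execute('/usr/bin/ovs-ofctl', 'del-flows', 'xenbr0')
    # trust everything arriving from the physical NIC
    execute('/usr/bin/ovs-ofctl', 'add-flow', 'xenbr0',
            'priority=2,in_port=1,actions=normal')
    # drop anything that no higher-priority (per-VIF) flow accepted
    execute('/usr/bin/ovs-ofctl', 'add-flow', 'xenbr0',
            'priority=1,actions=drop')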
plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py (new executable file, 180 lines)
@@ -0,0 +1,180 @@
|
||||
#!/usr/bin/env python
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2011 OpenStack LLC.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
This script is used to configure openvswitch flows on XenServer hosts.
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
# This is written to Python 2.4, since that is what is available on XenServer
|
||||
import netaddr
|
||||
import simplejson as json
|
||||
|
||||
from novalib import execute, execute_get_output
|
||||
|
||||
|
||||
OVS_OFCTL = '/usr/bin/ovs-ofctl'
|
||||
|
||||
|
||||
class OvsFlow(object):
|
||||
def __init__(self, bridge, params):
|
||||
self.bridge = bridge
|
||||
self.params = params
|
||||
|
||||
def add(self, rule):
|
||||
execute(OVS_OFCTL, 'add-flow', self.bridge, rule % self.params)
|
||||
|
||||
def clear_flows(self, ofport):
|
||||
execute(OVS_OFCTL, 'del-flows', self.bridge, "in_port=%s" % ofport)
|
||||
|
||||
|
||||
def main(command, vif_raw, net_type):
|
||||
if command not in ('online', 'offline'):
|
||||
return
|
||||
|
||||
vif_name, dom_id, vif_index = vif_raw.split('-')
|
||||
vif = "%s%s.%s" % (vif_name, dom_id, vif_index)
|
||||
bridge = "xenbr%s" % vif_index
|
||||
|
||||
xsls = execute_get_output('/usr/bin/xenstore-ls',
|
||||
'/local/domain/%s/vm-data/networking' % dom_id)
|
||||
macs = [line.split("=")[0].strip() for line in xsls.splitlines()]
|
||||
|
||||
for mac in macs:
|
||||
xsread = execute_get_output('/usr/bin/xenstore-read',
|
||||
'/local/domain/%s/vm-data/networking/%s' %
|
||||
(dom_id, mac))
|
||||
data = json.loads(xsread)
|
||||
if data["label"] == "public":
|
||||
this_vif = "vif%s.0" % dom_id
|
||||
else:
|
||||
this_vif = "vif%s.1" % dom_id
|
||||
|
||||
if vif == this_vif:
|
||||
vif_ofport = execute_get_output('/usr/bin/ovs-vsctl', 'get',
|
||||
'Interface', vif, 'ofport')
|
||||
|
||||
params = dict(VIF_NAME=vif,
|
||||
MAC=data['mac'],
|
||||
OF_PORT=vif_ofport)
|
||||
|
||||
ovs = OvsFlow(bridge, params)
|
||||
|
||||
if command == 'offline':
|
||||
# I haven't found a way to clear only IPv4 or IPv6 rules.
|
||||
ovs.clear_flows(vif_ofport)
|
||||
|
||||
if command == 'online':
|
||||
if net_type in ('ipv4', 'all') and 'ips' in data:
|
||||
for ip4 in data['ips']:
|
||||
ovs.params.update({'IPV4_ADDR': ip4['ip']})
|
||||
apply_ovs_ipv4_flows(ovs, bridge, params)
|
||||
if net_type in ('ipv6', 'all') and 'ip6s' in data:
|
||||
for ip6 in data['ip6s']:
|
||||
link_local = str(netaddr.EUI(data['mac']).eui64()\
|
||||
.ipv6_link_local())
|
||||
ovs.params.update({'IPV6_LINK_LOCAL_ADDR': link_local})
|
||||
ovs.params.update({'IPV6_GLOBAL_ADDR': ip6['ip']})
|
||||
apply_ovs_ipv6_flows(ovs, bridge, params)
|
||||
|
||||
|
||||
def apply_ovs_ipv4_flows(ovs, bridge, params):
|
||||
# allow valid ARP outbound (both request / reply)
|
||||
ovs.add("priority=3,in_port=%(OF_PORT)s,dl_src=%(MAC)s,arp,"
|
||||
"arp_sha=%(MAC)s,nw_src=%(IPV4_ADDR)s,actions=normal")
|
||||
|
||||
ovs.add("priority=3,in_port=%(OF_PORT)s,dl_src=%(MAC)s,arp,"
|
||||
"arp_sha=%(MAC)s,nw_src=0.0.0.0,actions=normal")
|
||||
|
||||
# allow valid IPv4 outbound
|
||||
ovs.add("priority=3,in_port=%(OF_PORT)s,dl_src=%(MAC)s,ip,"
|
||||
"nw_src=%(IPV4_ADDR)s,actions=normal")
|
||||
|
||||
|
||||
def apply_ovs_ipv6_flows(ovs, bridge, params):
    # allow valid IPv6 ND outbound (are both global and local IPs needed?)
    # Neighbor Solicitation
    ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
            "ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp_type=135,nd_sll=%(MAC)s,"
            "actions=normal")
    ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
            "ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp_type=135,actions=normal")
    ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
            "ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp_type=135,nd_sll=%(MAC)s,"
            "actions=normal")
    ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
            "ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp_type=135,actions=normal")

    # Neighbor Advertisement
    ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
            "ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp_type=136,"
            "nd_target=%(IPV6_LINK_LOCAL_ADDR)s,actions=normal")
    ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
            "ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp_type=136,actions=normal")
    ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
            "ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp_type=136,"
            "nd_target=%(IPV6_GLOBAL_ADDR)s,actions=normal")
    ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
            "ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp_type=136,actions=normal")

    # drop all other neighbor discovery (req b/c we permit all icmp6 below)
    ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=135,actions=drop")
    ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=136,actions=drop")

    # do not allow sending specific ICMPv6 types
    # Router Advertisement
    ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=134,actions=drop")
    # Redirect Gateway
    ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=137,actions=drop")
    # Mobile Prefix Solicitation
    ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=146,actions=drop")
    # Mobile Prefix Advertisement
    ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=147,actions=drop")
    # Multicast Router Advertisement
    ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=151,actions=drop")
    # Multicast Router Solicitation
    ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=152,actions=drop")
    # Multicast Router Termination
    ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=153,actions=drop")

    # allow valid IPv6 outbound, by type
    ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s,"
            "ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp6,actions=normal")
    ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s,"
            "ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp6,actions=normal")
    ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s,"
            "ipv6_src=%(IPV6_GLOBAL_ADDR)s,tcp6,actions=normal")
    ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s,"
            "ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,tcp6,actions=normal")
    ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s,"
            "ipv6_src=%(IPV6_GLOBAL_ADDR)s,udp6,actions=normal")
    ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s,"
            "ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,udp6,actions=normal")
    # all else will be dropped ...

if __name__ == "__main__":
    if len(sys.argv) != 4:
        print "usage: %s [online|offline] vif-domid-idx [ipv4|ipv6|all] " % \
            os.path.basename(sys.argv[0])
        sys.exit(1)
    else:
        command, vif_raw, net_type = sys.argv[1:4]
        main(command, vif_raw, net_type)
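As a worked example of the argument handling above (hypothetical values): a vif identifier of the form vif-domid-idx, such as 'vif-5-0', is split by main() into the interface name vif5.0 and the bridge xenbr0, then matched against the entries found under /local/domain/5/vm-data/networking in xenstore.

    # Hypothetical invocation; equivalent to running the script with the
    # command-line arguments: online vif-5-0 all
    main('online', 'vif-5-0', 'all')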
@@ -1,7 +1,7 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 OpenStack LLC.
# Copyright 2010-2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -29,15 +29,18 @@ import sys
import simplejson as json


from novalib import execute, execute_get_output


def main(dom_id, command, only_this_vif=None):
    xsls = execute('/usr/bin/xenstore-ls',
                   '/local/domain/%s/vm-data/networking' % dom_id, True)
    xsls = execute_get_output('/usr/bin/xenstore-ls',
                              '/local/domain/%s/vm-data/networking' % dom_id)
    macs = [line.split("=")[0].strip() for line in xsls.splitlines()]

    for mac in macs:
        xsread = execute('/usr/bin/enstore-read',
                         '/local/domain/%s/vm-data/networking/%s' %
                         (dom_id, mac), True)
        xsread = execute_get_output('/usr/bin/xenstore-read',
                                    '/local/domain/%s/vm-data/networking/%s' %
                                    (dom_id, mac))
        data = json.loads(xsread)
        for ip in data['ips']:
            if data["label"] == "public":
@@ -52,17 +55,6 @@ def main(dom_id, command, only_this_vif=None):
                apply_iptables_rules(command, params)


def execute(*command, return_stdout=False):
    devnull = open(os.devnull, 'w')
    command = map(str, command)
    proc = subprocess.Popen(command, close_fds=True,
                            stdout=subprocess.PIPE, stderr=devnull)
    devnull.close()
    if return_stdout:
        return proc.stdout.read()
    else:
        return None

# A note about adding rules:
# Whenever we add any rule to iptables, arptables or ebtables we first
# delete the same rule to ensure the rule only exists once.
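The note above is the idempotency convention this script relies on: every rule is deleted before it is added, so re-running the script after a retry or reboot cannot leave duplicate entries behind. A minimal sketch of the pattern, assuming a generic rule-runner callable in the style of the ebtables() helper seen in the next hunk:

    def ensure_rule(run, chain, *rule):
        # The delete either removes a stale copy or fails harmlessly, so the
        # add below leaves exactly one copy of the rule, however often this
        # function is called.
        run('-D', chain, *rule)
        run('-A', chain, *rule)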
@@ -113,8 +105,8 @@ def apply_ebtables_rules(command, params):
    ebtables('-D', 'FORWARD', '-p', '0806', '-o', params['VIF'],
             '--arp-ip-dst', params['IP'],
             '-j', 'ACCEPT')
    ebtables('-D', 'FORWARD', '-p', '0800', '-o',
             params['VIF'], '--ip-dst', params['IP'],
    ebtables('-D', 'FORWARD', '-p', '0800', '-o', params['VIF'],
             '--ip-dst', params['IP'],
             '-j', 'ACCEPT')
    if command == 'online':
        ebtables('-A', 'FORWARD', '-p', '0806',
@@ -36,7 +36,7 @@ PY_VERSION = "python%s.%s" % (sys.version_info[0], sys.version_info[1])


def die(message, *args):
    print >>sys.stderr, message % args
    print >> sys.stderr, message % args
    sys.exit(1)