Remerge trunk (again). Fix issues caused by changes to deserialization calls on controllers.
bin/nova-manage (166 lines changed)
@@ -55,6 +55,8 @@
 
 import datetime
 import gettext
+import glob
+import json
 import os
 import re
 import sys
@@ -81,7 +83,7 @@ from nova import log as logging
 from nova import quota
 from nova import rpc
 from nova import utils
-from nova.api.ec2.cloud import ec2_id_to_id
+from nova.api.ec2 import ec2utils
 from nova.auth import manager
 from nova.cloudpipe import pipelib
 from nova.compute import instance_types
@@ -94,6 +96,7 @@ flags.DECLARE('network_size', 'nova.network.manager')
 flags.DECLARE('vlan_start', 'nova.network.manager')
 flags.DECLARE('vpn_start', 'nova.network.manager')
 flags.DECLARE('fixed_range_v6', 'nova.network.manager')
+flags.DECLARE('images_path', 'nova.image.local')
 flags.DEFINE_flag(flags.HelpFlag())
 flags.DEFINE_flag(flags.HelpshortFlag())
 flags.DEFINE_flag(flags.HelpXMLFlag())
@@ -104,7 +107,7 @@ def param2id(object_id):
     args: [object_id], e.g. 'vol-0000000a' or 'volume-0000000a' or '10'
     """
     if '-' in object_id:
-        return ec2_id_to_id(object_id)
+        return ec2utils.ec2_id_to_id(object_id)
     else:
         return int(object_id)
 
@@ -547,6 +550,15 @@ class NetworkCommands(object):
                   network.dhcp_start,
                   network.dns)
 
+    def delete(self, fixed_range):
+        """Deletes a network"""
+        network = db.network_get_by_cidr(context.get_admin_context(), \
+                                         fixed_range)
+        if network.project_id is not None:
+            raise ValueError(_('Network must be disassociated from project %s'
+                               ' before delete' % network.project_id))
+        db.network_delete_safe(context.get_admin_context(), network.id)
+
 
 class ServiceCommands(object):
     """Enable and disable running services"""
@@ -737,6 +749,155 @@ class InstanceTypeCommands(object):
         self._print_instance_types(name, inst_types)
 
 
+class ImageCommands(object):
+    """Methods for dealing with a cloud in an odd state"""
+
+    def __init__(self, *args, **kwargs):
+        self.image_service = utils.import_object(FLAGS.image_service)
+
+    def _register(self, image_type, disk_format, container_format,
+                  path, owner, name=None, is_public='T',
+                  architecture='x86_64', kernel_id=None, ramdisk_id=None):
+        meta = {'is_public': True,
+                'name': name,
+                'disk_format': disk_format,
+                'container_format': container_format,
+                'properties': {'image_state': 'available',
+                               'owner': owner,
+                               'type': image_type,
+                               'architecture': architecture,
+                               'image_location': 'local',
+                               'is_public': (is_public == 'T')}}
+        print image_type, meta
+        if kernel_id:
+            meta['properties']['kernel_id'] = int(kernel_id)
+        if ramdisk_id:
+            meta['properties']['ramdisk_id'] = int(ramdisk_id)
+        elevated = context.get_admin_context()
+        try:
+            with open(path) as ifile:
+                image = self.image_service.create(elevated, meta, ifile)
+            new = image['id']
+            print _("Image registered to %(new)s (%(new)08x).") % locals()
+            return new
+        except Exception as exc:
+            print _("Failed to register %(path)s: %(exc)s") % locals()
+
+    def all_register(self, image, kernel, ramdisk, owner, name=None,
+                     is_public='T', architecture='x86_64'):
+        """Uploads an image, kernel, and ramdisk into the image_service
+        arguments: image kernel ramdisk owner [name] [is_public='T']
+                   [architecture='x86_64']"""
+        kernel_id = self.kernel_register(kernel, owner, None,
+                                         is_public, architecture)
+        ramdisk_id = self.ramdisk_register(ramdisk, owner, None,
+                                           is_public, architecture)
+        self.image_register(image, owner, name, is_public,
+                            architecture, kernel_id, ramdisk_id)
+
+    def image_register(self, path, owner, name=None, is_public='T',
+                       architecture='x86_64', kernel_id=None, ramdisk_id=None,
+                       disk_format='ami', container_format='ami'):
+        """Uploads an image into the image_service
+        arguments: path owner [name] [is_public='T'] [architecture='x86_64']
+                   [kernel_id=None] [ramdisk_id=None]
+                   [disk_format='ami'] [container_format='ami']"""
+        return self._register('machine', disk_format, container_format, path,
+                              owner, name, is_public, architecture,
+                              kernel_id, ramdisk_id)
+
+    def kernel_register(self, path, owner, name=None, is_public='T',
+                        architecture='x86_64'):
+        """Uploads a kernel into the image_service
+        arguments: path owner [name] [is_public='T'] [architecture='x86_64']
+        """
+        return self._register('kernel', 'aki', 'aki', path, owner, name,
+                              is_public, architecture)
+
+    def ramdisk_register(self, path, owner, name=None, is_public='T',
+                         architecture='x86_64'):
+        """Uploads a ramdisk into the image_service
+        arguments: path owner [name] [is_public='T'] [architecture='x86_64']
+        """
+        return self._register('ramdisk', 'ari', 'ari', path, owner, name,
+                              is_public, architecture)
+
+    def _lookup(self, old_image_id):
+        try:
+            internal_id = ec2utils.ec2_id_to_id(old_image_id)
+            image = self.image_service.show(context, internal_id)
+        except exception.NotFound:
+            image = self.image_service.show_by_name(context, old_image_id)
+        return image['id']
+
+    def _old_to_new(self, old):
+        mapping = {'machine': 'ami',
+                   'kernel': 'aki',
+                   'ramdisk': 'ari'}
+        container_format = mapping[old['type']]
+        disk_format = container_format
+        new = {'disk_format': disk_format,
+               'container_format': container_format,
+               'is_public': True,
+               'name': old['imageId'],
+               'properties': {'image_state': old['imageState'],
+                              'owner': old['imageOwnerId'],
+                              'architecture': old['architecture'],
+                              'type': old['type'],
+                              'image_location': old['imageLocation'],
+                              'is_public': old['isPublic']}}
+        if old.get('kernelId'):
+            new['properties']['kernel_id'] = self._lookup(old['kernelId'])
+        if old.get('ramdiskId'):
+            new['properties']['ramdisk_id'] = self._lookup(old['ramdiskId'])
+        return new
+
+    def _convert_images(self, images):
+        elevated = context.get_admin_context()
+        for image_path, image_metadata in images.iteritems():
+            meta = self._old_to_new(image_metadata)
+            old = meta['name']
+            try:
+                with open(image_path) as ifile:
+                    image = self.image_service.create(elevated, meta, ifile)
+                new = image['id']
+                print _("Image %(old)s converted to " \
+                        "%(new)s (%(new)08x).") % locals()
+            except Exception as exc:
+                print _("Failed to convert %(old)s: %(exc)s") % locals()
+
+    def convert(self, directory):
+        """Uploads old objectstore images in directory to new service
+        arguments: directory"""
+        machine_images = {}
+        other_images = {}
+        directory = os.path.abspath(directory)
+        # NOTE(vish): If we're importing from the images path dir, attempt
+        #             to move the files out of the way before importing
+        #             so we aren't writing to the same directory. This
+        #             may fail if the dir was a mointpoint.
+        if (FLAGS.image_service == 'nova.image.local.LocalImageService'
+                and directory == os.path.abspath(FLAGS.images_path)):
+            new_dir = "%s_bak" % directory
+            os.move(directory, new_dir)
+            os.mkdir(directory)
+            directory = new_dir
+        for fn in glob.glob("%s/*/info.json" % directory):
+            try:
+                image_path = os.path.join(fn.rpartition('/')[0], 'image')
+                with open(fn) as metadata_file:
+                    image_metadata = json.load(metadata_file)
+                if image_metadata['type'] == 'machine':
+                    machine_images[image_path] = image_metadata
+                else:
+                    other_images[image_path] = image_metadata
+            except Exception as exc:
+                print _("Failed to load %(fn)s.") % locals()
+        # NOTE(vish): do kernels and ramdisks first so images
+        self._convert_images(other_images)
+        self._convert_images(machine_images)
+
+
 CATEGORIES = [
     ('user', UserCommands),
     ('account', AccountCommands),
@@ -752,6 +913,7 @@ CATEGORIES = [
    ('db', DbCommands),
    ('volume', VolumeCommands),
    ('instance_type', InstanceTypeCommands),
+   ('image', ImageCommands),
    ('flavor', InstanceTypeCommands)]
 
 
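The convert() walk above can be sanity-checked outside of nova. A minimal sketch, assuming only the Bexar-style layout of <directory>/<id>/info.json beside an image blob; the helper name is hypothetical, and note that the committed code's os.move is not a standard-library call (the stdlib equivalent is os.rename):

    import glob
    import json
    import os

    def find_bexar_images(directory):
        """Yield (image_path, metadata) for each old-style image dir."""
        directory = os.path.abspath(directory)
        for fn in glob.glob("%s/*/info.json" % directory):
            with open(fn) as metadata_file:
                yield os.path.join(os.path.dirname(fn), 'image'), \
                      json.load(metadata_file)

    # Backing up the source directory before re-importing, as convert()
    # intends to do:
    # os.rename(directory, "%s_bak" % directory)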
@@ -166,9 +166,6 @@ NOVA_CONF_EOF
     $NOVA_DIR/bin/nova-manage user admin admin admin admin
     # create a project called 'admin' with project manager of 'admin'
     $NOVA_DIR/bin/nova-manage project create admin admin
-    # export environment variables for project 'admin' and user 'admin'
-    $NOVA_DIR/bin/nova-manage project zipfile admin admin $NOVA_DIR/nova.zip
-    unzip -o $NOVA_DIR/nova.zip -d $NOVA_DIR/
     # create a small network
     $NOVA_DIR/bin/nova-manage network create 10.0.0.0/8 1 32
 
@@ -184,6 +181,11 @@ NOVA_CONF_EOF
     screen_it scheduler "$NOVA_DIR/bin/nova-scheduler"
     screen_it volume "$NOVA_DIR/bin/nova-volume"
     screen_it ajax_console_proxy "$NOVA_DIR/bin/nova-ajax-console-proxy"
+    sleep 2
+    # export environment variables for project 'admin' and user 'admin'
+    $NOVA_DIR/bin/nova-manage project zipfile admin admin $NOVA_DIR/nova.zip
+    unzip -o $NOVA_DIR/nova.zip -d $NOVA_DIR/
+
     screen_it test ". $NOVA_DIR/novarc"
     screen -S nova -x
 fi
@@ -8,5 +8,6 @@ from nova import utils
 def setup(app):
     rootdir = os.path.abspath(app.srcdir + '/..')
     print "**Autodocumenting from %s" % rootdir
-    rv = utils.execute('cd %s && ./generate_autodoc_index.sh' % rootdir)
+    os.chdir(rootdir)
+    rv = utils.execute('./generate_autodoc_index.sh')
     print rv[0]
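This appears to track nova's move away from shell command strings: once utils.execute takes a plain command rather than a shell pipeline, the 'cd X && script' composition no longer fits, so the working directory is changed first (the same refactor shows up below as utils.runthis("...", "sh", "genrootca.sh")). The equivalent pattern in plain Python, with an illustrative path:

    import os
    import subprocess

    os.chdir('/path/to/source/root')  # hypothetical path
    subprocess.check_call(['./generate_autodoc_index.sh'])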
@@ -173,7 +173,10 @@ Nova Floating IPs
 ``nova-manage floating create <host> <ip_range>``
 
     Creates floating IP addresses for the named host by the given range.
-    floating delete <ip_range> Deletes floating IP addresses in the range given.
+
+``nova-manage floating delete <ip_range>``
+
+    Deletes floating IP addresses in the range given.
 
 ``nova-manage floating list``
 
@@ -193,7 +196,7 @@ Nova Flavor
 ``nova-manage flavor create <name> <memory> <vCPU> <local_storage> <flavorID> <(optional) swap> <(optional) RXTX Quota> <(optional) RXTX Cap>``
 
-creates a flavor with the following positional arguments:
+    creates a flavor with the following positional arguments:
     * memory (expressed in megabytes)
     * vcpu(s) (integer)
     * local storage (expressed in gigabytes)
     * flavorid (unique integer)
@@ -209,12 +212,33 @@ Nova Flavor
 
     Purges the flavor with the name <name>. This removes this flavor from the database.
 
 
 Nova Instance_type
 ~~~~~~~~~~~~~~~~~~
 
 The instance_type command is provided as an alias for the flavor command. All the same subcommands and arguments from nova-manage flavor can be used.
 
+Nova Images
+~~~~~~~~~~~
+
+``nova-manage image image_register <path> <owner>``
+
+    Registers an image with the image service.
+
+``nova-manage image kernel_register <path> <owner>``
+
+    Registers a kernel with the image service.
+
+``nova-manage image ramdisk_register <path> <owner>``
+
+    Registers a ramdisk with the image service.
+
+``nova-manage image all_register <image_path> <kernel_path> <ramdisk_path> <owner>``
+
+    Registers an image kernel and ramdisk with the image service.
+
+``nova-manage image convert <directory>``
+
+    Converts all images in directory from the old (Bexar) format to the new format.
+
 FILES
 ========
@@ -182,6 +182,29 @@ Nova Floating IPs
 
     Displays a list of all floating IP addresses.
 
+Nova Images
+~~~~~~~~~~~
+
+``nova-manage image image_register <path> <owner>``
+
+    Registers an image with the image service.
+
+``nova-manage image kernel_register <path> <owner>``
+
+    Registers a kernel with the image service.
+
+``nova-manage image ramdisk_register <path> <owner>``
+
+    Registers a ramdisk with the image service.
+
+``nova-manage image all_register <image_path> <kernel_path> <ramdisk_path> <owner>``
+
+    Registers an image kernel and ramdisk with the image service.
+
+``nova-manage image convert <directory>``
+
+    Converts all images in directory from the old (Bexar) format to the new format.
+
 Concept: Flags
 --------------
 
@@ -187,7 +187,7 @@ class ServiceWrapper(wsgi.Controller):
     def __init__(self, service_handle):
         self.service_handle = service_handle
 
-    @webob.dec.wsgify
+    @webob.dec.wsgify(RequestClass=wsgi.Request)
     def __call__(self, req):
         arg_dict = req.environ['wsgiorg.routing_args'][1]
         action = arg_dict['action']
@@ -206,7 +206,7 @@ class ServiceWrapper(wsgi.Controller):
         params = dict([(str(k), v) for (k, v) in params.iteritems()])
         result = method(context, **params)
         if type(result) is dict or type(result) is list:
-            return self._serialize(result, req)
+            return self._serialize(result, req.best_match_content_type())
         else:
             return result
 
@@ -218,7 +218,7 @@ class Proxy(object):
         self.prefix = prefix
 
     def __do_request(self, path, context, **kwargs):
-        req = webob.Request.blank(path)
+        req = wsgi.Request.blank(path)
         req.method = 'POST'
         req.body = urllib.urlencode({'json': utils.dumps(kwargs)})
         req.environ['openstack.context'] = context
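Both direct-API edits follow the commit message: serialization is now keyed on an explicit content type, obtained from nova's request subclass via best_match_content_type(). A small stand-alone illustration of that negotiation using plain webob (the offer list here is an assumption, not nova's exact logic):

    import webob

    class Request(webob.Request):
        def best_match_content_type(self):
            """Pick the response type the client asked for, default JSON."""
            offers = ['application/json', 'application/xml']
            return self.accept.best_match(offers) or 'application/json'

    req = Request.blank('/', headers={'Accept': 'application/xml'})
    assert req.best_match_content_type() == 'application/xml'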
@@ -53,7 +53,7 @@ flags.DEFINE_list('lockout_memcached_servers', None,
 class RequestLogging(wsgi.Middleware):
     """Access-Log akin logging for all EC2 API requests."""
 
-    @webob.dec.wsgify
+    @webob.dec.wsgify(RequestClass=wsgi.Request)
     def __call__(self, req):
         start = utils.utcnow()
         rv = req.get_response(self.application)
@@ -112,7 +112,7 @@ class Lockout(wsgi.Middleware):
                                  debug=0)
         super(Lockout, self).__init__(application)
 
-    @webob.dec.wsgify
+    @webob.dec.wsgify(RequestClass=wsgi.Request)
     def __call__(self, req):
         access_key = str(req.params['AWSAccessKeyId'])
         failures_key = "authfailures-%s" % access_key
@@ -141,7 +141,7 @@ class Authenticate(wsgi.Middleware):
 
     """Authenticate an EC2 request and add 'ec2.context' to WSGI environ."""
 
-    @webob.dec.wsgify
+    @webob.dec.wsgify(RequestClass=wsgi.Request)
     def __call__(self, req):
         # Read request signature and access id.
         try:
@@ -190,7 +190,7 @@ class Requestify(wsgi.Middleware):
         super(Requestify, self).__init__(app)
         self.controller = utils.import_class(controller)()
 
-    @webob.dec.wsgify
+    @webob.dec.wsgify(RequestClass=wsgi.Request)
     def __call__(self, req):
         non_args = ['Action', 'Signature', 'AWSAccessKeyId', 'SignatureMethod',
                     'SignatureVersion', 'Version', 'Timestamp']
@@ -275,7 +275,7 @@ class Authorizer(wsgi.Middleware):
         },
     }
 
-    @webob.dec.wsgify
+    @webob.dec.wsgify(RequestClass=wsgi.Request)
     def __call__(self, req):
         context = req.environ['ec2.context']
         controller = req.environ['ec2.request'].controller.__class__.__name__
@@ -309,7 +309,7 @@ class Executor(wsgi.Application):
     response, or a 400 upon failure.
     """
 
-    @webob.dec.wsgify
+    @webob.dec.wsgify(RequestClass=wsgi.Request)
     def __call__(self, req):
         context = req.environ['ec2.context']
         api_request = req.environ['ec2.request']
@@ -371,7 +371,7 @@ class Executor(wsgi.Application):
 
 class Versions(wsgi.Application):
 
-    @webob.dec.wsgify
+    @webob.dec.wsgify(RequestClass=wsgi.Request)
     def __call__(self, req):
         """Respond to a request for all EC2 versions."""
         # available api versions
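Every EC2 middleware gets the same one-line change. webob.dec.wsgify constructs the request object handed to __call__, and RequestClass swaps in nova's wsgi.Request so the handlers can rely on its helpers. Reduced to essentials (the class and app names are illustrative):

    import webob
    import webob.dec

    class Request(webob.Request):
        pass  # stand-in for nova.wsgi.Request and its extra methods

    @webob.dec.wsgify(RequestClass=Request)
    def app(req):
        # req is built as Request, not as a bare webob.Request
        return webob.Response(body=type(req).__name__)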
@@ -39,7 +39,9 @@ from nova import log as logging
 from nova import network
 from nova import utils
 from nova import volume
+from nova.api.ec2 import ec2utils
 from nova.compute import instance_types
+from nova.image import s3
 
 
 FLAGS = flags.FLAGS
@@ -73,30 +75,19 @@ def _gen_key(context, user_id, key_name):
     return {'private_key': private_key, 'fingerprint': fingerprint}
 
 
-def ec2_id_to_id(ec2_id):
-    """Convert an ec2 ID (i-[base 16 number]) to an instance id (int)"""
-    return int(ec2_id.split('-')[-1], 16)
-
-
-def id_to_ec2_id(instance_id, template='i-%08x'):
-    """Convert an instance ID (int) to an ec2 ID (i-[base 16 number])"""
-    return template % instance_id
-
-
 class CloudController(object):
     """ CloudController provides the critical dispatch between
         inbound API calls through the endpoint and messages
         sent to the other nodes.
     """
     def __init__(self):
-        self.image_service = utils.import_object(FLAGS.image_service)
+        self.image_service = s3.S3ImageService()
         self.network_api = network.API()
         self.volume_api = volume.API()
         self.compute_api = compute.API(
                 network_api=self.network_api,
-                image_service=self.image_service,
                 volume_api=self.volume_api,
-                hostname_factory=id_to_ec2_id)
+                hostname_factory=ec2utils.id_to_ec2_id)
         self.setup()
 
     def __str__(self):
@@ -115,7 +106,7 @@ class CloudController(object):
         start = os.getcwd()
         os.chdir(FLAGS.ca_path)
         # TODO(vish): Do this with M2Crypto instead
-        utils.runthis(_("Generating root CA: %s"), "sh genrootca.sh")
+        utils.runthis(_("Generating root CA: %s"), "sh", "genrootca.sh")
         os.chdir(start)
 
     def _get_mpi_data(self, context, project_id):
@@ -154,11 +145,14 @@ class CloudController(object):
         availability_zone = self._get_availability_zone_by_host(ctxt, host)
         floating_ip = db.instance_get_floating_address(ctxt,
                                                        instance_ref['id'])
-        ec2_id = id_to_ec2_id(instance_ref['id'])
+        ec2_id = ec2utils.id_to_ec2_id(instance_ref['id'])
+        image_ec2_id = self._image_ec2_id(instance_ref['image_id'], 'machine')
+        k_ec2_id = self._image_ec2_id(instance_ref['kernel_id'], 'kernel')
+        r_ec2_id = self._image_ec2_id(instance_ref['ramdisk_id'], 'ramdisk')
         data = {
             'user-data': base64.b64decode(instance_ref['user_data']),
             'meta-data': {
-                'ami-id': instance_ref['image_id'],
+                'ami-id': image_ec2_id,
                 'ami-launch-index': instance_ref['launch_index'],
                 'ami-manifest-path': 'FIXME',
                 'block-device-mapping': {
@@ -173,12 +167,12 @@ class CloudController(object):
                 'instance-type': instance_ref['instance_type'],
                 'local-hostname': hostname,
                 'local-ipv4': address,
-                'kernel-id': instance_ref['kernel_id'],
+                'kernel-id': k_ec2_id,
+                'ramdisk-id': r_ec2_id,
                 'placement': {'availability-zone': availability_zone},
                 'public-hostname': hostname,
                 'public-ipv4': floating_ip or '',
                 'public-keys': keys,
-                'ramdisk-id': instance_ref['ramdisk_id'],
                 'reservation-id': instance_ref['reservation_id'],
                 'security-groups': '',
                 'mpi': mpi}}
@@ -525,7 +519,7 @@ class CloudController(object):
             ec2_id = instance_id[0]
         else:
             ec2_id = instance_id
-        instance_id = ec2_id_to_id(ec2_id)
+        instance_id = ec2utils.ec2_id_to_id(ec2_id)
         output = self.compute_api.get_console_output(
             context, instance_id=instance_id)
         now = datetime.datetime.utcnow()
@@ -535,7 +529,7 @@ class CloudController(object):
 
     def get_ajax_console(self, context, instance_id, **kwargs):
         ec2_id = instance_id[0]
-        instance_id = ec2_id_to_id(ec2_id)
+        instance_id = ec2utils.ec2_id_to_id(ec2_id)
         return self.compute_api.get_ajax_console(context,
                                                  instance_id=instance_id)
 
@@ -543,7 +537,7 @@ class CloudController(object):
         if volume_id:
             volumes = []
             for ec2_id in volume_id:
-                internal_id = ec2_id_to_id(ec2_id)
+                internal_id = ec2utils.ec2_id_to_id(ec2_id)
                 volume = self.volume_api.get(context, internal_id)
                 volumes.append(volume)
         else:
@@ -556,11 +550,11 @@ class CloudController(object):
         instance_data = None
         if volume.get('instance', None):
             instance_id = volume['instance']['id']
-            instance_ec2_id = id_to_ec2_id(instance_id)
+            instance_ec2_id = ec2utils.id_to_ec2_id(instance_id)
             instance_data = '%s[%s]' % (instance_ec2_id,
                                         volume['instance']['host'])
         v = {}
-        v['volumeId'] = id_to_ec2_id(volume['id'], 'vol-%08x')
+        v['volumeId'] = ec2utils.id_to_ec2_id(volume['id'], 'vol-%08x')
         v['status'] = volume['status']
         v['size'] = volume['size']
         v['availabilityZone'] = volume['availability_zone']
@@ -578,8 +572,7 @@ class CloudController(object):
                                    'device': volume['mountpoint'],
                                    'instanceId': instance_ec2_id,
                                    'status': 'attached',
-                                   'volumeId': id_to_ec2_id(volume['id'],
-                                                            'vol-%08x')}]
+                                   'volumeId': v['volumeId']}]
         else:
             v['attachmentSet'] = [{}]
 
@@ -598,12 +591,12 @@ class CloudController(object):
         return {'volumeSet': [self._format_volume(context, dict(volume))]}
 
     def delete_volume(self, context, volume_id, **kwargs):
-        volume_id = ec2_id_to_id(volume_id)
+        volume_id = ec2utils.ec2_id_to_id(volume_id)
         self.volume_api.delete(context, volume_id=volume_id)
         return True
 
     def update_volume(self, context, volume_id, **kwargs):
-        volume_id = ec2_id_to_id(volume_id)
+        volume_id = ec2utils.ec2_id_to_id(volume_id)
         updatable_fields = ['display_name', 'display_description']
         changes = {}
         for field in updatable_fields:
@@ -614,8 +607,8 @@ class CloudController(object):
         return True
 
     def attach_volume(self, context, volume_id, instance_id, device, **kwargs):
-        volume_id = ec2_id_to_id(volume_id)
-        instance_id = ec2_id_to_id(instance_id)
+        volume_id = ec2utils.ec2_id_to_id(volume_id)
+        instance_id = ec2utils.ec2_id_to_id(instance_id)
         msg = _("Attach volume %(volume_id)s to instance %(instance_id)s"
                 " at %(device)s") % locals()
         LOG.audit(msg, context=context)
@@ -626,22 +619,22 @@ class CloudController(object):
         volume = self.volume_api.get(context, volume_id)
         return {'attachTime': volume['attach_time'],
                 'device': volume['mountpoint'],
-                'instanceId': id_to_ec2_id(instance_id),
+                'instanceId': ec2utils.id_to_ec2_id(instance_id),
                 'requestId': context.request_id,
                 'status': volume['attach_status'],
-                'volumeId': id_to_ec2_id(volume_id, 'vol-%08x')}
+                'volumeId': ec2utils.id_to_ec2_id(volume_id, 'vol-%08x')}
 
     def detach_volume(self, context, volume_id, **kwargs):
-        volume_id = ec2_id_to_id(volume_id)
+        volume_id = ec2utils.ec2_id_to_id(volume_id)
         LOG.audit(_("Detach volume %s"), volume_id, context=context)
         volume = self.volume_api.get(context, volume_id)
         instance = self.compute_api.detach_volume(context, volume_id=volume_id)
         return {'attachTime': volume['attach_time'],
                 'device': volume['mountpoint'],
-                'instanceId': id_to_ec2_id(instance['id']),
+                'instanceId': ec2utils.id_to_ec2_id(instance['id']),
                 'requestId': context.request_id,
                 'status': volume['attach_status'],
-                'volumeId': id_to_ec2_id(volume_id, 'vol-%08x')}
+                'volumeId': ec2utils.id_to_ec2_id(volume_id, 'vol-%08x')}
 
     def _convert_to_set(self, lst, label):
         if lst == None or lst == []:
@@ -675,7 +668,7 @@ class CloudController(object):
         if instance_id:
             instances = []
             for ec2_id in instance_id:
-                internal_id = ec2_id_to_id(ec2_id)
+                internal_id = ec2utils.ec2_id_to_id(ec2_id)
                 instance = self.compute_api.get(context,
                                                 instance_id=internal_id)
                 instances.append(instance)
@@ -687,9 +680,9 @@ class CloudController(object):
                 continue
             i = {}
             instance_id = instance['id']
-            ec2_id = id_to_ec2_id(instance_id)
+            ec2_id = ec2utils.id_to_ec2_id(instance_id)
             i['instanceId'] = ec2_id
-            i['imageId'] = instance['image_id']
+            i['imageId'] = self._image_ec2_id(instance['image_id'])
             i['instanceState'] = {
                 'code': instance['state'],
                 'name': instance['state_description']}
@@ -755,7 +748,7 @@ class CloudController(object):
             if (floating_ip_ref['fixed_ip']
                 and floating_ip_ref['fixed_ip']['instance']):
                 instance_id = floating_ip_ref['fixed_ip']['instance']['id']
-                ec2_id = id_to_ec2_id(instance_id)
+                ec2_id = ec2utils.id_to_ec2_id(instance_id)
             address_rv = {'public_ip': address,
                           'instance_id': ec2_id}
             if context.is_admin:
@@ -778,7 +771,7 @@ class CloudController(object):
     def associate_address(self, context, instance_id, public_ip, **kwargs):
         LOG.audit(_("Associate address %(public_ip)s to"
                     " instance %(instance_id)s") % locals(), context=context)
-        instance_id = ec2_id_to_id(instance_id)
+        instance_id = ec2utils.ec2_id_to_id(instance_id)
         self.compute_api.associate_floating_ip(context,
                                                instance_id=instance_id,
                                                address=public_ip)
@@ -791,13 +784,19 @@ class CloudController(object):
 
     def run_instances(self, context, **kwargs):
         max_count = int(kwargs.get('max_count', 1))
+        if kwargs.get('kernel_id'):
+            kernel = self._get_image(context, kwargs['kernel_id'])
+            kwargs['kernel_id'] = kernel['id']
+        if kwargs.get('ramdisk_id'):
+            ramdisk = self._get_image(context, kwargs['ramdisk_id'])
+            kwargs['ramdisk_id'] = ramdisk['id']
         instances = self.compute_api.create(context,
             instance_type=instance_types.get_by_type(
                 kwargs.get('instance_type', None)),
-            image_id=kwargs['image_id'],
+            image_id=self._get_image(context, kwargs['image_id'])['id'],
            min_count=int(kwargs.get('min_count', max_count)),
            max_count=max_count,
-            kernel_id=kwargs.get('kernel_id', None),
+            kernel_id=kwargs.get('kernel_id'),
            ramdisk_id=kwargs.get('ramdisk_id'),
            display_name=kwargs.get('display_name'),
            display_description=kwargs.get('display_description'),
@@ -814,7 +813,7 @@ class CloudController(object):
         instance_id is a kwarg so its name cannot be modified."""
         LOG.debug(_("Going to start terminating instances"))
         for ec2_id in instance_id:
-            instance_id = ec2_id_to_id(ec2_id)
+            instance_id = ec2utils.ec2_id_to_id(ec2_id)
             self.compute_api.delete(context, instance_id=instance_id)
         return True
 
@@ -822,19 +821,19 @@ class CloudController(object):
         """instance_id is a list of instance ids"""
         LOG.audit(_("Reboot instance %r"), instance_id, context=context)
         for ec2_id in instance_id:
-            instance_id = ec2_id_to_id(ec2_id)
+            instance_id = ec2utils.ec2_id_to_id(ec2_id)
            self.compute_api.reboot(context, instance_id=instance_id)
         return True
 
     def rescue_instance(self, context, instance_id, **kwargs):
         """This is an extension to the normal ec2_api"""
-        instance_id = ec2_id_to_id(instance_id)
+        instance_id = ec2utils.ec2_id_to_id(instance_id)
         self.compute_api.rescue(context, instance_id=instance_id)
         return True
 
     def unrescue_instance(self, context, instance_id, **kwargs):
         """This is an extension to the normal ec2_api"""
-        instance_id = ec2_id_to_id(instance_id)
+        instance_id = ec2utils.ec2_id_to_id(instance_id)
         self.compute_api.unrescue(context, instance_id=instance_id)
         return True
 
@@ -845,41 +844,80 @@ class CloudController(object):
             if field in kwargs:
                 changes[field] = kwargs[field]
         if changes:
-            instance_id = ec2_id_to_id(instance_id)
+            instance_id = ec2utils.ec2_id_to_id(instance_id)
             self.compute_api.update(context, instance_id=instance_id, **kwargs)
         return True
 
-    def _format_image(self, context, image):
+    _type_prefix_map = {'machine': 'ami',
+                        'kernel': 'aki',
+                        'ramdisk': 'ari'}
+
+    def _image_ec2_id(self, image_id, image_type='machine'):
+        prefix = self._type_prefix_map[image_type]
+        template = prefix + '-%08x'
+        return ec2utils.id_to_ec2_id(int(image_id), template=template)
+
+    def _get_image(self, context, ec2_id):
+        try:
+            internal_id = ec2utils.ec2_id_to_id(ec2_id)
+            return self.image_service.show(context, internal_id)
+        except exception.NotFound:
+            return self.image_service.show_by_name(context, ec2_id)
+
+    def _format_image(self, image):
         """Convert from format defined by BaseImageService to S3 format."""
         i = {}
-        i['imageId'] = image.get('id')
-        i['kernelId'] = image.get('kernel_id')
-        i['ramdiskId'] = image.get('ramdisk_id')
-        i['imageOwnerId'] = image.get('owner_id')
-        i['imageLocation'] = image.get('location')
-        i['imageState'] = image.get('status')
-        i['type'] = image.get('type')
-        i['isPublic'] = image.get('is_public')
-        i['architecture'] = image.get('architecture')
+        image_type = image['properties'].get('type')
+        ec2_id = self._image_ec2_id(image.get('id'), image_type)
+        name = image.get('name')
+        if name:
+            i['imageId'] = "%s (%s)" % (ec2_id, name)
+        else:
+            i['imageId'] = ec2_id
+        kernel_id = image['properties'].get('kernel_id')
+        if kernel_id:
+            i['kernelId'] = self._image_ec2_id(kernel_id, 'kernel')
+        ramdisk_id = image['properties'].get('ramdisk_id')
+        if ramdisk_id:
+            i['ramdiskId'] = self._image_ec2_id(ramdisk_id, 'ramdisk')
+        i['imageOwnerId'] = image['properties'].get('owner_id')
+        i['imageLocation'] = image['properties'].get('image_location')
+        i['imageState'] = image['properties'].get('image_state')
+        i['type'] = image_type
+        i['isPublic'] = str(image['properties'].get('is_public', '')) == 'True'
+        i['architecture'] = image['properties'].get('architecture')
         return i
 
     def describe_images(self, context, image_id=None, **kwargs):
         # NOTE: image_id is a list!
-        images = self.image_service.index(context)
         if image_id:
-            images = filter(lambda x: x['id'] in image_id, images)
-        images = [self._format_image(context, i) for i in images]
+            images = []
+            for ec2_id in image_id:
+                try:
+                    image = self._get_image(context, ec2_id)
+                except exception.NotFound:
+                    raise exception.NotFound(_('Image %s not found') %
+                                             ec2_id)
+                images.append(image)
+        else:
+            images = self.image_service.detail(context)
+        images = [self._format_image(i) for i in images]
         return {'imagesSet': images}
 
     def deregister_image(self, context, image_id, **kwargs):
         LOG.audit(_("De-registering image %s"), image_id, context=context)
-        self.image_service.deregister(context, image_id)
+        image = self._get_image(context, image_id)
+        internal_id = image['id']
+        self.image_service.delete(context, internal_id)
         return {'imageId': image_id}
 
     def register_image(self, context, image_location=None, **kwargs):
         if image_location is None and 'name' in kwargs:
             image_location = kwargs['name']
-        image_id = self.image_service.register(context, image_location)
+        metadata = {'properties': {'image_location': image_location}}
+        image = self.image_service.create(context, metadata)
+        image_id = self._image_ec2_id(image['id'],
+                                      image['properties']['type'])
         msg = _("Registered image %(image_location)s with"
                 " id %(image_id)s") % locals()
         LOG.audit(msg, context=context)
@@ -890,13 +928,11 @@ class CloudController(object):
             raise exception.ApiError(_('attribute not supported: %s')
                                      % attribute)
         try:
-            image = self._format_image(context,
-                                       self.image_service.show(context,
-                                                               image_id))
-        except IndexError:
-            raise exception.ApiError(_('invalid id: %s') % image_id)
-        result = {'image_id': image_id, 'launchPermission': []}
-        if image['isPublic']:
+            image = self._get_image(context, image_id)
+        except exception.NotFound:
+            raise exception.NotFound(_('Image %s not found') % image_id)
+        result = {'imageId': image_id, 'launchPermission': []}
+        if image['properties']['is_public']:
             result['launchPermission'].append({'group': 'all'})
         return result
 
@@ -913,8 +949,18 @@ class CloudController(object):
         if not operation_type in ['add', 'remove']:
             raise exception.ApiError(_('operation_type must be add or remove'))
         LOG.audit(_("Updating image %s publicity"), image_id, context=context)
-        return self.image_service.modify(context, image_id, operation_type)
+
+        try:
+            image = self._get_image(context, image_id)
+        except exception.NotFound:
+            raise exception.NotFound(_('Image %s not found') % image_id)
+        internal_id = image['id']
+        del(image['id'])
+        raise Exception(image)
+        image['properties']['is_public'] = (operation_type == 'add')
+        return self.image_service.update(context, internal_id, image)
 
     def update_image(self, context, image_id, **kwargs):
-        result = self.image_service.update(context, image_id, dict(kwargs))
+        internal_id = ec2utils.ec2_id_to_id(image_id)
+        result = self.image_service.update(context, internal_id, dict(kwargs))
         return result
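The image-id plumbing above boils down to a small pure function: internal integer ids get a type-dependent EC2 prefix. A standalone version mirroring _image_ec2_id (the asserts only illustrate the format):

    _type_prefix_map = {'machine': 'ami', 'kernel': 'aki', 'ramdisk': 'ari'}

    def image_ec2_id(image_id, image_type='machine'):
        return '%s-%08x' % (_type_prefix_map[image_type], int(image_id))

    assert image_ec2_id(42) == 'ami-0000002a'
    assert image_ec2_id(1, 'kernel') == 'aki-00000001'
    assert image_ec2_id(2, 'ramdisk') == 'ari-00000002'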
nova/api/ec2/ec2utils.py (new file, 32 lines)
@@ -0,0 +1,32 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import exception
+
+
+def ec2_id_to_id(ec2_id):
+    """Convert an ec2 ID (i-[base 16 number]) to an instance id (int)"""
+    try:
+        return int(ec2_id.split('-')[-1], 16)
+    except ValueError:
+        raise exception.NotFound(_("Id %s Not Found") % ec2_id)
+
+
+def id_to_ec2_id(instance_id, template='i-%08x'):
+    """Convert an instance ID (int) to an ec2 ID (i-[base 16 number])"""
+    return template % instance_id
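Round-tripping the two helpers in the new module behaves like this:

    >>> id_to_ec2_id(10)
    'i-0000000a'
    >>> id_to_ec2_id(10, 'vol-%08x')
    'vol-0000000a'
    >>> ec2_id_to_id('i-0000000a')
    10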
@@ -65,7 +65,7 @@ class MetadataRequestHandler(wsgi.Application):
                 data = data[item]
         return data
 
-    @webob.dec.wsgify
+    @webob.dec.wsgify(RequestClass=wsgi.Request)
     def __call__(self, req):
         cc = cloud.CloudController()
         remote_address = req.remote_addr
@@ -49,7 +49,7 @@ flags.DEFINE_bool('allow_admin_api',
 class FaultWrapper(wsgi.Middleware):
     """Calls down the middleware stack, making exceptions into faults."""
 
-    @webob.dec.wsgify
+    @webob.dec.wsgify(RequestClass=wsgi.Request)
     def __call__(self, req):
         try:
             return req.get_response(self.application)
@@ -79,8 +79,8 @@ class APIRouter(wsgi.Router):
 
         server_members['pause'] = 'POST'
         server_members['unpause'] = 'POST'
-        server_members["diagnostics"] = "GET"
-        server_members["actions"] = "GET"
+        server_members['diagnostics'] = 'GET'
+        server_members['actions'] = 'GET'
         server_members['suspend'] = 'POST'
         server_members['resume'] = 'POST'
         server_members['rescue'] = 'POST'
@@ -89,7 +89,7 @@ class APIRouter(wsgi.Router):
         server_members['inject_network_info'] = 'POST'
 
         mapper.resource("zone", "zones", controller=zones.Controller(),
-                        collection={'detail': 'GET'})
+                        collection={'detail': 'GET', 'info': 'GET'}),
 
         mapper.resource("user", "users", controller=users.Controller(),
                         collection={'detail': 'GET'})
@@ -124,7 +124,7 @@ class APIRouter(wsgi.Router):
 
 
 class Versions(wsgi.Application):
-    @webob.dec.wsgify
+    @webob.dec.wsgify(RequestClass=wsgi.Request)
     def __call__(self, req):
         """Respond to a request for all OpenStack API versions."""
         response = {
@@ -133,4 +133,6 @@ class Versions(wsgi.Application):
         metadata = {
             "application/xml": {
                 "attributes": dict(version=["status", "id"])}}
-        return wsgi.Serializer(req.environ, metadata).to_content_type(response)
+
+        content_type = req.best_match_content_type()
+        return wsgi.Serializer(metadata).serialize(response, content_type)
@@ -74,7 +74,7 @@ class Controller(wsgi.Controller):
     def update(self, req, id):
         """This is really create or update."""
         self._check_admin(req.environ['nova.context'])
-        env = self._deserialize(req.body, req)
+        env = self._deserialize(req.body, req.get_content_type())
         description = env['account'].get('description')
         manager = env['account'].get('manager')
         try:
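This hunk is the calling convention the commit message refers to: _deserialize now receives the request's content type instead of the request object itself. The dispatch shape, as a stand-in rather than nova's implementation:

    import json
    from xml.dom import minidom

    def deserialize(data, content_type):
        """Parse a request body according to an explicit content type."""
        if content_type == 'application/xml':
            return minidom.parseString(data)
        return json.loads(data)

    env = deserialize('{"account": {"description": "dev"}}',
                      'application/json')
    assert env['account']['description'] == 'dev'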
@@ -48,7 +48,7 @@ class AuthMiddleware(wsgi.Middleware):
         self.auth = auth.manager.AuthManager()
         super(AuthMiddleware, self).__init__(application)
 
-    @webob.dec.wsgify
+    @webob.dec.wsgify(RequestClass=wsgi.Request)
     def __call__(self, req):
         if not self.has_authentication(req):
             return self.authenticate(req)
@@ -132,7 +132,7 @@ class AuthMiddleware(wsgi.Middleware):
 
         username - string
         key - string API key
-        req - webob.Request object
+        req - wsgi.Request object
         """
         ctxt = context.get_admin_context()
         user = self.auth.get_user_from_access_key(key)
@@ -25,7 +25,7 @@ def limited(items, request, max_limit=1000):
     Return a slice of items according to requested offset and limit.
 
     @param items: A sliceable entity
-    @param request: `webob.Request` possibly containing 'offset' and 'limit'
+    @param request: `wsgi.Request` possibly containing 'offset' and 'limit'
                     GET variables. 'offset' is where to start in the list,
                     and 'limit' is the maximum number of items to return. If
                     'limit' is not specified, 0, or > max_limit, we default
@@ -36,15 +36,18 @@ def limited(items, request, max_limit=1000):
     try:
         offset = int(request.GET.get('offset', 0))
     except ValueError:
-        offset = 0
+        raise webob.exc.HTTPBadRequest(_('offset param must be an integer'))
 
     try:
         limit = int(request.GET.get('limit', max_limit))
     except ValueError:
-        limit = max_limit
+        raise webob.exc.HTTPBadRequest(_('limit param must be an integer'))
 
-    if offset < 0 or limit < 0:
-        raise webob.exc.HTTPBadRequest()
+    if limit < 0:
+        raise webob.exc.HTTPBadRequest(_('limit param must be positive'))
+
+    if offset < 0:
+        raise webob.exc.HTTPBadRequest(_('offset param must be positive'))
 
     limit = min(max_limit, limit or max_limit)
     range_end = offset + limit
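limited() now rejects malformed paging parameters with a 400 instead of silently clamping them. The same contract as a standalone function over a plain dict (illustrative; nova raises webob.exc.HTTPBadRequest where this raises ValueError):

    def limited(items, params, max_limit=1000):
        try:
            offset = int(params.get('offset', 0))
        except ValueError:
            raise ValueError('offset param must be an integer')
        try:
            limit = int(params.get('limit', max_limit))
        except ValueError:
            raise ValueError('limit param must be an integer')
        if limit < 0:
            raise ValueError('limit param must be positive')
        if offset < 0:
            raise ValueError('offset param must be positive')
        limit = min(max_limit, limit or max_limit)
        return items[offset:offset + limit]

    assert limited(range(10), {'offset': '8'}) == [8, 9]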
@@ -65,7 +65,7 @@ class Controller(wsgi.Controller):
 
     def create(self, req, server_id):
         """Creates a new console"""
-        #info = self._deserialize(req.body, req)
+        #info = self._deserialize(req.body, req.get_content_type())
         self.console_api.create_console(
             req.environ['nova.context'],
             int(server_id))
|
|||||||
@@ -42,7 +42,7 @@ class Fault(webob.exc.HTTPException):
|
|||||||
"""Create a Fault for the given webob.exc.exception."""
|
"""Create a Fault for the given webob.exc.exception."""
|
||||||
self.wrapped_exc = exception
|
self.wrapped_exc = exception
|
||||||
|
|
||||||
@webob.dec.wsgify
|
@webob.dec.wsgify(RequestClass=wsgi.Request)
|
||||||
def __call__(self, req):
|
def __call__(self, req):
|
||||||
"""Generate a WSGI response based on the exception passed to ctor."""
|
"""Generate a WSGI response based on the exception passed to ctor."""
|
||||||
# Replace the body with fault details.
|
# Replace the body with fault details.
|
||||||
@@ -57,6 +57,7 @@ class Fault(webob.exc.HTTPException):
|
|||||||
fault_data[fault_name]['retryAfter'] = retry
|
fault_data[fault_name]['retryAfter'] = retry
|
||||||
# 'code' is an attribute on the fault tag itself
|
# 'code' is an attribute on the fault tag itself
|
||||||
metadata = {'application/xml': {'attributes': {fault_name: 'code'}}}
|
metadata = {'application/xml': {'attributes': {fault_name: 'code'}}}
|
||||||
serializer = wsgi.Serializer(req.environ, metadata)
|
serializer = wsgi.Serializer(metadata)
|
||||||
self.wrapped_exc.body = serializer.to_content_type(fault_data)
|
content_type = req.best_match_content_type()
|
||||||
|
self.wrapped_exc.body = serializer.serialize(fault_data, content_type)
|
||||||
return self.wrapped_exc
|
return self.wrapped_exc
|
||||||
|
|||||||
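
Note: the fault body is now serialized according to the client's
Accept header rather than pulled from the request environ. A rough
illustration of the negotiation step (a simplified stand-in, not the
wsgi.Request implementation):

    def best_match_content_type(accept_header,
                                supported=('application/json',
                                           'application/xml')):
        # Pick the first supported type mentioned in Accept; fall
        # back to JSON when the header is absent or matches nothing.
        for content_type in supported:
            if content_type in (accept_header or ''):
                return content_type
        return supported[0]

    assert best_match_content_type('application/xml') == 'application/xml'
    assert best_match_content_type(None) == 'application/json'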
@@ -151,7 +151,7 @@ class Controller(wsgi.Controller):
 
     def create(self, req):
         context = req.environ['nova.context']
-        env = self._deserialize(req.body, req)
+        env = self._deserialize(req.body, req.get_content_type())
         instance_id = env["image"]["serverId"]
         name = env["image"]["name"]
 

@@ -57,7 +57,7 @@ class RateLimitingMiddleware(wsgi.Middleware):
         self.limiter = WSGIAppProxy(service_host)
         super(RateLimitingMiddleware, self).__init__(application)
 
-    @webob.dec.wsgify
+    @webob.dec.wsgify(RequestClass=wsgi.Request)
     def __call__(self, req):
         """Rate limit the request.
 

@@ -183,7 +183,7 @@ class WSGIApp(object):
         """Create the WSGI application using the given Limiter instance."""
         self.limiter = limiter
 
-    @webob.dec.wsgify
+    @webob.dec.wsgify(RequestClass=wsgi.Request)
     def __call__(self, req):
         parts = req.path_info.split('/')
         # format: /limiter/<username>/<urlencoded action>

@@ -98,7 +98,7 @@ class Controller(wsgi.Controller):
         'application/xml': {
             "attributes": {
                 "server": ["id", "imageId", "name", "flavorId", "hostId",
-                           "status", "progress"]}}}
+                           "status", "progress", "adminPass"]}}}
 
     def __init__(self):
         self.compute_api = compute.API()

@@ -141,7 +141,7 @@ class Controller(wsgi.Controller):
 
     def create(self, req):
         """ Creates a new server for a given user """
-        env = self._deserialize(req.body, req)
+        env = self._deserialize(req.body, req.get_content_type())
         if not env:
             return faults.Fault(exc.HTTPUnprocessableEntity())
 

@@ -178,11 +178,21 @@ class Controller(wsgi.Controller):
             key_data=key_pair['public_key'],
             metadata=metadata,
             onset_files=env.get('onset_files', []))
-        return _translate_keys(instances[0])
+
+        server = _translate_keys(instances[0])
+        password = "%s%s" % (server['server']['name'][:4],
+                             utils.generate_password(12))
+        server['server']['adminPass'] = password
+        self.compute_api.set_admin_password(context, server['server']['id'],
+                                            password)
+        return server
 
     def update(self, req, id):
         """ Updates the server name or password """
-        inst_dict = self._deserialize(req.body, req)
+        if len(req.body) == 0:
+            raise exc.HTTPUnprocessableEntity()
+
+        inst_dict = self._deserialize(req.body, req.get_content_type())
         if not inst_dict:
             return faults.Fault(exc.HTTPUnprocessableEntity())
 
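
Note: server create now returns the generated admin password in the
response ("adminPass" was also added to the XML attributes above so it
survives serialization). A stand-in showing only the shape of the
generated value; the real code uses utils.generate_password(12):

    import random
    import string

    def sketch_admin_password(server_name, length=12):
        # First four characters of the server name plus a random
        # suffix, mirroring the format built in create() above.
        suffix = ''.join(random.choice(string.ascii_letters)
                         for _ in range(length))
        return '%s%s' % (server_name[:4], suffix)

    assert sketch_admin_password('webserver').startswith('webs')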
@@ -214,7 +224,7 @@ class Controller(wsgi.Controller):
             'rebuild': self._action_rebuild,
             }
 
-        input_dict = self._deserialize(req.body, req)
+        input_dict = self._deserialize(req.body, req.get_content_type())
         for key in actions.keys():
             if key in input_dict:
                 return actions[key](input_dict, req, id)

@@ -73,7 +73,7 @@ class Controller(wsgi.Controller):
 
     def create(self, req):
         self._check_admin(req.environ['nova.context'])
-        env = self._deserialize(req.body, req)
+        env = self._deserialize(req.body, req.get_content_type())
         is_admin = env['user'].get('admin') in ('T', 'True', True)
         name = env['user'].get('name')
         access = env['user'].get('access')

@@ -83,7 +83,7 @@ class Controller(wsgi.Controller):
 
     def update(self, req, id):
         self._check_admin(req.environ['nova.context'])
-        env = self._deserialize(req.body, req)
+        env = self._deserialize(req.body, req.get_content_type())
         is_admin = env['user'].get('admin')
         if is_admin is not None:
             is_admin = is_admin in ('T', 'True', True)

@@ -1,4 +1,4 @@
-# Copyright 2010 OpenStack LLC.
+# Copyright 2011 OpenStack LLC.
 # All Rights Reserved.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may

@@ -18,6 +18,7 @@ import common
 from nova import flags
 from nova import wsgi
 from nova import db
+from nova.scheduler import api
 
 
 FLAGS = flags.FLAGS

@@ -32,6 +33,10 @@ def _filter_keys(item, keys):
     return dict((k, v) for k, v in item.iteritems() if k in keys)
 
 
+def _exclude_keys(item, keys):
+    return dict((k, v) for k, v in item.iteritems() if k not in keys)
+
+
 def _scrub_zone(zone):
     return _filter_keys(zone, ('id', 'api_url'))
 

@@ -41,19 +46,30 @@ class Controller(wsgi.Controller):
     _serialization_metadata = {
         'application/xml': {
             "attributes": {
-                "zone": ["id", "api_url"]}}}
+                "zone": ["id", "api_url", "name", "capabilities"]}}}
 
     def index(self, req):
         """Return all zones in brief"""
-        items = db.zone_get_all(req.environ['nova.context'])
+        # Ask the ZoneManager in the Scheduler for most recent data,
+        # or fall-back to the database ...
+        items = api.API().get_zone_list(req.environ['nova.context'])
+        if not items:
+            items = db.zone_get_all(req.environ['nova.context'])
+
         items = common.limited(items, req)
-        items = [_scrub_zone(item) for item in items]
+        items = [_exclude_keys(item, ['username', 'password'])
+                 for item in items]
         return dict(zones=items)
 
     def detail(self, req):
         """Return all zones in detail"""
         return self.index(req)
 
+    def info(self, req):
+        """Return name and capabilities for this zone."""
+        return dict(zone=dict(name=FLAGS.zone_name,
+                    capabilities=FLAGS.zone_capabilities))
+
     def show(self, req, id):
         """Return data about the given zone id"""
         zone_id = int(id)

@@ -67,13 +83,13 @@ class Controller(wsgi.Controller):
 
     def create(self, req):
         context = req.environ['nova.context']
-        env = self._deserialize(req.body, req)
+        env = self._deserialize(req.body, req.get_content_type())
         zone = db.zone_create(context, env["zone"])
         return dict(zone=_scrub_zone(zone))
 
     def update(self, req, id):
         context = req.environ['nova.context']
-        env = self._deserialize(req.body, req)
+        env = self._deserialize(req.body, req.get_content_type())
         zone_id = int(id)
         zone = db.zone_update(context, zone_id, env["zone"])
         return dict(zone=_scrub_zone(zone))
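
Note: zone listings now strip credentials rather than whitelisting two
fields, so name and capabilities flow through to the response. The new
helper is trivial to exercise on a plain dict:

    def _exclude_keys(item, keys):
        return dict((k, v) for k, v in item.items() if k not in keys)

    zone = {'id': 1, 'api_url': 'http://zone1/', 'name': 'zone1',
            'username': 'admin', 'password': 'secret'}
    scrubbed = _exclude_keys(zone, ['username', 'password'])
    assert 'password' not in scrubbed and scrubbed['name'] == 'zone1'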
@@ -126,9 +126,9 @@ class API(base.Base):
 
         image = self.image_service.show(context, image_id)
         if kernel_id is None:
-            kernel_id = image.get('kernel_id', None)
+            kernel_id = image['properties'].get('kernel_id', None)
         if ramdisk_id is None:
-            ramdisk_id = image.get('ramdisk_id', None)
+            ramdisk_id = image['properties'].get('ramdisk_id', None)
         # FIXME(sirp): is there a way we can remove null_kernel?
         # No kernel and ramdisk for raw images
         if kernel_id == str(FLAGS.null_kernel):

@@ -165,6 +165,7 @@ class API(base.Base):
             'image_id': image_id,
             'kernel_id': kernel_id or '',
             'ramdisk_id': ramdisk_id or '',
+            'state': 0,
             'state_description': 'scheduling',
             'user_id': context.user_id,
             'project_id': context.project_id,

@@ -498,9 +499,10 @@ class API(base.Base):
         """Unrescue the given instance."""
         self._cast_compute_message('unrescue_instance', context, instance_id)
 
-    def set_admin_password(self, context, instance_id):
+    def set_admin_password(self, context, instance_id, password=None):
         """Set the root/admin password for the given instance."""
-        self._cast_compute_message('set_admin_password', context, instance_id)
+        self._cast_compute_message('set_admin_password', context, instance_id,
+                                   password)
 
     def inject_file(self, context, instance_id):
         """Write a file to the given instance."""
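
Note: the optional password parameter keeps existing callers working
while letting the OpenStack API pass along the password it already
returned to the client. A self-contained sketch of the call shape
(ComputeAPISketch is illustrative, not code from this commit):

    class ComputeAPISketch(object):
        def __init__(self, cast):
            self._cast = cast  # stand-in for _cast_compute_message

        def set_admin_password(self, context, instance_id, password=None):
            self._cast('set_admin_password', context, instance_id, password)

    calls = []
    api = ComputeAPISketch(lambda *args: calls.append(args))
    api.set_admin_password('ctxt', 42)            # old-style call still works
    api.set_admin_password('ctxt', 42, 's3cret')  # new explicit password
    assert calls[0][3] is None and calls[1][3] == 's3cret'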
@@ -133,10 +133,10 @@ class XVPConsoleProxy(object):
             return
         logging.debug(_("Starting xvp"))
         try:
-            utils.execute('xvp -p %s -c %s -l %s' %
-                          (FLAGS.console_xvp_pid,
-                           FLAGS.console_xvp_conf,
-                           FLAGS.console_xvp_log))
+            utils.execute('xvp',
+                          '-p', FLAGS.console_xvp_pid,
+                          '-c', FLAGS.console_xvp_conf,
+                          '-l', FLAGS.console_xvp_log)
         except exception.ProcessExecutionError, err:
             logging.error(_("Error starting xvp: %s") % err)
 

@@ -190,5 +190,5 @@ class XVPConsoleProxy(object):
             flag = '-x'
         #xvp will blow up on passwords that are too long (mdragon)
         password = password[:maxlen]
-        out, err = utils.execute('xvp %s' % flag, process_input=password)
+        out, err = utils.execute('xvp', flag, process_input=password)
         return out.strip()

@@ -105,8 +105,10 @@ def generate_key_pair(bits=1024):
 
     tmpdir = tempfile.mkdtemp()
     keyfile = os.path.join(tmpdir, 'temp')
-    utils.execute('ssh-keygen -q -b %d -N "" -f %s' % (bits, keyfile))
-    (out, err) = utils.execute('ssh-keygen -q -l -f %s.pub' % (keyfile))
+    utils.execute('ssh-keygen', '-q', '-b', bits, '-N', '',
+                  '-f', keyfile)
+    (out, err) = utils.execute('ssh-keygen', '-q', '-l', '-f',
+                               '%s.pub' % (keyfile))
     fingerprint = out.split(' ')[1]
     private_key = open(keyfile).read()
     public_key = open(keyfile + '.pub').read()

@@ -118,7 +120,8 @@ def generate_key_pair(bits=1024):
     # bio = M2Crypto.BIO.MemoryBuffer()
     # key.save_pub_key_bio(bio)
     # public_key = bio.read()
-    # public_key, err = execute('ssh-keygen -y -f /dev/stdin', private_key)
+    # public_key, err = execute('ssh-keygen', '-y', '-f',
+    #                           '/dev/stdin', private_key)
 
     return (private_key, public_key, fingerprint)
 

@@ -143,9 +146,10 @@ def revoke_cert(project_id, file_name):
     start = os.getcwd()
     os.chdir(ca_folder(project_id))
     # NOTE(vish): potential race condition here
-    utils.execute("openssl ca -config ./openssl.cnf -revoke '%s'" % file_name)
-    utils.execute("openssl ca -gencrl -config ./openssl.cnf -out '%s'" %
-                  FLAGS.crl_file)
+    utils.execute('openssl', 'ca', '-config', './openssl.cnf', '-revoke',
+                  file_name)
+    utils.execute('openssl', 'ca', '-gencrl', '-config', './openssl.cnf',
+                  '-out', FLAGS.crl_file)
     os.chdir(start)
 
 

@@ -193,9 +197,9 @@ def generate_x509_cert(user_id, project_id, bits=1024):
     tmpdir = tempfile.mkdtemp()
     keyfile = os.path.abspath(os.path.join(tmpdir, 'temp.key'))
     csrfile = os.path.join(tmpdir, 'temp.csr')
-    utils.execute("openssl genrsa -out %s %s" % (keyfile, bits))
-    utils.execute("openssl req -new -key %s -out %s -batch -subj %s" %
-                  (keyfile, csrfile, subject))
+    utils.execute('openssl', 'genrsa', '-out', keyfile, str(bits))
+    utils.execute('openssl', 'req', '-new', '-key', keyfile, '-out', csrfile,
+                  '-batch', '-subj', subject)
     private_key = open(keyfile).read()
     csr = open(csrfile).read()
     shutil.rmtree(tmpdir)

@@ -212,8 +216,8 @@ def _ensure_project_folder(project_id):
     if not os.path.exists(ca_path(project_id)):
         start = os.getcwd()
         os.chdir(ca_folder())
-        utils.execute("sh geninter.sh %s %s" %
-                      (project_id, _project_cert_subject(project_id)))
+        utils.execute('sh', 'geninter.sh', project_id,
+                      _project_cert_subject(project_id))
        os.chdir(start)
 
 

@@ -228,8 +232,8 @@ def generate_vpn_files(project_id):
     start = os.getcwd()
     os.chdir(ca_folder())
     # TODO(vish): the shell scripts could all be done in python
-    utils.execute("sh genvpn.sh %s %s" %
-                  (project_id, _vpn_cert_subject(project_id)))
+    utils.execute('sh', 'genvpn.sh',
+                  project_id, _vpn_cert_subject(project_id))
     with open(csr_fn, "r") as csrfile:
         csr_text = csrfile.read()
     (serial, signed_csr) = sign_csr(csr_text, project_id)

@@ -259,9 +263,10 @@ def _sign_csr(csr_text, ca_folder):
     start = os.getcwd()
     # Change working dir to CA
     os.chdir(ca_folder)
-    utils.execute("openssl ca -batch -out %s -config "
-                  "./openssl.cnf -infiles %s" % (outbound, inbound))
-    out, _err = utils.execute("openssl x509 -in %s -serial -noout" % outbound)
+    utils.execute('openssl', 'ca', '-batch', '-out', outbound, '-config',
+                  './openssl.cnf', '-infiles', inbound)
+    out, _err = utils.execute('openssl', 'x509', '-in', outbound,
+                              '-serial', '-noout')
     serial = out.rpartition("=")[2]
     os.chdir(start)
     with open(outbound, "r") as crtfile:
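
Note: the utils.execute refactor in the hunks above switches every
shell-interpolated command string to an argument list, so values such
as file names can no longer be mangled by (or injected into) the
shell. A rough stand-in for the argv-style call using the standard
library (not nova's actual implementation):

    import subprocess

    def execute_sketch(*cmd, **kwargs):
        # Each argument goes to the process as-is; no shell parsing.
        proc = subprocess.Popen([str(c) for c in cmd],
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate(kwargs.get('process_input'))
        return out, err

    # The metacharacters reach echo verbatim instead of a shell:
    out, err = execute_sketch('echo', 'a name; rm -rf /')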
@@ -517,6 +517,13 @@ def network_create_safe(context, values):
     return IMPL.network_create_safe(context, values)
 
 
+def network_delete_safe(context, network_id):
+    """Delete network with key network_id.
+    This method assumes that the network is not associated with any project
+    """
+    return IMPL.network_delete_safe(context, network_id)
+
+
 def network_create_fixed_ips(context, network_id, num_vpn_clients):
     """Create the ips for the network, reserving sepecified ips."""
     return IMPL.network_create_fixed_ips(context, network_id, num_vpn_clients)

@@ -553,6 +560,11 @@ def network_get_by_bridge(context, bridge):
     return IMPL.network_get_by_bridge(context, bridge)
 
 
+def network_get_by_cidr(context, cidr):
+    """Get a network by cidr or raise if it does not exist"""
+    return IMPL.network_get_by_cidr(context, cidr)
+
+
 def network_get_by_instance(context, instance_id):
     """Get a network by instance id or raise if it does not exist."""
     return IMPL.network_get_by_instance(context, instance_id)

@@ -1054,6 +1054,15 @@ def network_create_safe(context, values):
         return None
 
 
+@require_admin_context
+def network_delete_safe(context, network_id):
+    session = get_session()
+    with session.begin():
+        network_ref = network_get(context, network_id=network_id, \
+                                  session=session)
+        session.delete(network_ref)
+
+
 @require_admin_context
 def network_disassociate(context, network_id):
     network_update(context, network_id, {'project_id': None,

@@ -1127,6 +1136,18 @@ def network_get_by_bridge(context, bridge):
     return result
 
 
+@require_admin_context
+def network_get_by_cidr(context, cidr):
+    session = get_session()
+    result = session.query(models.Network).\
+                     filter_by(cidr=cidr).first()
+
+    if not result:
+        raise exception.NotFound(_('Network with cidr %s does not exist') %
+                                 cidr)
+    return result
+
+
 @require_admin_context
 def network_get_by_instance(_context, instance_id):
     session = get_session()
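
Note: network_get_by_cidr raises NotFound rather than returning None,
matching the other network_get_by_* helpers, so callers must handle
the exception. Hypothetical caller-side sketch using the new db API:

    from nova import context
    from nova import db
    from nova import exception

    def network_exists(cidr):
        ctxt = context.get_admin_context()
        try:
            db.network_get_by_cidr(ctxt, cidr)
        except exception.NotFound:
            return False
        return True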
@@ -88,6 +88,10 @@ class InvalidInputException(Error):
     pass
 
 
+class InvalidContentType(Error):
+    pass
+
+
 class TimeoutException(Error):
     pass
 

@@ -321,6 +321,8 @@ DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger')
 
 DEFINE_string('state_path', os.path.join(os.path.dirname(__file__), '../'),
               "Top-level directory for maintaining nova's state")
+DEFINE_string('lock_path', os.path.join(os.path.dirname(__file__), '../'),
+              "Directory for lock files")
 DEFINE_string('logdir', None, 'output to a per-service log file in named '
               'directory')
 

@@ -346,7 +348,7 @@ DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager',
               'Manager for scheduler')
 
 # The service to use for image search and retrieval
-DEFINE_string('image_service', 'nova.image.s3.S3ImageService',
+DEFINE_string('image_service', 'nova.image.local.LocalImageService',
              'The service to use for retrieving and searching for images.')
 
 DEFINE_string('host', socket.gethostname(),

@@ -354,3 +356,7 @@ DEFINE_string('host', socket.gethostname(),
 
 DEFINE_string('node_availability_zone', 'nova',
               'availability zone of this node')
+
+DEFINE_string('zone_name', 'nova', 'name of this zone')
+DEFINE_string('zone_capabilities', 'kypervisor:xenserver;os:linux',
+              'Key/Value tags which represent capabilities of this zone')
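
Note: zone_capabilities packs key/value tags into a single flag
string. A sketch of one plausible way to unpack it (the delimiter
handling here is an assumption based on the default value, not code
from this commit):

    def parse_capabilities(flag_value):
        # 'kypervisor:xenserver;os:linux'
        #     -> {'kypervisor': 'xenserver', 'os': 'linux'}
        caps = {}
        for pair in flag_value.split(';'):
            key, _sep, value = pair.partition(':')
            caps[key] = value
        return caps

    assert parse_capabilities('kypervisor:xenserver;os:linux') == \
        {'kypervisor': 'xenserver', 'os': 'linux'}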
@@ -17,9 +17,8 @@
 """Implementation of an image service that uses Glance as the backend"""
 
 from __future__ import absolute_import
-import httplib
-import json
-import urlparse
+
+from glance.common import exception as glance_exception
 
 from nova import exception
 from nova import flags

@@ -53,31 +52,64 @@ class GlanceImageService(service.BaseImageService):
         """
         return self.client.get_images_detailed()
 
-    def show(self, context, id):
+    def show(self, context, image_id):
         """
         Returns a dict containing image data for the given opaque image id.
         """
-        image = self.client.get_image_meta(id)
-        if image:
-            return image
-        raise exception.NotFound
+        try:
+            image = self.client.get_image_meta(image_id)
+        except glance_exception.NotFound:
+            raise exception.NotFound
+        return image
+
+    def show_by_name(self, context, name):
+        """
+        Returns a dict containing image data for the given name.
+        """
+        # TODO(vish): replace this with more efficient call when glance
+        #             supports it.
+        images = self.detail(context)
+        image = None
+        for cantidate in images:
+            if name == cantidate.get('name'):
+                image = cantidate
+                break
+        if image is None:
+            raise exception.NotFound
+        return image
+
+    def get(self, context, image_id, data):
+        """
+        Calls out to Glance for metadata and data and writes data.
+        """
+        try:
+            metadata, image_chunks = self.client.get_image(image_id)
+        except glance_exception.NotFound:
+            raise exception.NotFound
+        for chunk in image_chunks:
+            data.write(chunk)
+        return metadata
 
-    def create(self, context, data):
+    def create(self, context, metadata, data=None):
         """
         Store the image data and return the new image id.
 
         :raises AlreadyExists if the image already exist.
 
         """
-        return self.client.add_image(image_meta=data)
+        return self.client.add_image(metadata, data)
 
-    def update(self, context, image_id, data):
+    def update(self, context, image_id, metadata, data=None):
         """Replace the contents of the given image with the new data.
 
         :raises NotFound if the image does not exist.
 
         """
-        return self.client.update_image(image_id, data)
+        try:
+            result = self.client.update_image(image_id, metadata, data)
+        except glance_exception.NotFound:
+            raise exception.NotFound
+        return result
 
     def delete(self, context, image_id):
         """

@@ -86,7 +118,11 @@ class GlanceImageService(service.BaseImageService):
         :raises NotFound if the image does not exist.
 
         """
-        return self.client.delete_image(image_id)
+        try:
+            result = self.client.delete_image(image_id)
+        except glance_exception.NotFound:
+            raise exception.NotFound
+        return result
 
     def delete_all(self):
         """
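
Note: every Glance call now follows the same translate-the-exception
pattern, so callers only ever see nova's exception.NotFound. The
pattern in isolation (a generic sketch, not nova code):

    def translating(call, backend_not_found, local_not_found):
        # Wrap a backend call so its NotFound surfaces as ours.
        def wrapper(*args, **kwargs):
            try:
                return call(*args, **kwargs)
            except backend_not_found:
                raise local_not_found
        return wrapper

    class BackendMissing(Exception):
        pass

    class LocalMissing(Exception):
        pass

    def fetch(image_id):
        raise BackendMissing()

    safe_fetch = translating(fetch, BackendMissing, LocalMissing)
    try:
        safe_fetch(1)
    except LocalMissing:
        pass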
@@ -15,57 +15,110 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-import cPickle as pickle
+import json
 import os.path
 import random
-import tempfile
+import shutil
 
+from nova import flags
 from nova import exception
 from nova.image import service
 
 
-class LocalImageService(service.BaseImageService):
+FLAGS = flags.FLAGS
+flags.DEFINE_string('images_path', '$state_path/images',
+                    'path to decrypted images')
+
+
+class LocalImageService(service.BaseImageService):
     """Image service storing images to local disk.
 
     It assumes that image_ids are integers.
 
     """
 
     def __init__(self):
-        self._path = tempfile.mkdtemp()
+        self._path = FLAGS.images_path
 
-    def _path_to(self, image_id):
-        return os.path.join(self._path, str(image_id))
+    def _path_to(self, image_id, fname='info.json'):
+        if fname:
+            return os.path.join(self._path, '%08x' % int(image_id), fname)
+        return os.path.join(self._path, '%08x' % int(image_id))
 
     def _ids(self):
         """The list of all image ids."""
-        return [int(i) for i in os.listdir(self._path)]
+        return [int(i, 16) for i in os.listdir(self._path)]
 
     def index(self, context):
-        return [dict(id=i['id'], name=i['name']) for i in self.detail(context)]
+        return [dict(image_id=i['id'], name=i.get('name'))
+                for i in self.detail(context)]
 
     def detail(self, context):
-        return [self.show(context, id) for id in self._ids()]
+        images = []
+        for image_id in self._ids():
+            try:
+                image = self.show(context, image_id)
+                images.append(image)
+            except exception.NotFound:
+                continue
+        return images
 
-    def show(self, context, id):
+    def show(self, context, image_id):
         try:
-            return pickle.load(open(self._path_to(id)))
-        except IOError:
+            with open(self._path_to(image_id)) as metadata_file:
+                return json.load(metadata_file)
+        except (IOError, ValueError):
             raise exception.NotFound
 
-    def create(self, context, data):
-        """Store the image data and return the new image id."""
-        id = random.randint(0, 2 ** 31 - 1)
-        data['id'] = id
-        self.update(context, id, data)
-        return id
+    def show_by_name(self, context, name):
+        """Returns a dict containing image data for the given name."""
+        # NOTE(vish): Not very efficient, but the local image service
+        #             is for testing so it should be fine.
+        images = self.detail(context)
+        image = None
+        for cantidate in images:
+            if name == cantidate.get('name'):
+                image = cantidate
+                break
+        if image == None:
+            raise exception.NotFound
+        return image
+
+    def get(self, context, image_id, data):
+        """Get image and metadata."""
+        try:
+            with open(self._path_to(image_id)) as metadata_file:
+                metadata = json.load(metadata_file)
+            with open(self._path_to(image_id, 'image')) as image_file:
+                shutil.copyfileobj(image_file, data)
+        except (IOError, ValueError):
+            raise exception.NotFound
+        return metadata
+
+    def create(self, context, metadata, data=None):
+        """Store the image data and return the new image."""
+        image_id = random.randint(0, 2 ** 31 - 1)
+        image_path = self._path_to(image_id, None)
+        if not os.path.exists(image_path):
+            os.mkdir(image_path)
+        return self.update(context, image_id, metadata, data)
 
-    def update(self, context, image_id, data):
+    def update(self, context, image_id, metadata, data=None):
         """Replace the contents of the given image with the new data."""
+        metadata['id'] = image_id
         try:
-            pickle.dump(data, open(self._path_to(image_id), 'w'))
-        except IOError:
+            if data:
+                location = self._path_to(image_id, 'image')
+                with open(location, 'w') as image_file:
+                    shutil.copyfileobj(data, image_file)
+                # NOTE(vish): update metadata similarly to glance
+                metadata['status'] = 'active'
+                metadata['location'] = location
+            with open(self._path_to(image_id), 'w') as metadata_file:
+                json.dump(metadata, metadata_file)
+        except (IOError, ValueError):
            raise exception.NotFound
+        return metadata
 
     def delete(self, context, image_id):
         """Delete the given image.

@@ -73,18 +126,11 @@ class LocalImageService(service.BaseImageService):
 
         """
         try:
-            os.unlink(self._path_to(image_id))
-        except IOError:
+            shutil.rmtree(self._path_to(image_id, None))
+        except (IOError, ValueError):
             raise exception.NotFound
 
     def delete_all(self):
         """Clears out all images in local directory."""
-        for id in self._ids():
-            os.unlink(self._path_to(id))
-
-    def delete_imagedir(self):
-        """Deletes the local directory.
-        Raises OSError if directory is not empty.
-
-        """
-        os.rmdir(self._path)
+        for image_id in self._ids():
+            shutil.rmtree(self._path_to(image_id, None))
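
Note: images now live under a stable, flag-configurable directory,
one zero-padded hex subdirectory per image holding 'info.json' plus
the raw 'image' file. The path scheme in isolation:

    import os

    def path_to(base, image_id, fname='info.json'):
        # image id 10 -> <base>/0000000a/info.json
        if fname:
            return os.path.join(base, '%08x' % int(image_id), fname)
        return os.path.join(base, '%08x' % int(image_id))

    assert path_to('/var/lib/nova/images', 10) == \
        '/var/lib/nova/images/0000000a/info.json'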
300	nova/image/s3.py

@@ -21,8 +21,13 @@ Proxy AMI-related calls from the cloud controller, to the running
 objectstore service.
 """
 
-import json
-import urllib
+import binascii
+import eventlet
+import os
+import shutil
+import tarfile
+import tempfile
+from xml.etree import ElementTree
 
 import boto.s3.connection
 

@@ -31,84 +36,78 @@ from nova import flags
 from nova import utils
 from nova.auth import manager
 from nova.image import service
+from nova.api.ec2 import ec2utils
 
 
 FLAGS = flags.FLAGS
-
-
-def map_s3_to_base(image):
-    """Convert from S3 format to format defined by BaseImageService."""
-    i = {}
-    i['id'] = image.get('imageId')
-    i['name'] = image.get('imageId')
-    i['kernel_id'] = image.get('kernelId')
-    i['ramdisk_id'] = image.get('ramdiskId')
-    i['location'] = image.get('imageLocation')
-    i['owner_id'] = image.get('imageOwnerId')
-    i['status'] = image.get('imageState')
-    i['type'] = image.get('type')
-    i['is_public'] = image.get('isPublic')
-    i['architecture'] = image.get('architecture')
-    return i
+flags.DEFINE_string('image_decryption_dir', '/tmp',
+                    'parent dir for tempdir used for image decryption')
 
 
 class S3ImageService(service.BaseImageService):
-    def modify(self, context, image_id, operation):
-        self._conn(context).make_request(
-            method='POST',
-            bucket='_images',
-            query_args=self._qs({'image_id': image_id,
-                                 'operation': operation}))
-        return True
-
-    def update(self, context, image_id, attributes):
-        """update an image's attributes / info.json"""
-        attributes.update({"image_id": image_id})
-        self._conn(context).make_request(
-            method='POST',
-            bucket='_images',
-            query_args=self._qs(attributes))
-        return True
-
-    def register(self, context, image_location):
-        """ rpc call to register a new image based from a manifest """
-        image_id = utils.generate_uid('ami')
-        self._conn(context).make_request(
-            method='PUT',
-            bucket='_images',
-            query_args=self._qs({'image_location': image_location,
-                                 'image_id': image_id}))
-        return image_id
-
-    def index(self, context):
-        """Return a list of all images that a user can see."""
-        response = self._conn(context).make_request(
-            method='GET',
-            bucket='_images')
-        images = json.loads(response.read())
-        return [map_s3_to_base(i) for i in images]
-
-    def show(self, context, image_id):
-        """return a image object if the context has permissions"""
-        if FLAGS.connection_type == 'fake':
-            return {'imageId': 'bar'}
-        result = self.index(context)
-        result = [i for i in result if i['id'] == image_id]
-        if not result:
-            raise exception.NotFound(_('Image %s could not be found')
-                                     % image_id)
-        image = result[0]
+    def __init__(self, service=None, *args, **kwargs):
+        if service == None:
+            service = utils.import_object(FLAGS.image_service)
+        self.service = service
+        self.service.__init__(*args, **kwargs)
+
+    def create(self, context, metadata, data=None):
+        """metadata['properties'] should contain image_location"""
+        image = self._s3_create(context, metadata)
         return image
 
-    def deregister(self, context, image_id):
-        """ unregister an image """
-        self._conn(context).make_request(
-            method='DELETE',
-            bucket='_images',
-            query_args=self._qs({'image_id': image_id}))
-
-    def _conn(self, context):
+    def delete(self, context, image_id):
+        # FIXME(vish): call to show is to check filter
+        self.show(context, image_id)
+        self.service.delete(context, image_id)
+
+    def update(self, context, image_id, metadata, data=None):
+        # FIXME(vish): call to show is to check filter
+        self.show(context, image_id)
+        image = self.service.update(context, image_id, metadata, data)
+        return image
+
+    def index(self, context):
+        images = self.service.index(context)
+        # FIXME(vish): index doesn't filter so we do it manually
+        return self._filter(context, images)
+
+    def detail(self, context):
+        images = self.service.detail(context)
+        # FIXME(vish): detail doesn't filter so we do it manually
+        return self._filter(context, images)
+
+    @classmethod
+    def _is_visible(cls, context, image):
+        return (context.is_admin
+                or context.project_id == image['properties']['owner_id']
+                or image['properties']['is_public'] == 'True')
+
+    @classmethod
+    def _filter(cls, context, images):
+        filtered = []
+        for image in images:
+            if not cls._is_visible(context, image):
+                continue
+            filtered.append(image)
+        return filtered
+
+    def show(self, context, image_id):
+        image = self.service.show(context, image_id)
+        if not self._is_visible(context, image):
+            raise exception.NotFound
+        return image
+
+    def show_by_name(self, context, name):
+        image = self.service.show_by_name(context, name)
+        if not self._is_visible(context, image):
+            raise exception.NotFound
+        return image
+
+    @staticmethod
+    def _conn(context):
+        # TODO(vish): is there a better way to get creds to sign
+        #             for the user?
         access = manager.AuthManager().get_access_key(context.user,
                                                       context.project)
         secret = str(context.user.secret)
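
Note: S3ImageService is now a thin proxy that delegates storage to the
configured image service and enforces visibility itself. The rule it
applies, restated on plain dicts (a sketch; note that 'is_public' is
compared as the string 'True'):

    def is_visible(is_admin, project_id, image):
        return (is_admin
                or project_id == image['properties']['owner_id']
                or image['properties']['is_public'] == 'True')

    image = {'properties': {'owner_id': 'proj-1', 'is_public': 'False'}}
    assert is_visible(False, 'proj-1', image)
    assert not is_visible(False, 'proj-2', image)
    assert is_visible(True, 'proj-2', image)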
@@ -120,8 +119,159 @@ class S3ImageService(service.BaseImageService):
                                         port=FLAGS.s3_port,
                                         host=FLAGS.s3_host)
 
-    def _qs(self, params):
-        pairs = []
-        for key in params.keys():
-            pairs.append(key + '=' + urllib.quote(params[key]))
-        return '&'.join(pairs)
+    @staticmethod
+    def _download_file(bucket, filename, local_dir):
+        key = bucket.get_key(filename)
+        local_filename = os.path.join(local_dir, filename)
+        key.get_contents_to_filename(local_filename)
+        return local_filename
+
+    def _s3_create(self, context, metadata):
+        """Gets a manifext from s3 and makes an image"""
+
+        image_path = tempfile.mkdtemp(dir=FLAGS.image_decryption_dir)
+
+        image_location = metadata['properties']['image_location']
+        bucket_name = image_location.split("/")[0]
+        manifest_path = image_location[len(bucket_name) + 1:]
+        bucket = self._conn(context).get_bucket(bucket_name)
+        key = bucket.get_key(manifest_path)
+        manifest = key.get_contents_as_string()
+
+        manifest = ElementTree.fromstring(manifest)
+        image_format = 'ami'
+        image_type = 'machine'
+
+        try:
+            kernel_id = manifest.find("machine_configuration/kernel_id").text
+            if kernel_id == 'true':
+                image_format = 'aki'
+                image_type = 'kernel'
+                kernel_id = None
+        except Exception:
+            kernel_id = None
+
+        try:
+            ramdisk_id = manifest.find("machine_configuration/ramdisk_id").text
+            if ramdisk_id == 'true':
+                image_format = 'ari'
+                image_type = 'ramdisk'
+                ramdisk_id = None
+        except Exception:
+            ramdisk_id = None
+
+        try:
+            arch = manifest.find("machine_configuration/architecture").text
+        except Exception:
+            arch = 'x86_64'
+
+        properties = metadata['properties']
+        properties['owner_id'] = context.project_id
+        properties['architecture'] = arch
+
+        if kernel_id:
+            properties['kernel_id'] = ec2utils.ec2_id_to_id(kernel_id)
+
+        if ramdisk_id:
+            properties['ramdisk_id'] = ec2utils.ec2_id_to_id(ramdisk_id)
+
+        properties['is_public'] = False
+        properties['type'] = image_type
+        metadata.update({'disk_format': image_format,
+                         'container_format': image_format,
+                         'status': 'queued',
+                         'is_public': True,
+                         'properties': properties})
+        metadata['properties']['image_state'] = 'pending'
+        image = self.service.create(context, metadata)
+        image_id = image['id']
+
+        def delayed_create():
+            """This handles the fetching and decrypting of the part files."""
+            parts = []
+            for fn_element in manifest.find("image").getiterator("filename"):
+                part = self._download_file(bucket, fn_element.text, image_path)
+                parts.append(part)
+
+            # NOTE(vish): this may be suboptimal, should we use cat?
+            encrypted_filename = os.path.join(image_path, 'image.encrypted')
+            with open(encrypted_filename, 'w') as combined:
+                for filename in parts:
+                    with open(filename) as part:
+                        shutil.copyfileobj(part, combined)
+
+            metadata['properties']['image_state'] = 'decrypting'
+            self.service.update(context, image_id, metadata)
+
+            hex_key = manifest.find("image/ec2_encrypted_key").text
+            encrypted_key = binascii.a2b_hex(hex_key)
+            hex_iv = manifest.find("image/ec2_encrypted_iv").text
+            encrypted_iv = binascii.a2b_hex(hex_iv)
+
+            # FIXME(vish): grab key from common service so this can run on
+            #              any host.
+            cloud_pk = os.path.join(FLAGS.ca_path, "private/cakey.pem")
+
+            decrypted_filename = os.path.join(image_path, 'image.tar.gz')
+            self._decrypt_image(encrypted_filename, encrypted_key,
+                                encrypted_iv, cloud_pk, decrypted_filename)
+
+            metadata['properties']['image_state'] = 'untarring'
+            self.service.update(context, image_id, metadata)
+
+            unz_filename = self._untarzip_image(image_path, decrypted_filename)
+
+            metadata['properties']['image_state'] = 'uploading'
+            with open(unz_filename) as image_file:
+                self.service.update(context, image_id, metadata, image_file)
+            metadata['properties']['image_state'] = 'available'
+            self.service.update(context, image_id, metadata)
+
+            shutil.rmtree(image_path)
+
+        eventlet.spawn_n(delayed_create)
+
+        return image
+
+    @staticmethod
+    def _decrypt_image(encrypted_filename, encrypted_key, encrypted_iv,
+                       cloud_private_key, decrypted_filename):
+        key, err = utils.execute('openssl',
+                                 'rsautl',
+                                 '-decrypt',
+                                 '-inkey', '%s' % cloud_private_key,
+                                 process_input=encrypted_key,
+                                 check_exit_code=False)
+        if err:
+            raise exception.Error(_("Failed to decrypt private key: %s")
+                                  % err)
+        iv, err = utils.execute('openssl',
+                                'rsautl',
+                                '-decrypt',
+                                '-inkey', '%s' % cloud_private_key,
+                                process_input=encrypted_iv,
+                                check_exit_code=False)
+        if err:
+            raise exception.Error(_("Failed to decrypt initialization "
+                                    "vector: %s") % err)
+
+        _out, err = utils.execute('openssl', 'enc',
+                                  '-d', '-aes-128-cbc',
+                                  '-in', '%s' % (encrypted_filename,),
+                                  '-K', '%s' % (key,),
+                                  '-iv', '%s' % (iv,),
+                                  '-out', '%s' % (decrypted_filename,),
+                                  check_exit_code=False)
+        if err:
+            raise exception.Error(_("Failed to decrypt image file "
+                                    "%(image_file)s: %(err)s") %
+                                  {'image_file': encrypted_filename,
+                                   'err': err})
+
+    @staticmethod
+    def _untarzip_image(path, filename):
+        tar_file = tarfile.open(filename, "r|gz")
+        tar_file.extractall(path)
+        image_file = tar_file.getnames()[0]
+        tar_file.close()
+        return os.path.join(path, image_file)
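
Note: registration now returns as soon as the image record exists in
the 'pending' state; downloading, decrypting, and untarring run in an
eventlet green thread that advances properties['image_state'] as it
goes. The control flow in miniature (illustrative only, not nova
code):

    import eventlet

    def register_sketch(states):
        # states collects the image_state transitions a poller sees.
        states.append('pending')

        def delayed_create():
            for step in ('decrypting', 'untarring', 'uploading',
                         'available'):
                states.append(step)

        eventlet.spawn_n(delayed_create)
        return states[0]  # caller regains control while work proceeds

    states = []
    assert register_sketch(states) == 'pending'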
@@ -56,9 +56,9 @@ class BaseImageService(object):
         """
         raise NotImplementedError
 
-    def show(self, context, id):
+    def show(self, context, image_id):
         """
-        Returns a dict containing image data for the given opaque image id.
+        Returns a dict containing image metadata for the given opaque image id.
 
         :retval a mapping with the following signature:
 

@@ -76,17 +76,27 @@ class BaseImageService(object):
         """
         raise NotImplementedError
 
-    def create(self, context, data):
+    def get(self, context, data):
         """
-        Store the image data and return the new image id.
+        Returns a dict containing image metadata and writes image data to data.
+
+        :param data: a file-like object to hold binary image data
+
+        :raises NotFound if the image does not exist
+        """
+        raise NotImplementedError
+
+    def create(self, context, metadata, data=None):
+        """
+        Store the image metadata and data and return the new image id.
 
         :raises AlreadyExists if the image already exist.
 
         """
         raise NotImplementedError
 
-    def update(self, context, image_id, data):
-        """Replace the contents of the given image with the new data.
+    def update(self, context, image_id, metadata, data=None):
+        """Update the given image with the new metadata and data.
 
         :raises NotFound if the image does not exist.
 

@@ -17,15 +17,17 @@
 Implements vlans, bridges, and iptables rules using linux utilities.
 """
 
+import inspect
 import os
 
+from eventlet import semaphore
+
 from nova import db
 from nova import exception
 from nova import flags
 from nova import log as logging
 from nova import utils
 
 
 LOG = logging.getLogger("nova.linux_net")
 
 

@@ -52,8 +54,6 @@ flags.DEFINE_string('dhcpbridge', _bin_file('nova-dhcpbridge'),
                     'location of nova-dhcpbridge')
 flags.DEFINE_string('routing_source_ip', '$my_ip',
                     'Public IP of network host')
-flags.DEFINE_bool('use_nova_chains', False,
-                  'use the nova_ routing chains instead of default')
 flags.DEFINE_string('input_chain', 'INPUT',
                     'chain to add nova_input to')
 

@@ -63,115 +63,379 @@ flags.DEFINE_string('dmz_cidr', '10.128.0.0/24',
                     'dmz range that should be accepted')
 
 
+binary_name = os.path.basename(inspect.stack()[-1][1])
+
+
+class IptablesRule(object):
+    """An iptables rule
+
+    You shouldn't need to use this class directly, it's only used by
+    IptablesManager
+    """
+    def __init__(self, chain, rule, wrap=True, top=False):
+        self.chain = chain
+        self.rule = rule
+        self.wrap = wrap
+        self.top = top
+
+    def __eq__(self, other):
+        return ((self.chain == other.chain) and
+                (self.rule == other.rule) and
+                (self.top == other.top) and
+                (self.wrap == other.wrap))
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __str__(self):
+        if self.wrap:
+            chain = '%s-%s' % (binary_name, self.chain)
+        else:
+            chain = self.chain
+        return '-A %s %s' % (chain, self.rule)
+
+
+class IptablesTable(object):
+    """An iptables table"""
+
+    def __init__(self):
+        self.rules = []
+        self.chains = set()
+        self.unwrapped_chains = set()
+
+    def add_chain(self, name, wrap=True):
+        """Adds a named chain to the table
+
+        The chain name is wrapped to be unique for the component creating
+        it, so different components of Nova can safely create identically
+        named chains without interfering with one another.
+
+        At the moment, its wrapped name is <binary name>-<chain name>,
+        so if nova-compute creates a chain named "OUTPUT", it'll actually
+        end up named "nova-compute-OUTPUT".
+        """
+        if wrap:
+            self.chains.add(name)
+        else:
+            self.unwrapped_chains.add(name)
+
+    def remove_chain(self, name, wrap=True):
+        """Remove named chain
+
+        This removal "cascades". All rule in the chain are removed, as are
+        all rules in other chains that jump to it.
+
+        If the chain is not found, this is merely logged.
+        """
+        if wrap:
+            chain_set = self.chains
+        else:
+            chain_set = self.unwrapped_chains
+
+        if name not in chain_set:
+            LOG.debug(_("Attempted to remove chain %s which doesn't exist"),
+                      name)
+            return
+
+        chain_set.remove(name)
+        self.rules = filter(lambda r: r.chain != name, self.rules)
+
+        if wrap:
+            jump_snippet = '-j %s-%s' % (binary_name, name)
+        else:
+            jump_snippet = '-j %s' % (name,)
+
+        self.rules = filter(lambda r: jump_snippet not in r.rule, self.rules)
+
+    def add_rule(self, chain, rule, wrap=True, top=False):
+        """Add a rule to the table
+
+        This is just like what you'd feed to iptables, just without
+        the "-A <chain name>" bit at the start.
+
+        However, if you need to jump to one of your wrapped chains,
+        prepend its name with a '$' which will ensure the wrapping
+        is applied correctly.
+        """
+        if wrap and chain not in self.chains:
+            raise ValueError(_("Unknown chain: %r") % chain)
+
+        if '$' in rule:
+            rule = ' '.join(map(self._wrap_target_chain, rule.split(' ')))
+
+        self.rules.append(IptablesRule(chain, rule, wrap, top))
+
+    def _wrap_target_chain(self, s):
+        if s.startswith('$'):
+            return '%s-%s' % (binary_name, s[1:])
+        return s
+
+    def remove_rule(self, chain, rule, wrap=True, top=False):
+        """Remove a rule from a chain
+
+        Note: The rule must be exactly identical to the one that was added.
+        You cannot switch arguments around like you can with the iptables
+        CLI tool.
+        """
+        try:
+            self.rules.remove(IptablesRule(chain, rule, wrap, top))
+        except ValueError:
+            LOG.debug(_("Tried to remove rule that wasn't there:"
+                        " %(chain)r %(rule)r %(wrap)r %(top)r"),
+                      {'chain': chain, 'rule': rule,
+                       'top': top, 'wrap': wrap})
+
+
+class IptablesManager(object):
+    """Wrapper for iptables
+
+    See IptablesTable for some usage docs
+
+    A number of chains are set up to begin with.
+
+    First, nova-filter-top. It's added at the top of FORWARD and OUTPUT. Its
+    name is not wrapped, so it's shared between the various nova workers. It's
+    intended for rules that need to live at the top of the FORWARD and OUTPUT
+    chains. It's in both the ipv4 and ipv6 set of tables.
+
+    For ipv4 and ipv6, the builtin INPUT, OUTPUT, and FORWARD filter chains are
+    wrapped, meaning that the "real" INPUT chain has a rule that jumps to the
+    wrapped INPUT chain, etc. Additionally, there's a wrapped chain named
+    "local" which is jumped to from nova-filter-top.
+
+    For ipv4, the builtin PREROUTING, OUTPUT, and POSTROUTING nat chains are
+    wrapped in the same was as the builtin filter chains. Additionally, there's
+    a snat chain that is applied after the POSTROUTING chain.
+    """
+    def __init__(self, execute=None):
+        if not execute:
+            if FLAGS.fake_network:
+                self.execute = lambda *args, **kwargs: ('', '')
+            else:
+                self.execute = utils.execute
+        else:
+            self.execute = execute
+
+        self.ipv4 = {'filter': IptablesTable(),
+                     'nat': IptablesTable()}
+        self.ipv6 = {'filter': IptablesTable()}
+
+        # Add a nova-filter-top chain. It's intended to be shared
+        # among the various nova components. It sits at the very top
+        # of FORWARD and OUTPUT.
+        for tables in [self.ipv4, self.ipv6]:
+            tables['filter'].add_chain('nova-filter-top', wrap=False)
+            tables['filter'].add_rule('FORWARD', '-j nova-filter-top',
+                                      wrap=False, top=True)
+            tables['filter'].add_rule('OUTPUT', '-j nova-filter-top',
+                                      wrap=False, top=True)
+
+            tables['filter'].add_chain('local')
+            tables['filter'].add_rule('nova-filter-top', '-j $local',
+                                      wrap=False)
+
+        # Wrap the builtin chains
+        builtin_chains = {4: {'filter': ['INPUT', 'OUTPUT', 'FORWARD'],
+                              'nat': ['PREROUTING', 'OUTPUT', 'POSTROUTING']},
+                          6: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']}}
+
+        for ip_version in builtin_chains:
+            if ip_version == 4:
+                tables = self.ipv4
+            elif ip_version == 6:
+                tables = self.ipv6
+
+            for table, chains in builtin_chains[ip_version].iteritems():
+                for chain in chains:
+                    tables[table].add_chain(chain)
+                    tables[table].add_rule(chain, '-j $%s' % (chain,),
+                                           wrap=False)
+
+        # Add a nova-postrouting-bottom chain. It's intended to be shared
+        # among the various nova components. We set it as the last chain
+        # of POSTROUTING chain.
+        self.ipv4['nat'].add_chain('nova-postrouting-bottom', wrap=False)
+        self.ipv4['nat'].add_rule('POSTROUTING', '-j nova-postrouting-bottom',
+                                  wrap=False)
+
+        # We add a snat chain to the shared nova-postrouting-bottom chain
+        # so that it's applied last.
+        self.ipv4['nat'].add_chain('snat')
+        self.ipv4['nat'].add_rule('nova-postrouting-bottom', '-j $snat',
+                                  wrap=False)
+
+        # And then we add a floating-snat chain and jump to first thing in
+        # the snat chain.
+        self.ipv4['nat'].add_chain('floating-snat')
+        self.ipv4['nat'].add_rule('snat', '-j $floating-snat')
+
+        self.semaphore = semaphore.Semaphore()
+
+    @utils.synchronized('iptables')
+    def apply(self):
+        """Apply the current in-memory set of iptables rules
|
||||||
|
|
||||||
|
This will blow away any rules left over from previous runs of the
|
||||||
|
same component of Nova, and replace them with our current set of
|
||||||
|
rules. This happens atomically, thanks to iptables-restore.
|
||||||
|
|
||||||
|
We wrap the call in a semaphore lock, so that we don't race with
|
||||||
|
ourselves. In the event of a race with another component running
|
||||||
|
an iptables-* command at the same time, we retry up to 5 times.
|
||||||
|
"""
|
||||||
|
with self.semaphore:
|
||||||
|
s = [('iptables', self.ipv4)]
|
||||||
|
if FLAGS.use_ipv6:
|
||||||
|
s += [('ip6tables', self.ipv6)]
|
||||||
|
|
||||||
|
for cmd, tables in s:
|
||||||
|
for table in tables:
|
||||||
|
current_table, _ = self.execute('sudo',
|
||||||
|
'%s-save' % (cmd,),
|
||||||
|
'-t', '%s' % (table,),
|
||||||
|
attempts=5)
|
||||||
|
current_lines = current_table.split('\n')
|
||||||
|
new_filter = self._modify_rules(current_lines,
|
||||||
|
tables[table])
|
||||||
|
self.execute('sudo', '%s-restore' % (cmd,),
|
||||||
|
process_input='\n'.join(new_filter),
|
||||||
|
attempts=5)
|
||||||
|
|
||||||
|
def _modify_rules(self, current_lines, table, binary=None):
|
||||||
|
unwrapped_chains = table.unwrapped_chains
|
||||||
|
chains = table.chains
|
||||||
|
rules = table.rules
|
||||||
|
|
||||||
|
# Remove any trace of our rules
|
||||||
|
new_filter = filter(lambda line: binary_name not in line,
|
||||||
|
current_lines)
|
||||||
|
|
||||||
|
seen_chains = False
|
||||||
|
rules_index = 0
|
||||||
|
for rules_index, rule in enumerate(new_filter):
|
||||||
|
if not seen_chains:
|
||||||
|
if rule.startswith(':'):
|
||||||
|
seen_chains = True
|
||||||
|
else:
|
||||||
|
if not rule.startswith(':'):
|
||||||
|
break
|
||||||
|
|
||||||
|
our_rules = []
|
||||||
|
for rule in rules:
|
||||||
|
rule_str = str(rule)
|
||||||
|
if rule.top:
|
||||||
|
# rule.top == True means we want this rule to be at the top.
|
||||||
|
# Further down, we weed out duplicates from the bottom of the
|
||||||
|
# list, so here we remove the dupes ahead of time.
|
||||||
|
new_filter = filter(lambda s: s.strip() != rule_str.strip(),
|
||||||
|
new_filter)
|
||||||
|
our_rules += [rule_str]
|
||||||
|
|
||||||
|
new_filter[rules_index:rules_index] = our_rules
|
||||||
|
|
||||||
|
new_filter[rules_index:rules_index] = [':%s - [0:0]' % \
|
||||||
|
(name,) \
|
||||||
|
for name in unwrapped_chains]
|
||||||
|
new_filter[rules_index:rules_index] = [':%s-%s - [0:0]' % \
|
||||||
|
(binary_name, name,) \
|
||||||
|
for name in chains]
|
||||||
|
|
||||||
|
seen_lines = set()
|
||||||
|
|
||||||
|
def _weed_out_duplicates(line):
|
||||||
|
line = line.strip()
|
||||||
|
if line in seen_lines:
|
||||||
|
return False
|
||||||
|
else:
|
||||||
|
seen_lines.add(line)
|
||||||
|
return True
|
||||||
|
|
||||||
|
# We filter duplicates, letting the *last* occurrence take
|
||||||
|
# precendence.
|
||||||
|
new_filter.reverse()
|
||||||
|
new_filter = filter(_weed_out_duplicates, new_filter)
|
||||||
|
new_filter.reverse()
|
||||||
|
return new_filter
|
||||||
|
|
||||||
|
|
||||||
|
iptables_manager = IptablesManager()
|
||||||
|
|
||||||
|
|
||||||
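
A minimal usage sketch of the manager defined above (the 'tenant-in' chain and its rule are illustrative, not part of this commit): a component registers wrapped chains and rules in memory, and nothing touches the system tables until apply().

    manager = IptablesManager()

    # 'tenant-in' is a hypothetical chain name; because it is wrapped, it is
    # rendered as '<binary name>-tenant-in' when the rules are applied.
    manager.ipv4['filter'].add_chain('tenant-in')
    manager.ipv4['filter'].add_rule('local', '-j $tenant-in')
    manager.ipv4['filter'].add_rule('tenant-in', '-s 10.0.0.0/8 -j ACCEPT')

    # Rebuilds the tables atomically via iptables-save/iptables-restore.
    manager.apply()
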
 def metadata_forward():
     """Create forwarding rule for metadata"""
-    _confirm_rule("PREROUTING", "-t nat -s 0.0.0.0/0 "
-                  "-d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j DNAT "
-                  "--to-destination %s:%s" % (FLAGS.ec2_dmz_host, FLAGS.ec2_port))
+    iptables_manager.ipv4['nat'].add_rule("PREROUTING",
+                                          "-s 0.0.0.0/0 -d 169.254.169.254/32 "
+                                          "-p tcp -m tcp --dport 80 -j DNAT "
+                                          "--to-destination %s:%s" % \
+                                          (FLAGS.ec2_dmz_host, FLAGS.ec2_port))
+    iptables_manager.apply()


 def init_host():
     """Basic networking setup goes here"""
-    if FLAGS.use_nova_chains:
-        _execute("sudo iptables -N nova_input", check_exit_code=False)
-        _execute("sudo iptables -D %s -j nova_input" % FLAGS.input_chain,
-                 check_exit_code=False)
-        _execute("sudo iptables -A %s -j nova_input" % FLAGS.input_chain)
-
-        _execute("sudo iptables -N nova_forward", check_exit_code=False)
-        _execute("sudo iptables -D FORWARD -j nova_forward",
-                 check_exit_code=False)
-        _execute("sudo iptables -A FORWARD -j nova_forward")
-
-        _execute("sudo iptables -N nova_output", check_exit_code=False)
-        _execute("sudo iptables -D OUTPUT -j nova_output",
-                 check_exit_code=False)
-        _execute("sudo iptables -A OUTPUT -j nova_output")
-
-        _execute("sudo iptables -t nat -N nova_prerouting",
-                 check_exit_code=False)
-        _execute("sudo iptables -t nat -D PREROUTING -j nova_prerouting",
-                 check_exit_code=False)
-        _execute("sudo iptables -t nat -A PREROUTING -j nova_prerouting")
-
-        _execute("sudo iptables -t nat -N nova_postrouting",
-                 check_exit_code=False)
-        _execute("sudo iptables -t nat -D POSTROUTING -j nova_postrouting",
-                 check_exit_code=False)
-        _execute("sudo iptables -t nat -A POSTROUTING -j nova_postrouting")
-
-        _execute("sudo iptables -t nat -N nova_snatting",
-                 check_exit_code=False)
-        _execute("sudo iptables -t nat -D POSTROUTING -j nova_snatting",
-                 check_exit_code=False)
-        _execute("sudo iptables -t nat -A POSTROUTING -j nova_snatting")
-
-        _execute("sudo iptables -t nat -N nova_output", check_exit_code=False)
-        _execute("sudo iptables -t nat -D OUTPUT -j nova_output",
-                 check_exit_code=False)
-        _execute("sudo iptables -t nat -A OUTPUT -j nova_output")
-    else:
-        # NOTE(vish): This makes it easy to ensure snatting rules always
-        #             come after the accept rules in the postrouting chain
-        _execute("sudo iptables -t nat -N SNATTING",
-                 check_exit_code=False)
-        _execute("sudo iptables -t nat -D POSTROUTING -j SNATTING",
-                 check_exit_code=False)
-        _execute("sudo iptables -t nat -A POSTROUTING -j SNATTING")
-
     # NOTE(devcamcar): Cloud public SNAT entries and the default
     # SNAT rule for outbound traffic.
-    _confirm_rule("SNATTING", "-t nat -s %s "
-                  "-j SNAT --to-source %s"
-                  % (FLAGS.fixed_range, FLAGS.routing_source_ip), append=True)
-
-    _confirm_rule("POSTROUTING", "-t nat -s %s -d %s -j ACCEPT" %
-                  (FLAGS.fixed_range, FLAGS.dmz_cidr))
-    _confirm_rule("POSTROUTING", "-t nat -s %(range)s -d %(range)s -j ACCEPT" %
-                  {'range': FLAGS.fixed_range})
+    iptables_manager.ipv4['nat'].add_rule("snat",
+                                          "-s %s -j SNAT --to-source %s" % \
+                                          (FLAGS.fixed_range,
+                                           FLAGS.routing_source_ip))
+
+    iptables_manager.ipv4['nat'].add_rule("POSTROUTING",
+                                          "-s %s -d %s -j ACCEPT" % \
+                                          (FLAGS.fixed_range, FLAGS.dmz_cidr))
+
+    iptables_manager.ipv4['nat'].add_rule("POSTROUTING",
+                                          "-s %(range)s -d %(range)s "
+                                          "-j ACCEPT" % \
+                                          {'range': FLAGS.fixed_range})
+    iptables_manager.apply()
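
If IptablesRule renders wrapped rules as '-A <binary name>-<chain> <rule>' (its definition is earlier in this file and not shown in this hunk), then the metadata rule above would presumably come out roughly like this for a nova-network worker, with an assumed dmz host of 10.0.0.1 and port 8773:

    -A nova-network-PREROUTING -s 0.0.0.0/0 -d 169.254.169.254/32
        -p tcp -m tcp --dport 80 -j DNAT --to-destination 10.0.0.1:8773
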
 def bind_floating_ip(floating_ip, check_exit_code=True):
     """Bind ip to public interface"""
-    _execute("sudo ip addr add %s dev %s" % (floating_ip,
-                                             FLAGS.public_interface),
-             check_exit_code=check_exit_code)
+    _execute('sudo', 'ip', 'addr', 'add', floating_ip,
+             'dev', FLAGS.public_interface,
+             check_exit_code=check_exit_code)


 def unbind_floating_ip(floating_ip):
     """Unbind a public ip from public interface"""
-    _execute("sudo ip addr del %s dev %s" % (floating_ip,
-                                             FLAGS.public_interface))
+    _execute('sudo', 'ip', 'addr', 'del', floating_ip,
+             'dev', FLAGS.public_interface)


 def ensure_vlan_forward(public_ip, port, private_ip):
     """Sets up forwarding rules for vlan"""
-    _confirm_rule("FORWARD", "-d %s -p udp --dport 1194 -j ACCEPT" %
-                  private_ip)
-    _confirm_rule("PREROUTING",
-                  "-t nat -d %s -p udp --dport %s -j DNAT --to %s:1194"
-                  % (public_ip, port, private_ip))
+    iptables_manager.ipv4['filter'].add_rule("FORWARD",
+                                             "-d %s -p udp "
+                                             "--dport 1194 "
+                                             "-j ACCEPT" % private_ip)
+    iptables_manager.ipv4['nat'].add_rule("PREROUTING",
+                                          "-d %s -p udp "
+                                          "--dport %s -j DNAT --to %s:1194" %
+                                          (public_ip, port, private_ip))
+    iptables_manager.apply()


 def ensure_floating_forward(floating_ip, fixed_ip):
     """Ensure floating ip forwarding rule"""
-    _confirm_rule("PREROUTING", "-t nat -d %s -j DNAT --to %s"
-                  % (floating_ip, fixed_ip))
-    _confirm_rule("OUTPUT", "-t nat -d %s -j DNAT --to %s"
-                  % (floating_ip, fixed_ip))
-    _confirm_rule("SNATTING", "-t nat -s %s -j SNAT --to %s"
-                  % (fixed_ip, floating_ip))
+    for chain, rule in floating_forward_rules(floating_ip, fixed_ip):
+        iptables_manager.ipv4['nat'].add_rule(chain, rule)
+    iptables_manager.apply()


 def remove_floating_forward(floating_ip, fixed_ip):
     """Remove forwarding for floating ip"""
-    _remove_rule("PREROUTING", "-t nat -d %s -j DNAT --to %s"
-                 % (floating_ip, fixed_ip))
-    _remove_rule("OUTPUT", "-t nat -d %s -j DNAT --to %s"
-                 % (floating_ip, fixed_ip))
-    _remove_rule("SNATTING", "-t nat -s %s -j SNAT --to %s"
-                 % (fixed_ip, floating_ip))
+    for chain, rule in floating_forward_rules(floating_ip, fixed_ip):
+        iptables_manager.ipv4['nat'].remove_rule(chain, rule)
+    iptables_manager.apply()
+
+
+def floating_forward_rules(floating_ip, fixed_ip):
+    return [("PREROUTING", "-d %s -j DNAT --to %s" % (floating_ip, fixed_ip)),
+            ("OUTPUT", "-d %s -j DNAT --to %s" % (floating_ip, fixed_ip)),
+            ("floating-snat",
+             "-s %s -j SNAT --to %s" % (fixed_ip, floating_ip))]
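
For illustration, the (chain, rule) pairs the helper above generates for an assumed floating ip 68.99.26.170 mapped to fixed ip 10.0.0.3:

    >>> floating_forward_rules('68.99.26.170', '10.0.0.3')
    [('PREROUTING', '-d 68.99.26.170 -j DNAT --to 10.0.0.3'),
     ('OUTPUT', '-d 68.99.26.170 -j DNAT --to 10.0.0.3'),
     ('floating-snat', '-s 10.0.0.3 -j SNAT --to 68.99.26.170')]
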
 def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None):
@@ -185,9 +449,9 @@ def ensure_vlan(vlan_num):
     interface = "vlan%s" % vlan_num
     if not _device_exists(interface):
         LOG.debug(_("Starting VLAN interface %s"), interface)
-        _execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD")
-        _execute("sudo vconfig add %s %s" % (FLAGS.vlan_interface, vlan_num))
-        _execute("sudo ip link set %s up" % interface)
+        _execute('sudo', 'vconfig', 'set_name_type', 'VLAN_PLUS_VID_NO_PAD')
+        _execute('sudo', 'vconfig', 'add', FLAGS.vlan_interface, vlan_num)
+        _execute('sudo', 'ip', 'link', 'set', interface, 'up')
     return interface
@@ -206,71 +470,69 @@ def ensure_bridge(bridge, interface, net_attrs=None):
     """
     if not _device_exists(bridge):
         LOG.debug(_("Starting Bridge interface for %s"), interface)
-        _execute("sudo brctl addbr %s" % bridge)
-        _execute("sudo brctl setfd %s 0" % bridge)
+        _execute('sudo', 'brctl', 'addbr', bridge)
+        _execute('sudo', 'brctl', 'setfd', bridge, 0)
         # _execute("sudo brctl setageing %s 10" % bridge)
-        _execute("sudo brctl stp %s off" % bridge)
-        _execute("sudo ip link set %s up" % bridge)
+        _execute('sudo', 'brctl', 'stp', bridge, 'off')
+        _execute('sudo', 'ip', 'link', 'set', bridge, 'up')
     if net_attrs:
         # NOTE(vish): The ip for dnsmasq has to be the first address on the
         #             bridge for it to respond to requests properly
         suffix = net_attrs['cidr'].rpartition('/')[2]
-        out, err = _execute("sudo ip addr add %s/%s brd %s dev %s" %
-                            (net_attrs['gateway'],
-                             suffix,
-                             net_attrs['broadcast'],
-                             bridge),
+        out, err = _execute('sudo', 'ip', 'addr', 'add',
+                            "%s/%s" %
+                            (net_attrs['gateway'], suffix),
+                            'brd',
+                            net_attrs['broadcast'],
+                            'dev',
+                            bridge,
                             check_exit_code=False)
         if err and err != "RTNETLINK answers: File exists\n":
             raise exception.Error("Failed to add ip: %s" % err)
         if(FLAGS.use_ipv6):
-            _execute("sudo ip -f inet6 addr change %s dev %s" %
-                     (net_attrs['cidr_v6'], bridge))
+            _execute('sudo', 'ip', '-f', 'inet6', 'addr',
+                     'change', net_attrs['cidr_v6'],
+                     'dev', bridge)
         # NOTE(vish): If the public interface is the same as the
         #             bridge, then the bridge has to be in promiscuous
         #             to forward packets properly.
         if(FLAGS.public_interface == bridge):
-            _execute("sudo ip link set dev %s promisc on" % bridge)
+            _execute('sudo', 'ip', 'link', 'set',
+                     'dev', bridge, 'promisc', 'on')
     if interface:
         # NOTE(vish): This will break if there is already an ip on the
         #             interface, so we move any ips to the bridge
         gateway = None
-        out, err = _execute("sudo route -n")
+        out, err = _execute('sudo', 'route', '-n')
         for line in out.split("\n"):
             fields = line.split()
             if fields and fields[0] == "0.0.0.0" and fields[-1] == interface:
                 gateway = fields[1]
-        out, err = _execute("sudo ip addr show dev %s scope global" %
-                            interface)
+        out, err = _execute('sudo', 'ip', 'addr', 'show', 'dev', interface,
+                            'scope', 'global')
         for line in out.split("\n"):
             fields = line.split()
             if fields and fields[0] == "inet":
                 params = ' '.join(fields[1:-1])
-                _execute("sudo ip addr del %s dev %s" % (params, fields[-1]))
-                _execute("sudo ip addr add %s dev %s" % (params, bridge))
+                _execute('sudo', 'ip', 'addr',
+                         'del', params, 'dev', fields[-1])
+                _execute('sudo', 'ip', 'addr',
+                         'add', params, 'dev', bridge)
         if gateway:
-            _execute("sudo route add 0.0.0.0 gw %s" % gateway)
-        out, err = _execute("sudo brctl addif %s %s" %
-                            (bridge, interface),
+            _execute('sudo', 'route', 'add', '0.0.0.0', 'gw', gateway)
+        out, err = _execute('sudo', 'brctl', 'addif', bridge, interface,
                             check_exit_code=False)

         if (err and err != "device %s is already a member of a bridge; can't "
                    "enslave it to bridge %s.\n" % (interface, bridge)):
             raise exception.Error("Failed to add interface: %s" % err)

-    if FLAGS.use_nova_chains:
-        (out, err) = _execute("sudo iptables -N nova_forward",
-                              check_exit_code=False)
-        if err != 'iptables: Chain already exists.\n':
-            # NOTE(vish): chain didn't exist link chain
-            _execute("sudo iptables -D FORWARD -j nova_forward",
-                     check_exit_code=False)
-            _execute("sudo iptables -A FORWARD -j nova_forward")
-
-    _confirm_rule("FORWARD", "--in-interface %s -j ACCEPT" % bridge)
-    _confirm_rule("FORWARD", "--out-interface %s -j ACCEPT" % bridge)
-    _execute("sudo iptables -N nova-local", check_exit_code=False)
-    _confirm_rule("FORWARD", "-j nova-local")
+    iptables_manager.ipv4['filter'].add_rule("FORWARD",
+                                             "--in-interface %s -j ACCEPT" % \
+                                             bridge)
+    iptables_manager.ipv4['filter'].add_rule("FORWARD",
+                                             "--out-interface %s -j ACCEPT" % \
+                                             bridge)
 def get_dhcp_hosts(context, network_id):
@@ -304,11 +566,11 @@ def update_dhcp(context, network_id):

     # if dnsmasq is already running, then tell it to reload
     if pid:
-        out, _err = _execute('cat /proc/%d/cmdline' % pid,
-                             check_exit_code=False)
+        out, _err = _execute('cat', "/proc/%d/cmdline" % pid,
+                             check_exit_code=False)
         if conffile in out:
             try:
-                _execute('sudo kill -HUP %d' % pid)
+                _execute('sudo', 'kill', '-HUP', pid)
                 return
             except Exception as exc:  # pylint: disable-msg=W0703
                 LOG.debug(_("Hupping dnsmasq threw %s"), exc)
@@ -319,7 +581,7 @@ def update_dhcp(context, network_id):
     env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile,
            'DNSMASQ_INTERFACE': network_ref['bridge']}
     command = _dnsmasq_cmd(network_ref)
-    _execute(command, addl_env=env)
+    _execute(*command, addl_env=env)


 def update_ra(context, network_id):
@@ -349,17 +611,17 @@ interface %s

     # if radvd is already running, then tell it to reload
     if pid:
-        out, _err = _execute('cat /proc/%d/cmdline'
-                             % pid, check_exit_code=False)
+        out, _err = _execute('cat', '/proc/%d/cmdline'
+                             % pid, check_exit_code=False)
         if conffile in out:
             try:
-                _execute('sudo kill %d' % pid)
+                _execute('sudo', 'kill', pid)
             except Exception as exc:  # pylint: disable-msg=W0703
                 LOG.debug(_("killing radvd threw %s"), exc)
         else:
             LOG.debug(_("Pid %d is stale, relaunching radvd"), pid)
     command = _ra_cmd(network_ref)
-    _execute(command)
+    _execute(*command)
     db.network_update(context, network_id,
                       {"ra_server":
                        utils.get_my_linklocal(network_ref['bridge'])})
@@ -374,68 +636,48 @@ def _host_dhcp(fixed_ip_ref):
                     fixed_ip_ref['address'])


-def _execute(cmd, *args, **kwargs):
+def _execute(*cmd, **kwargs):
     """Wrapper around utils._execute for fake_network"""
     if FLAGS.fake_network:
-        LOG.debug("FAKE NET: %s", cmd)
+        LOG.debug("FAKE NET: %s", " ".join(map(str, cmd)))
         return "fake", 0
     else:
-        return utils.execute(cmd, *args, **kwargs)
+        return utils.execute(*cmd, **kwargs)


 def _device_exists(device):
     """Check if ethernet device exists"""
-    (_out, err) = _execute("ip link show dev %s" % device,
-                           check_exit_code=False)
+    (_out, err) = _execute('ip', 'link', 'show', 'dev', device,
+                           check_exit_code=False)
     return not err


-def _confirm_rule(chain, cmd, append=False):
-    """Delete and re-add iptables rule"""
-    if FLAGS.use_nova_chains:
-        chain = "nova_%s" % chain.lower()
-    if append:
-        loc = "-A"
-    else:
-        loc = "-I"
-    _execute("sudo iptables --delete %s %s" % (chain, cmd),
-             check_exit_code=False)
-    _execute("sudo iptables %s %s %s" % (loc, chain, cmd))
-
-
-def _remove_rule(chain, cmd):
-    """Remove iptables rule"""
-    if FLAGS.use_nova_chains:
-        chain = "%s" % chain.lower()
-    _execute("sudo iptables --delete %s %s" % (chain, cmd))
-
-
 def _dnsmasq_cmd(net):
     """Builds dnsmasq command"""
-    cmd = ['sudo -E dnsmasq',
-           ' --strict-order',
-           ' --bind-interfaces',
-           ' --conf-file=',
-           ' --domain=%s' % FLAGS.dhcp_domain,
-           ' --pid-file=%s' % _dhcp_file(net['bridge'], 'pid'),
-           ' --listen-address=%s' % net['gateway'],
-           ' --except-interface=lo',
-           ' --dhcp-range=%s,static,120s' % net['dhcp_start'],
-           ' --dhcp-hostsfile=%s' % _dhcp_file(net['bridge'], 'conf'),
-           ' --dhcp-script=%s' % FLAGS.dhcpbridge,
-           ' --leasefile-ro']
+    cmd = ['sudo', '-E', 'dnsmasq',
+           '--strict-order',
+           '--bind-interfaces',
+           '--conf-file=',
+           '--domain=%s' % FLAGS.dhcp_domain,
+           '--pid-file=%s' % _dhcp_file(net['bridge'], 'pid'),
+           '--listen-address=%s' % net['gateway'],
+           '--except-interface=lo',
+           '--dhcp-range=%s,static,120s' % net['dhcp_start'],
+           '--dhcp-hostsfile=%s' % _dhcp_file(net['bridge'], 'conf'),
+           '--dhcp-script=%s' % FLAGS.dhcpbridge,
+           '--leasefile-ro']
     if FLAGS.dns_server:
-        cmd.append(' -h -R --server=%s' % FLAGS.dns_server)
-    return ''.join(cmd)
+        cmd += ['-h', '-R', '--server=%s' % FLAGS.dns_server]
+    return cmd


 def _ra_cmd(net):
     """Builds radvd command"""
-    cmd = ['sudo -E radvd',
-           # ' -u nobody',
-           ' -C %s' % _ra_file(net['bridge'], 'conf'),
-           ' -p %s' % _ra_file(net['bridge'], 'pid')]
-    return ''.join(cmd)
+    cmd = ['sudo', '-E', 'radvd',
+           # '-u', 'nobody',
+           '-C', '%s' % _ra_file(net['bridge'], 'conf'),
+           '-p', '%s' % _ra_file(net['bridge'], 'pid')]
+    return cmd
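
The switch from command strings to argv lists is what makes the _execute(*command, ...) calls above work; a sketch of the difference:

    # Old style: one string, joined and re-split by a shell, so interpolated
    # values containing spaces or shell metacharacters break the command.
    _execute('sudo kill -TERM %d' % pid)

    # New style: the builders return a list, and each element reaches the
    # process as exactly one argument, with no shell interpretation.
    command = _dnsmasq_cmd(network_ref)    # ['sudo', '-E', 'dnsmasq', ...]
    _execute(*command, addl_env=env)
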
|
|
||||||
def _stop_dnsmasq(network):
|
def _stop_dnsmasq(network):
|
||||||
@@ -444,7 +686,7 @@ def _stop_dnsmasq(network):
|
|||||||
|
|
||||||
if pid:
|
if pid:
|
||||||
try:
|
try:
|
||||||
_execute('sudo kill -TERM %d' % pid)
|
_execute('sudo', 'kill', '-TERM', pid)
|
||||||
except Exception as exc: # pylint: disable-msg=W0703
|
except Exception as exc: # pylint: disable-msg=W0703
|
||||||
LOG.debug(_("Killing dnsmasq threw %s"), exc)
|
LOG.debug(_("Killing dnsmasq threw %s"), exc)
|
||||||
|
|
||||||
|
|||||||
@@ -563,6 +563,16 @@ class VlanManager(NetworkManager):
|
|||||||
# NOTE(vish): This makes ports unique accross the cloud, a more
|
# NOTE(vish): This makes ports unique accross the cloud, a more
|
||||||
# robust solution would be to make them unique per ip
|
# robust solution would be to make them unique per ip
|
||||||
net['vpn_public_port'] = vpn_start + index
|
net['vpn_public_port'] = vpn_start + index
|
||||||
|
network_ref = None
|
||||||
|
try:
|
||||||
|
network_ref = db.network_get_by_cidr(context, cidr)
|
||||||
|
except exception.NotFound:
|
||||||
|
pass
|
||||||
|
|
||||||
|
if network_ref is not None:
|
||||||
|
raise ValueError(_('Network with cidr %s already exists' %
|
||||||
|
cidr))
|
||||||
|
|
||||||
network_ref = self.db.network_create_safe(context, net)
|
network_ref = self.db.network_create_safe(context, net)
|
||||||
if network_ref:
|
if network_ref:
|
||||||
self._create_fixed_ips(context, network_ref['id'])
|
self._create_fixed_ips(context, network_ref['id'])
|
||||||
|
|||||||
@@ -37,8 +37,7 @@ from nova.objectstore import bucket


 FLAGS = flags.FLAGS
-flags.DEFINE_string('images_path', '$state_path/images',
-                    'path to decrypted images')
+flags.DECLARE('images_path', 'nova.image.local')


 class Image(object):
@@ -254,25 +253,34 @@ class Image(object):
     @staticmethod
     def decrypt_image(encrypted_filename, encrypted_key, encrypted_iv,
                       cloud_private_key, decrypted_filename):
-        key, err = utils.execute(
-            'openssl rsautl -decrypt -inkey %s' % cloud_private_key,
-            process_input=encrypted_key,
-            check_exit_code=False)
+        key, err = utils.execute('openssl',
+                                 'rsautl',
+                                 '-decrypt',
+                                 '-inkey', '%s' % cloud_private_key,
+                                 process_input=encrypted_key,
+                                 check_exit_code=False)
         if err:
             raise exception.Error(_("Failed to decrypt private key: %s")
                                   % err)
-        iv, err = utils.execute(
-            'openssl rsautl -decrypt -inkey %s' % cloud_private_key,
-            process_input=encrypted_iv,
-            check_exit_code=False)
+        iv, err = utils.execute('openssl',
+                                'rsautl',
+                                '-decrypt',
+                                '-inkey', '%s' % cloud_private_key,
+                                process_input=encrypted_iv,
+                                check_exit_code=False)
         if err:
             raise exception.Error(_("Failed to decrypt initialization "
                                     "vector: %s") % err)

-        _out, err = utils.execute(
-            'openssl enc -d -aes-128-cbc -in %s -K %s -iv %s -out %s'
-            % (encrypted_filename, key, iv, decrypted_filename),
-            check_exit_code=False)
+        _out, err = utils.execute('openssl',
+                                  'enc',
+                                  '-d',
+                                  '-aes-128-cbc',
+                                  '-in', '%s' % (encrypted_filename,),
+                                  '-K', '%s' % (key,),
+                                  '-iv', '%s' % (iv,),
+                                  '-out', '%s' % (decrypted_filename,),
+                                  check_exit_code=False)
         if err:
             raise exception.Error(_("Failed to decrypt image file "
                                     "%(image_file)s: %(err)s") %
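
For reference, a standalone sketch of what the first argv-style openssl call above does, using subprocess directly; the key path and ciphertext here are placeholders:

    import subprocess

    def rsa_decrypt(private_key_path, ciphertext):
        # Same shape as utils.execute('openssl', 'rsautl', '-decrypt', ...):
        # each argv element reaches openssl without shell interpretation.
        proc = subprocess.Popen(['openssl', 'rsautl', '-decrypt',
                                 '-inkey', private_key_path],
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate(ciphertext)
        if proc.returncode != 0:
            raise RuntimeError(err)
        return out
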
 49  nova/scheduler/api.py  Normal file
@@ -0,0 +1,49 @@
+# Copyright (c) 2011 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Handles all requests relating to schedulers.
+"""
+
+from nova import flags
+from nova import log as logging
+from nova import rpc
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.scheduler.api')
+
+
+class API(object):
+    """API for interacting with the scheduler."""
+
+    def _call_scheduler(self, method, context, params=None):
+        """Generic handler for RPC calls to the scheduler.
+
+        :param params: Optional dictionary of arguments to be passed to the
+                       scheduler worker
+
+        :retval: Result returned by scheduler worker
+        """
+        if not params:
+            params = {}
+        queue = FLAGS.scheduler_topic
+        kwargs = {'method': method, 'args': params}
+        return rpc.call(context, queue, kwargs)
+
+    def get_zone_list(self, context):
+        items = self._call_scheduler('get_zone_list', context)
+        for item in items:
+            item['api_url'] = item['api_url'].replace('\\/', '/')
+        return items
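
A hypothetical caller of the new module, assuming an admin context and a scheduler worker listening on FLAGS.scheduler_topic:

    from nova import context
    from nova.scheduler import api

    ctxt = context.get_admin_context()
    for zone in api.API().get_zone_list(ctxt):
        print zone['api_url']
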
@@ -29,6 +29,7 @@ from nova import log as logging
 from nova import manager
 from nova import rpc
 from nova import utils
+from nova.scheduler import zone_manager

 LOG = logging.getLogger('nova.scheduler.manager')
 FLAGS = flags.FLAGS
@@ -43,12 +44,21 @@ class SchedulerManager(manager.Manager):
         if not scheduler_driver:
             scheduler_driver = FLAGS.scheduler_driver
         self.driver = utils.import_object(scheduler_driver)
+        self.zone_manager = zone_manager.ZoneManager()
         super(SchedulerManager, self).__init__(*args, **kwargs)

     def __getattr__(self, key):
         """Converts all method calls to use the schedule method"""
         return functools.partial(self._schedule, key)

+    def periodic_tasks(self, context=None):
+        """Poll child zones periodically to get status."""
+        self.zone_manager.ping(context)
+
+    def get_zone_list(self, context=None):
+        """Get a list of zones from the ZoneManager."""
+        return self.zone_manager.get_zone_list()
+
     def _schedule(self, method, context, topic, *args, **kwargs):
         """Tries to call schedule_* method on the driver to retrieve host.
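
A sketch of how the new hooks are meant to be driven; the periodic caller is assumed here (in nova it would be the service's periodic task loop):

    manager = SchedulerManager()
    manager.periodic_tasks(context.get_admin_context())   # pings child zones
    print manager.get_zone_list()                         # cached zone dicts
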
 143  nova/scheduler/zone_manager.py  Normal file
@@ -0,0 +1,143 @@
+# Copyright (c) 2011 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+ZoneManager oversees all communications with child Zones.
+"""
+
+import novaclient
+import thread
+import traceback
+
+from datetime import datetime
+from eventlet import greenpool
+
+from nova import db
+from nova import flags
+from nova import log as logging
+
+FLAGS = flags.FLAGS
+flags.DEFINE_integer('zone_db_check_interval', 60,
+                     'Seconds between getting fresh zone info from db.')
+flags.DEFINE_integer('zone_failures_to_offline', 3,
+                     'Number of consecutive errors before marking zone offline')
+
+
+class ZoneState(object):
+    """Holds the state of all connected child zones."""
+    def __init__(self):
+        self.is_active = True
+        self.name = None
+        self.capabilities = None
+        self.attempt = 0
+        self.last_seen = datetime.min
+        self.last_exception = None
+        self.last_exception_time = None
+
+    def update_credentials(self, zone):
+        """Update zone credentials from db"""
+        self.zone_id = zone.id
+        self.api_url = zone.api_url
+        self.username = zone.username
+        self.password = zone.password
+
+    def update_metadata(self, zone_metadata):
+        """Update zone metadata after successful communications with
+           child zone."""
+        self.last_seen = datetime.now()
+        self.attempt = 0
+        self.name = zone_metadata["name"]
+        self.capabilities = zone_metadata["capabilities"]
+        self.is_active = True
+
+    def to_dict(self):
+        return dict(name=self.name, capabilities=self.capabilities,
+                    is_active=self.is_active, api_url=self.api_url,
+                    id=self.zone_id)
+
+    def log_error(self, exception):
+        """Something went wrong. Check to see if zone should be
+           marked as offline."""
+        self.last_exception = exception
+        self.last_exception_time = datetime.now()
+        api_url = self.api_url
+        logging.warning(_("'%(exception)s' error talking to "
+                          "zone %(api_url)s") % locals())
+
+        max_errors = FLAGS.zone_failures_to_offline
+        self.attempt += 1
+        if self.attempt >= max_errors:
+            self.is_active = False
+            logging.error(_("No answer from zone %(api_url)s "
+                            "after %(max_errors)d "
+                            "attempts. Marking inactive.") % locals())
+
+
+def _call_novaclient(zone):
+    """Call novaclient. Broken out for testing purposes."""
+    client = novaclient.OpenStack(zone.username, zone.password, zone.api_url)
+    return client.zones.info()._info
+
+
+def _poll_zone(zone):
+    """Eventlet worker to poll a zone."""
+    logging.debug(_("Polling zone: %s") % zone.api_url)
+    try:
+        zone.update_metadata(_call_novaclient(zone))
+    except Exception, e:
+        zone.log_error(traceback.format_exc())
+
+
+class ZoneManager(object):
+    """Keeps the zone states updated."""
+    def __init__(self):
+        self.last_zone_db_check = datetime.min
+        self.zone_states = {}
+        self.green_pool = greenpool.GreenPool()
+
+    def get_zone_list(self):
+        """Return the list of zones we know about."""
+        return [zone.to_dict() for zone in self.zone_states.values()]
+
+    def _refresh_from_db(self, context):
+        """Make our zone state map match the db."""
+        # Add/update existing zones ...
+        zones = db.zone_get_all(context)
+        existing = self.zone_states.keys()
+        db_keys = []
+        for zone in zones:
+            db_keys.append(zone.id)
+            if zone.id not in existing:
+                self.zone_states[zone.id] = ZoneState()
+            self.zone_states[zone.id].update_credentials(zone)
+
+        # Cleanup zones removed from db ...
+        keys = self.zone_states.keys()  # since we're deleting
+        for zone_id in keys:
+            if zone_id not in db_keys:
+                del self.zone_states[zone_id]
+
+    def _poll_zones(self, context):
+        """Try to connect to each child zone and get update."""
+        self.green_pool.imap(_poll_zone, self.zone_states.values())
+
+    def ping(self, context=None):
+        """Ping should be called periodically to update zone status."""
+        diff = datetime.now() - self.last_zone_db_check
+        if diff.seconds >= FLAGS.zone_db_check_interval:
+            logging.debug(_("Updating zone cache from db."))
+            self.last_zone_db_check = datetime.now()
+            self._refresh_from_db(context)
+        self._poll_zones(context)
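
An illustrative sketch of the failure accounting in ZoneState; FakeZone stands in for a real zones db row, and gettext's _ is assumed to be installed as in nova's entry points:

    class FakeZone(object):
        id, api_url, username, password = 1, 'http://example.com', 'bob', 'xxx'

    state = ZoneState()
    state.update_credentials(FakeZone())
    for _i in xrange(FLAGS.zone_failures_to_offline):
        state.log_error('connection refused')
    assert not state.is_active   # marked offline after 3 consecutive errors
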
@@ -28,6 +28,7 @@ def webob_factory(url):
     def web_request(url, method=None, body=None):
         req = webob.Request.blank("%s%s" % (base_url, url))
         if method:
+            req.content_type = "application/json"
             req.method = method
         if body:
             req.body = json.dumps(body)
@@ -25,6 +25,7 @@ import webob.dec
 from paste import urlmap

 from glance import client as glance_client
+from glance.common import exception as glance_exc

 from nova import context
 from nova import exception as exc
@@ -149,25 +150,26 @@ def stub_out_glance(stubs, initial_fixtures=None):
         for f in self.fixtures:
             if f['id'] == image_id:
                 return f
-        return None
+        raise glance_exc.NotFound

-    def fake_add_image(self, image_meta):
+    def fake_add_image(self, image_meta, data=None):
         id = ''.join(random.choice(string.letters) for _ in range(20))
         image_meta['id'] = id
         self.fixtures.append(image_meta)
-        return id
+        return image_meta

-    def fake_update_image(self, image_id, image_meta):
+    def fake_update_image(self, image_id, image_meta, data=None):
         f = self.fake_get_image_meta(image_id)
         if not f:
-            raise exc.NotFound
+            raise glance_exc.NotFound

         f.update(image_meta)
+        return f

     def fake_delete_image(self, image_id):
         f = self.fake_get_image_meta(image_id)
         if not f:
-            raise exc.NotFound
+            raise glance_exc.NotFound

         self.fixtures.remove(f)
@@ -90,6 +90,7 @@ class AccountsTest(test.TestCase):
         body = dict(account=dict(description='test account',
                                  manager='guy1'))
         req = webob.Request.blank('/v1.0/accounts/newacct')
+        req.headers["Content-Type"] = "application/json"
         req.method = 'PUT'
         req.body = json.dumps(body)

@@ -109,6 +110,7 @@ class AccountsTest(test.TestCase):
         body = dict(account=dict(description='test account',
                                  manager='guy2'))
         req = webob.Request.blank('/v1.0/accounts/test1')
+        req.headers["Content-Type"] = "application/json"
         req.method = 'PUT'
         req.body = json.dumps(body)
@@ -79,20 +79,14 @@ class LimiterTest(test.TestCase):
         Test offset key works with a blank offset.
         """
         req = Request.blank('/?offset=')
-        self.assertEqual(limited(self.tiny, req), self.tiny)
-        self.assertEqual(limited(self.small, req), self.small)
-        self.assertEqual(limited(self.medium, req), self.medium)
-        self.assertEqual(limited(self.large, req), self.large[:1000])
+        self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req)

     def test_limiter_offset_bad(self):
         """
         Test offset key works with a BAD offset.
         """
         req = Request.blank(u'/?offset=\u0020aa')
-        self.assertEqual(limited(self.tiny, req), self.tiny)
-        self.assertEqual(limited(self.small, req), self.small)
-        self.assertEqual(limited(self.medium, req), self.medium)
-        self.assertEqual(limited(self.large, req), self.large[:1000])
+        self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req)

     def test_limiter_nothing(self):
         """
@@ -166,18 +160,12 @@ class LimiterTest(test.TestCase):
         """
         Test a negative limit.
         """
-        def _limit_large():
-            limited(self.large, req, max_limit=2000)
-
         req = Request.blank('/?limit=-3000')
-        self.assertRaises(webob.exc.HTTPBadRequest, _limit_large)
+        self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req)

     def test_limiter_negative_offset(self):
         """
         Test a negative offset.
         """
-        def _limit_large():
-            limited(self.large, req, max_limit=2000)
-
         req = Request.blank('/?offset=-30')
-        self.assertRaises(webob.exc.HTTPBadRequest, _limit_large)
+        self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req)
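
The rewritten assertions rely on unittest's callable form: assertRaises invokes the callable with the remaining positional arguments, so the local wrapper function is unnecessary. In general, inside a TestCase:

    # these two are equivalent:
    self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req)

    def _call():
        limited(self.tiny, req)
    self.assertRaises(webob.exc.HTTPBadRequest, _call)
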
@@ -22,6 +22,8 @@ and as a WSGI layer

 import json
 import datetime
+import shutil
+import tempfile

 import stubout
 import webob
@@ -54,7 +56,7 @@ class BaseImageServiceTests(object):

         num_images = len(self.service.index(self.context))

-        id = self.service.create(self.context, fixture)
+        id = self.service.create(self.context, fixture)['id']

         self.assertNotEquals(None, id)
         self.assertEquals(num_images + 1,
@@ -71,7 +73,7 @@ class BaseImageServiceTests(object):

         num_images = len(self.service.index(self.context))

-        id = self.service.create(self.context, fixture)
+        id = self.service.create(self.context, fixture)['id']

         self.assertNotEquals(None, id)

@@ -89,7 +91,7 @@ class BaseImageServiceTests(object):
                    'instance_id': None,
                    'progress': None}

-        id = self.service.create(self.context, fixture)
+        id = self.service.create(self.context, fixture)['id']

         fixture['status'] = 'in progress'

@@ -118,7 +120,7 @@ class BaseImageServiceTests(object):

         ids = []
         for fixture in fixtures:
-            new_id = self.service.create(self.context, fixture)
+            new_id = self.service.create(self.context, fixture)['id']
             ids.append(new_id)

         num_images = len(self.service.index(self.context))
@@ -137,14 +139,15 @@ class LocalImageServiceTest(test.TestCase,

     def setUp(self):
         super(LocalImageServiceTest, self).setUp()
+        self.tempdir = tempfile.mkdtemp()
+        self.flags(images_path=self.tempdir)
         self.stubs = stubout.StubOutForTesting()
         service_class = 'nova.image.local.LocalImageService'
         self.service = utils.import_object(service_class)
         self.context = context.RequestContext(None, None)

     def tearDown(self):
-        self.service.delete_all()
-        self.service.delete_imagedir()
+        shutil.rmtree(self.tempdir)
         self.stubs.UnsetAll()
         super(LocalImageServiceTest, self).tearDown()
@@ -188,9 +188,37 @@ class ServersTest(test.TestCase):
             self.assertEqual(s.get('imageId', None), None)
             i += 1

+    def test_get_servers_with_limit(self):
+        req = webob.Request.blank('/v1.0/servers?limit=3')
+        res = req.get_response(fakes.wsgi_app())
+        servers = json.loads(res.body)['servers']
+        self.assertEqual([s['id'] for s in servers], [0, 1, 2])
+
+        req = webob.Request.blank('/v1.0/servers?limit=aaa')
+        res = req.get_response(fakes.wsgi_app())
+        self.assertEqual(res.status_int, 400)
+        self.assertTrue('limit' in res.body)
+
+    def test_get_servers_with_offset(self):
+        req = webob.Request.blank('/v1.0/servers?offset=2')
+        res = req.get_response(fakes.wsgi_app())
+        servers = json.loads(res.body)['servers']
+        self.assertEqual([s['id'] for s in servers], [2, 3, 4])
+
+        req = webob.Request.blank('/v1.0/servers?offset=aaa')
+        res = req.get_response(fakes.wsgi_app())
+        self.assertEqual(res.status_int, 400)
+        self.assertTrue('offset' in res.body)
+
+    def test_get_servers_with_limit_and_offset(self):
+        req = webob.Request.blank('/v1.0/servers?limit=2&offset=1')
+        res = req.get_response(fakes.wsgi_app())
+        servers = json.loads(res.body)['servers']
+        self.assertEqual([s['id'] for s in servers], [1, 2])
+
     def test_create_instance(self):
         def instance_create(context, inst):
-            return {'id': '1', 'display_name': ''}
+            return {'id': '1', 'display_name': 'server_test'}

         def server_update(context, id, params):
             return instance_create(context, id)
@@ -231,9 +259,16 @@ class ServersTest(test.TestCase):
         req = webob.Request.blank('/v1.0/servers')
         req.method = 'POST'
         req.body = json.dumps(body)
+        req.headers["Content-Type"] = "application/json"

         res = req.get_response(fakes.wsgi_app())

+        server = json.loads(res.body)['server']
+        self.assertEqual('serv', server['adminPass'][:4])
+        self.assertEqual(16, len(server['adminPass']))
+        self.assertEqual('server_test', server['name'])
+        self.assertEqual('1', server['id'])
+
         self.assertEqual(res.status_int, 200)

     def test_update_no_body(self):
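
The expected slicing behind these pagination tests, as a sketch only (not the actual nova.api.openstack.common.limited implementation): invalid or negative limit/offset values produce an HTTP 400, and valid ones slice the result list.

    def limited_sketch(items, limit, offset, max_limit=1000):
        if limit < 0 or offset < 0:
            raise ValueError('bad request')  # the real helper raises HTTP 400
        limit = min(limit or max_limit, max_limit)
        return items[offset:offset + limit]

    # limit=3 -> ids [0, 1, 2]; offset=2 -> ids [2, 3, 4] (of five servers)
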
@@ -104,6 +104,7 @@ class UsersTest(test.TestCase):
                              secret='invasionIsInNormandy',
                              admin=True))
         req = webob.Request.blank('/v1.0/users')
+        req.headers["Content-Type"] = "application/json"
         req.method = 'POST'
         req.body = json.dumps(body)

@@ -125,6 +126,7 @@ class UsersTest(test.TestCase):
                              access='acc2',
                              secret='invasionIsInNormandy'))
         req = webob.Request.blank('/v1.0/users/guy2')
+        req.headers["Content-Type"] = "application/json"
         req.method = 'PUT'
         req.body = json.dumps(body)
@@ -1,4 +1,4 @@
-# Copyright 2010 OpenStack LLC.
+# Copyright 2011 OpenStack LLC.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -24,6 +24,7 @@ from nova import flags
 from nova import test
 from nova.api.openstack import zones
 from nova.tests.api.openstack import fakes
+from nova.scheduler import api


 FLAGS = flags.FLAGS
@@ -31,7 +32,7 @@ FLAGS.verbose = True


 def zone_get(context, zone_id):
-    return dict(id=1, api_url='http://foo.com', username='bob',
+    return dict(id=1, api_url='http://example.com', username='bob',
                 password='xxx')


@@ -42,7 +43,7 @@ def zone_create(context, values):


 def zone_update(context, zone_id, values):
-    zone = dict(id=zone_id, api_url='http://foo.com', username='bob',
+    zone = dict(id=zone_id, api_url='http://example.com', username='bob',
                 password='xxx')
     zone.update(values)
     return zone
@@ -52,12 +53,26 @@ def zone_delete(context, zone_id):
     pass


-def zone_get_all(context):
+def zone_get_all_scheduler(*args):
     return [
-        dict(id=1, api_url='http://foo.com', username='bob',
+        dict(id=1, api_url='http://example.com', username='bob',
             password='xxx'),
-        dict(id=2, api_url='http://blah.com', username='alice',
-            password='qwerty')]
+        dict(id=2, api_url='http://example.org', username='alice',
+            password='qwerty'),
+    ]
+
+
+def zone_get_all_scheduler_empty(*args):
+    return []
+
+
+def zone_get_all_db(context):
+    return [
+        dict(id=1, api_url='http://example.com', username='bob',
+            password='xxx'),
+        dict(id=2, api_url='http://example.org', username='alice',
+            password='qwerty'),
+    ]


 class ZonesTest(test.TestCase):
@@ -74,7 +89,6 @@ class ZonesTest(test.TestCase):
         FLAGS.allow_admin_api = True

         self.stubs.Set(nova.db, 'zone_get', zone_get)
-        self.stubs.Set(nova.db, 'zone_get_all', zone_get_all)
         self.stubs.Set(nova.db, 'zone_update', zone_update)
         self.stubs.Set(nova.db, 'zone_create', zone_create)
         self.stubs.Set(nova.db, 'zone_delete', zone_delete)
@@ -84,7 +98,8 @@ class ZonesTest(test.TestCase):
         FLAGS.allow_admin_api = self.allow_admin
         super(ZonesTest, self).tearDown()

-    def test_get_zone_list(self):
+    def test_get_zone_list_scheduler(self):
+        self.stubs.Set(api.API, '_call_scheduler', zone_get_all_scheduler)
         req = webob.Request.blank('/v1.0/zones')
         res = req.get_response(fakes.wsgi_app())
         res_dict = json.loads(res.body)
@@ -92,47 +107,63 @@ class ZonesTest(test.TestCase):
         self.assertEqual(res.status_int, 200)
         self.assertEqual(len(res_dict['zones']), 2)

+    def test_get_zone_list_db(self):
+        self.stubs.Set(api.API, '_call_scheduler',
+                       zone_get_all_scheduler_empty)
+        self.stubs.Set(nova.db, 'zone_get_all', zone_get_all_db)
+        req = webob.Request.blank('/v1.0/zones')
+        req.headers["Content-Type"] = "application/json"
+        res = req.get_response(fakes.wsgi_app())
+
+        self.assertEqual(res.status_int, 200)
+        res_dict = json.loads(res.body)
+        self.assertEqual(len(res_dict['zones']), 2)
+
     def test_get_zone_by_id(self):
         req = webob.Request.blank('/v1.0/zones/1')
+        req.headers["Content-Type"] = "application/json"
         res = req.get_response(fakes.wsgi_app())
-        res_dict = json.loads(res.body)
-
-        self.assertEqual(res_dict['zone']['id'], 1)
-        self.assertEqual(res_dict['zone']['api_url'], 'http://foo.com')
-        self.assertFalse('password' in res_dict['zone'])
+
         self.assertEqual(res.status_int, 200)
+        res_dict = json.loads(res.body)
+        self.assertEqual(res_dict['zone']['id'], 1)
+        self.assertEqual(res_dict['zone']['api_url'], 'http://example.com')
+        self.assertFalse('password' in res_dict['zone'])

     def test_zone_delete(self):
         req = webob.Request.blank('/v1.0/zones/1')
+        req.headers["Content-Type"] = "application/json"
         res = req.get_response(fakes.wsgi_app())
+
         self.assertEqual(res.status_int, 200)

     def test_zone_create(self):
-        body = dict(zone=dict(api_url='http://blah.zoo', username='fred',
+        body = dict(zone=dict(api_url='http://example.com', username='fred',
                               password='fubar'))
         req = webob.Request.blank('/v1.0/zones')
|
req = webob.Request.blank('/v1.0/zones')
|
||||||
|
req.headers["Content-Type"] = "application/json"
|
||||||
req.method = 'POST'
|
req.method = 'POST'
|
||||||
req.body = json.dumps(body)
|
req.body = json.dumps(body)
|
||||||
|
|
||||||
res = req.get_response(fakes.wsgi_app())
|
res = req.get_response(fakes.wsgi_app())
|
||||||
res_dict = json.loads(res.body)
|
|
||||||
|
|
||||||
self.assertEqual(res.status_int, 200)
|
self.assertEqual(res.status_int, 200)
|
||||||
|
res_dict = json.loads(res.body)
|
||||||
self.assertEqual(res_dict['zone']['id'], 1)
|
self.assertEqual(res_dict['zone']['id'], 1)
|
||||||
self.assertEqual(res_dict['zone']['api_url'], 'http://blah.zoo')
|
self.assertEqual(res_dict['zone']['api_url'], 'http://example.com')
|
||||||
self.assertFalse('username' in res_dict['zone'])
|
self.assertFalse('username' in res_dict['zone'])
|
||||||
|
|
||||||
def test_zone_update(self):
|
def test_zone_update(self):
|
||||||
body = dict(zone=dict(username='zeb', password='sneaky'))
|
body = dict(zone=dict(username='zeb', password='sneaky'))
|
||||||
req = webob.Request.blank('/v1.0/zones/1')
|
req = webob.Request.blank('/v1.0/zones/1')
|
||||||
|
req.headers["Content-Type"] = "application/json"
|
||||||
req.method = 'PUT'
|
req.method = 'PUT'
|
||||||
req.body = json.dumps(body)
|
req.body = json.dumps(body)
|
||||||
|
|
||||||
res = req.get_response(fakes.wsgi_app())
|
res = req.get_response(fakes.wsgi_app())
|
||||||
res_dict = json.loads(res.body)
|
|
||||||
|
|
||||||
self.assertEqual(res.status_int, 200)
|
self.assertEqual(res.status_int, 200)
|
||||||
|
res_dict = json.loads(res.body)
|
||||||
self.assertEqual(res_dict['zone']['id'], 1)
|
self.assertEqual(res_dict['zone']['id'], 1)
|
||||||
self.assertEqual(res_dict['zone']['api_url'], 'http://foo.com')
|
self.assertEqual(res_dict['zone']['api_url'], 'http://example.com')
|
||||||
self.assertFalse('username' in res_dict['zone'])
|
self.assertFalse('username' in res_dict['zone'])
|
||||||
|
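The zone-list tests above cover two paths: zones reported by the scheduler, and a database fallback when the scheduler reports none. Both are set up by plain attribute stubbing; a sketch (fake_empty_scheduler is a hypothetical stand-in):

    def fake_empty_scheduler(*args):
        return []  # hypothetical: a scheduler that knows no zones

    # Inside a test method; falls through to the zone_get_all_db stub above.
    self.stubs.Set(api.API, '_call_scheduler', fake_empty_scheduler)
    self.stubs.Set(nova.db, 'zone_get_all', zone_get_all_db)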
@@ -21,11 +21,13 @@
 Test WSGI basics and provide some helper functions for other WSGI tests.
 """

+import json
 from nova import test

 import routes
 import webob

+from nova import exception
 from nova import wsgi


@@ -66,63 +68,164 @@ class Test(test.TestCase):
         result = webob.Request.blank('/bad').get_response(Router())
         self.assertNotEqual(result.body, "Router result")

-    def test_controller(self):

-        class Controller(wsgi.Controller):
-            """Test controller to call from router."""
-            test = self
+class ControllerTest(test.TestCase):
+
+    class TestRouter(wsgi.Router):
+
+        class TestController(wsgi.Controller):
+
+            _serialization_metadata = {
+                'application/xml': {
+                    "attributes": {
+                        "test": ["id"]}}}

             def show(self, req, id):  # pylint: disable-msg=W0622,C0103
-                """Default action called for requests with an ID."""
-                self.test.assertEqual(req.path_info, '/tests/123')
-                self.test.assertEqual(id, '123')
-                return id
+                return {"test": {"id": id}}

-        class Router(wsgi.Router):
-            """Test router."""
-
-            def __init__(self):
-                mapper = routes.Mapper()
-                mapper.resource("test", "tests", controller=Controller())
-                super(Router, self).__init__(mapper)
+        def __init__(self):
+            mapper = routes.Mapper()
+            mapper.resource("test", "tests", controller=self.TestController())
+            wsgi.Router.__init__(self, mapper)

-        result = webob.Request.blank('/tests/123').get_response(Router())
-        self.assertEqual(result.body, "123")
-        result = webob.Request.blank('/test/123').get_response(Router())
-        self.assertNotEqual(result.body, "123")
+    def test_show(self):
+        request = wsgi.Request.blank('/tests/123')
+        result = request.get_response(self.TestRouter())
+        self.assertEqual(json.loads(result.body), {"test": {"id": "123"}})
+
+    def test_response_content_type_from_accept_xml(self):
+        request = webob.Request.blank('/tests/123')
+        request.headers["Accept"] = "application/xml"
+        result = request.get_response(self.TestRouter())
+        self.assertEqual(result.headers["Content-Type"], "application/xml")
+
+    def test_response_content_type_from_accept_json(self):
+        request = wsgi.Request.blank('/tests/123')
+        request.headers["Accept"] = "application/json"
+        result = request.get_response(self.TestRouter())
+        self.assertEqual(result.headers["Content-Type"], "application/json")
+
+    def test_response_content_type_from_query_extension_xml(self):
+        request = wsgi.Request.blank('/tests/123.xml')
+        result = request.get_response(self.TestRouter())
+        self.assertEqual(result.headers["Content-Type"], "application/xml")
+
+    def test_response_content_type_from_query_extension_json(self):
+        request = wsgi.Request.blank('/tests/123.json')
+        result = request.get_response(self.TestRouter())
+        self.assertEqual(result.headers["Content-Type"], "application/json")
+
+    def test_response_content_type_default_when_unsupported(self):
+        request = wsgi.Request.blank('/tests/123.unsupported')
+        request.headers["Accept"] = "application/unsupported1"
+        result = request.get_response(self.TestRouter())
+        self.assertEqual(result.status_int, 200)
+        self.assertEqual(result.headers["Content-Type"], "application/json")
+
+
+class RequestTest(test.TestCase):
+
+    def test_request_content_type_missing(self):
+        request = wsgi.Request.blank('/tests/123')
+        request.body = "<body />"
+        self.assertRaises(webob.exc.HTTPBadRequest, request.get_content_type)
+
+    def test_request_content_type_unsupported(self):
+        request = wsgi.Request.blank('/tests/123')
+        request.headers["Content-Type"] = "text/html"
+        request.body = "asdf<br />"
+        self.assertRaises(webob.exc.HTTPBadRequest, request.get_content_type)
+
+    def test_content_type_from_accept_xml(self):
+        request = wsgi.Request.blank('/tests/123')
+        request.headers["Accept"] = "application/xml"
+        result = request.best_match_content_type()
+        self.assertEqual(result, "application/xml")
+
+        request = wsgi.Request.blank('/tests/123')
+        request.headers["Accept"] = "application/json"
+        result = request.best_match_content_type()
+        self.assertEqual(result, "application/json")
+
+        request = wsgi.Request.blank('/tests/123')
+        request.headers["Accept"] = "application/xml, application/json"
+        result = request.best_match_content_type()
+        self.assertEqual(result, "application/json")
+
+        request = wsgi.Request.blank('/tests/123')
+        request.headers["Accept"] = \
+            "application/json; q=0.3, application/xml; q=0.9"
+        result = request.best_match_content_type()
+        self.assertEqual(result, "application/xml")
+
+    def test_content_type_from_query_extension(self):
+        request = wsgi.Request.blank('/tests/123.xml')
+        result = request.best_match_content_type()
+        self.assertEqual(result, "application/xml")
+
+        request = wsgi.Request.blank('/tests/123.json')
+        result = request.best_match_content_type()
+        self.assertEqual(result, "application/json")
+
+        request = wsgi.Request.blank('/tests/123.invalid')
+        result = request.best_match_content_type()
+        self.assertEqual(result, "application/json")
+
+    def test_content_type_accept_and_query_extension(self):
+        request = wsgi.Request.blank('/tests/123.xml')
+        request.headers["Accept"] = "application/json"
+        result = request.best_match_content_type()
+        self.assertEqual(result, "application/xml")
+
+    def test_content_type_accept_default(self):
+        request = wsgi.Request.blank('/tests/123.unsupported')
+        request.headers["Accept"] = "application/unsupported1"
+        result = request.best_match_content_type()
+        self.assertEqual(result, "application/json")


 class SerializerTest(test.TestCase):

-    def match(self, url, accept, expect):
+    def test_xml(self):
         input_dict = dict(servers=dict(a=(2, 3)))
         expected_xml = '<servers><a>(2,3)</a></servers>'
-        expected_json = '{"servers":{"a":[2,3]}}'
-        req = webob.Request.blank(url, headers=dict(Accept=accept))
-        result = wsgi.Serializer(req.environ).to_content_type(input_dict)
+        serializer = wsgi.Serializer()
+        result = serializer.serialize(input_dict, "application/xml")
         result = result.replace('\n', '').replace(' ', '')
-        if expect == 'xml':
-            self.assertEqual(result, expected_xml)
-        elif expect == 'json':
-            self.assertEqual(result, expected_json)
-        else:
-            raise "Bad expect value"
+        self.assertEqual(result, expected_xml)

-    def test_basic(self):
-        self.match('/servers/4.json', None, expect='json')
-        self.match('/servers/4', 'application/json', expect='json')
-        self.match('/servers/4', 'application/xml', expect='xml')
-        self.match('/servers/4.xml', None, expect='xml')
+    def test_json(self):
+        input_dict = dict(servers=dict(a=(2, 3)))
+        expected_json = '{"servers":{"a":[2,3]}}'
+        serializer = wsgi.Serializer()
+        result = serializer.serialize(input_dict, "application/json")
+        result = result.replace('\n', '').replace(' ', '')
+        self.assertEqual(result, expected_json)

-    def test_defaults_to_json(self):
-        self.match('/servers/4', None, expect='json')
-        self.match('/servers/4', 'text/html', expect='json')
+    def test_unsupported_content_type(self):
+        serializer = wsgi.Serializer()
+        self.assertRaises(exception.InvalidContentType, serializer.serialize,
+                          {}, "text/null")

-    def test_suffix_takes_precedence_over_accept_header(self):
-        self.match('/servers/4.xml', 'application/json', expect='xml')
-        self.match('/servers/4.xml.', 'application/json', expect='json')
+    def test_deserialize_json(self):
+        data = """{"a": {
+                "a1": "1",
+                "a2": "2",
+                "bs": ["1", "2", "3", {"c": {"c1": "1"}}],
+                "d": {"e": "1"},
+                "f": "1"}}"""
+        as_dict = dict(a={
+                'a1': '1',
+                'a2': '2',
+                'bs': ['1', '2', '3', {'c': dict(c1='1')}],
+                'd': {'e': '1'},
+                'f': '1'})
+        metadata = {}
+        serializer = wsgi.Serializer(metadata)
+        self.assertEqual(serializer.deserialize(data, "application/json"),
+                         as_dict)

-    def test_deserialize(self):
+    def test_deserialize_xml(self):
         xml = """
             <a a1="1" a2="2">
               <bs><b>1</b><b>2</b><b>3</b><b><c c1="1"/></b></bs>
@@ -137,11 +240,13 @@ class SerializerTest(test.TestCase):
             'd': {'e': '1'},
             'f': '1'})
         metadata = {'application/xml': dict(plurals={'bs': 'b', 'ts': 't'})}
-        serializer = wsgi.Serializer({}, metadata)
-        self.assertEqual(serializer.deserialize(xml), as_dict)
+        serializer = wsgi.Serializer(metadata)
+        self.assertEqual(serializer.deserialize(xml, "application/xml"),
+                         as_dict)

     def test_deserialize_empty_xml(self):
         xml = """<a></a>"""
         as_dict = {"a": {}}
-        serializer = wsgi.Serializer({})
-        self.assertEqual(serializer.deserialize(xml), as_dict)
+        serializer = wsgi.Serializer()
+        self.assertEqual(serializer.deserialize(xml, "application/xml"),
+                         as_dict)

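The Serializer hunks reflect its new interface: optional XML metadata at construction, an explicit content type per call, and exception.InvalidContentType for anything unsupported. A round-trip sketch based on the calls in the tests above:

    from nova import wsgi

    serializer = wsgi.Serializer()
    body = serializer.serialize({'servers': {'a': [2, 3]}}, 'application/json')
    # body == '{"servers": {"a": [2, 3]}}' (modulo whitespace)
    data = serializer.deserialize(body, 'application/json')
    assert data == {'servers': {'a': [2, 3]}}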
@@ -32,6 +32,7 @@ flags.DECLARE('fake_network', 'nova.network.manager')
 FLAGS.network_size = 8
 FLAGS.num_networks = 2
 FLAGS.fake_network = True
+FLAGS.image_service = 'nova.image.local.LocalImageService'
 flags.DECLARE('num_shelves', 'nova.volume.driver')
 flags.DECLARE('blades_per_shelf', 'nova.volume.driver')
 flags.DECLARE('iscsi_num_targets', 'nova.volume.driver')

@@ -38,6 +38,8 @@ from nova import test
 from nova.auth import manager
 from nova.compute import power_state
 from nova.api.ec2 import cloud
+from nova.api.ec2 import ec2utils
+from nova.image import local
 from nova.objectstore import image


@@ -76,6 +78,12 @@ class CloudTestCase(test.TestCase):
                                              project=self.project)
         host = self.network.get_network_host(self.context.elevated())

+        def fake_show(meh, context, id):
+            return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
+
+        self.stubs.Set(local.LocalImageService, 'show', fake_show)
+        self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show)
+
     def tearDown(self):
         network_ref = db.project_get_network(self.context,
                                              self.project.id)
@@ -122,7 +130,7 @@ class CloudTestCase(test.TestCase):
         self.cloud.allocate_address(self.context)
         inst = db.instance_create(self.context, {'host': self.compute.host})
         fixed = self.network.allocate_fixed_ip(self.context, inst['id'])
-        ec2_id = cloud.id_to_ec2_id(inst['id'])
+        ec2_id = ec2utils.id_to_ec2_id(inst['id'])
         self.cloud.associate_address(self.context,
                                      instance_id=ec2_id,
                                      public_ip=address)
@@ -158,12 +166,12 @@ class CloudTestCase(test.TestCase):
         vol2 = db.volume_create(self.context, {})
         result = self.cloud.describe_volumes(self.context)
         self.assertEqual(len(result['volumeSet']), 2)
-        volume_id = cloud.id_to_ec2_id(vol2['id'], 'vol-%08x')
+        volume_id = ec2utils.id_to_ec2_id(vol2['id'], 'vol-%08x')
         result = self.cloud.describe_volumes(self.context,
                                              volume_id=[volume_id])
         self.assertEqual(len(result['volumeSet']), 1)
         self.assertEqual(
-            cloud.ec2_id_to_id(result['volumeSet'][0]['volumeId']),
+            ec2utils.ec2_id_to_id(result['volumeSet'][0]['volumeId']),
             vol2['id'])
         db.volume_destroy(self.context, vol1['id'])
         db.volume_destroy(self.context, vol2['id'])
@@ -188,8 +196,10 @@ class CloudTestCase(test.TestCase):
     def test_describe_instances(self):
         """Makes sure describe_instances works and filters results."""
         inst1 = db.instance_create(self.context, {'reservation_id': 'a',
+                                                  'image_id': 1,
                                                   'host': 'host1'})
         inst2 = db.instance_create(self.context, {'reservation_id': 'a',
+                                                  'image_id': 1,
                                                   'host': 'host2'})
         comp1 = db.service_create(self.context, {'host': 'host1',
                                                  'availability_zone': 'zone1',
@@ -200,7 +210,7 @@ class CloudTestCase(test.TestCase):
         result = self.cloud.describe_instances(self.context)
         result = result['reservationSet'][0]
         self.assertEqual(len(result['instancesSet']), 2)
-        instance_id = cloud.id_to_ec2_id(inst2['id'])
+        instance_id = ec2utils.id_to_ec2_id(inst2['id'])
         result = self.cloud.describe_instances(self.context,
                                                instance_id=[instance_id])
         result = result['reservationSet'][0]
@@ -215,10 +225,9 @@ class CloudTestCase(test.TestCase):
         db.service_destroy(self.context, comp2['id'])

     def test_console_output(self):
-        image_id = FLAGS.default_image
         instance_type = FLAGS.default_instance_type
         max_count = 1
-        kwargs = {'image_id': image_id,
+        kwargs = {'image_id': 'ami-1',
                   'instance_type': instance_type,
                   'max_count': max_count}
         rv = self.cloud.run_instances(self.context, **kwargs)
@@ -234,8 +243,7 @@ class CloudTestCase(test.TestCase):
         greenthread.sleep(0.3)

     def test_ajax_console(self):
-        image_id = FLAGS.default_image
-        kwargs = {'image_id': image_id}
+        kwargs = {'image_id': 'ami-1'}
         rv = self.cloud.run_instances(self.context, **kwargs)
         instance_id = rv['instancesSet'][0]['instanceId']
         greenthread.sleep(0.3)
@@ -347,7 +355,7 @@ class CloudTestCase(test.TestCase):

     def test_update_of_instance_display_fields(self):
         inst = db.instance_create(self.context, {})
-        ec2_id = cloud.id_to_ec2_id(inst['id'])
+        ec2_id = ec2utils.id_to_ec2_id(inst['id'])
         self.cloud.update_instance(self.context, ec2_id,
                                    display_name='c00l 1m4g3')
         inst = db.instance_get(self.context, inst['id'])
@@ -365,7 +373,7 @@ class CloudTestCase(test.TestCase):
     def test_update_of_volume_display_fields(self):
         vol = db.volume_create(self.context, {})
         self.cloud.update_volume(self.context,
-                                 cloud.id_to_ec2_id(vol['id'], 'vol-%08x'),
+                                 ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x'),
                                  display_name='c00l v0lum3')
         vol = db.volume_get(self.context, vol['id'])
         self.assertEqual('c00l v0lum3', vol['display_name'])
@@ -374,7 +382,7 @@ class CloudTestCase(test.TestCase):
     def test_update_of_volume_wont_update_private_fields(self):
         vol = db.volume_create(self.context, {})
         self.cloud.update_volume(self.context,
-                                 cloud.id_to_ec2_id(vol['id'], 'vol-%08x'),
+                                 ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x'),
                                  mountpoint='/not/here')
         vol = db.volume_get(self.context, vol['id'])
         self.assertEqual(None, vol['mountpoint'])

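The remaining hunks in this file swap the EC2 id helpers over to their new home in ec2utils; the mapping itself is unchanged: an integer database id is rendered through a printf-style template, and parsed back by reading the hex after the dash. A worked sketch (the default 'i-%08x' template is an assumption inferred from the instance-id usage above):

    from nova.api.ec2 import ec2utils

    ec2utils.id_to_ec2_id(10, 'vol-%08x')    # -> 'vol-0000000a'
    ec2utils.ec2_id_to_id('vol-0000000a')    # -> 10 (0x0a)
    ec2utils.id_to_ec2_id(1)                 # assumed default -> 'i-00000001'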
@@ -31,7 +31,7 @@ from nova import test
 from nova import utils
 from nova.auth import manager
 from nova.compute import instance_types
+from nova.image import local

 LOG = logging.getLogger('nova.tests.compute')
 FLAGS = flags.FLAGS
@@ -52,6 +52,11 @@ class ComputeTestCase(test.TestCase):
         self.project = self.manager.create_project('fake', 'fake', 'fake')
         self.context = context.RequestContext('fake', 'fake', False)

+        def fake_show(meh, context, id):
+            return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
+
+        self.stubs.Set(local.LocalImageService, 'show', fake_show)
+
     def tearDown(self):
         self.manager.delete_user(self.user)
         self.manager.delete_project(self.project)
@@ -60,7 +65,7 @@ class ComputeTestCase(test.TestCase):
     def _create_instance(self, params={}):
         """Create a test instance"""
         inst = {}
-        inst['image_id'] = 'ami-test'
+        inst['image_id'] = 1
         inst['reservation_id'] = 'r-fakeres'
         inst['launch_time'] = '10'
         inst['user_id'] = self.user.id

@@ -57,7 +57,7 @@ class ConsoleTestCase(test.TestCase):
         inst = {}
         #inst['host'] = self.host
         #inst['name'] = 'instance-1234'
-        inst['image_id'] = 'ami-test'
+        inst['image_id'] = 1
         inst['reservation_id'] = 'r-fakeres'
         inst['launch_time'] = '10'
         inst['user_id'] = self.user.id

@@ -59,6 +59,7 @@ class DirectTestCase(test.TestCase):
         req.headers['X-OpenStack-User'] = 'user1'
         req.headers['X-OpenStack-Project'] = 'proj1'
         resp = req.get_response(self.auth_router)
+        self.assertEqual(resp.status_int, 200)
         data = json.loads(resp.body)
         self.assertEqual(data['user'], 'user1')
         self.assertEqual(data['project'], 'proj1')
@@ -69,6 +70,7 @@ class DirectTestCase(test.TestCase):
         req.method = 'POST'
         req.body = 'json=%s' % json.dumps({'data': 'foo'})
         resp = req.get_response(self.router)
+        self.assertEqual(resp.status_int, 200)
         resp_parsed = json.loads(resp.body)
         self.assertEqual(resp_parsed['data'], 'foo')

@@ -78,6 +80,7 @@ class DirectTestCase(test.TestCase):
         req.method = 'POST'
         req.body = 'data=foo'
         resp = req.get_response(self.router)
+        self.assertEqual(resp.status_int, 200)
         resp_parsed = json.loads(resp.body)
         self.assertEqual(resp_parsed['data'], 'foo')

@@ -90,8 +93,7 @@ class DirectTestCase(test.TestCase):
 class DirectCloudTestCase(test_cloud.CloudTestCase):
     def setUp(self):
         super(DirectCloudTestCase, self).setUp()
-        compute_handle = compute.API(image_service=self.cloud.image_service,
-                                     network_api=self.cloud.network_api,
+        compute_handle = compute.API(network_api=self.cloud.network_api,
                                      volume_api=self.cloud.volume_api)
         direct.register_service('compute', compute_handle)
         self.router = direct.JsonParamsMiddleware(direct.Router())

|
|||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
|
|
||||||
|
import errno
|
||||||
import os
|
import os
|
||||||
|
import select
|
||||||
|
|
||||||
from nova import test
|
from nova import test
|
||||||
from nova.utils import parse_mailmap, str_dict_replace
|
from nova.utils import parse_mailmap, str_dict_replace, synchronized
|
||||||
|
|
||||||
|
|
||||||
class ProjectTestCase(test.TestCase):
|
class ProjectTestCase(test.TestCase):
|
||||||
@@ -55,3 +57,47 @@ class ProjectTestCase(test.TestCase):
|
|||||||
'%r not listed in Authors' % missing)
|
'%r not listed in Authors' % missing)
|
||||||
finally:
|
finally:
|
||||||
tree.unlock()
|
tree.unlock()
|
||||||
|
|
||||||
|
|
||||||
|
class LockTestCase(test.TestCase):
|
||||||
|
def test_synchronized_wrapped_function_metadata(self):
|
||||||
|
@synchronized('whatever')
|
||||||
|
def foo():
|
||||||
|
"""Bar"""
|
||||||
|
pass
|
||||||
|
self.assertEquals(foo.__doc__, 'Bar', "Wrapped function's docstring "
|
||||||
|
"got lost")
|
||||||
|
self.assertEquals(foo.__name__, 'foo', "Wrapped function's name "
|
||||||
|
"got mangled")
|
||||||
|
|
||||||
|
def test_synchronized(self):
|
||||||
|
rpipe1, wpipe1 = os.pipe()
|
||||||
|
rpipe2, wpipe2 = os.pipe()
|
||||||
|
|
||||||
|
@synchronized('testlock')
|
||||||
|
def f(rpipe, wpipe):
|
||||||
|
try:
|
||||||
|
os.write(wpipe, "foo")
|
||||||
|
except OSError, e:
|
||||||
|
self.assertEquals(e.errno, errno.EPIPE)
|
||||||
|
return
|
||||||
|
|
||||||
|
rfds, _, __ = select.select([rpipe], [], [], 1)
|
||||||
|
self.assertEquals(len(rfds), 0, "The other process, which was"
|
||||||
|
" supposed to be locked, "
|
||||||
|
"wrote on its end of the "
|
||||||
|
"pipe")
|
||||||
|
os.close(rpipe)
|
||||||
|
|
||||||
|
pid = os.fork()
|
||||||
|
if pid > 0:
|
||||||
|
os.close(wpipe1)
|
||||||
|
os.close(rpipe2)
|
||||||
|
|
||||||
|
f(rpipe1, wpipe2)
|
||||||
|
else:
|
||||||
|
os.close(rpipe1)
|
||||||
|
os.close(wpipe2)
|
||||||
|
|
||||||
|
f(rpipe2, wpipe1)
|
||||||
|
os._exit(0)
|
||||||
|
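The new LockTestCase exercises nova.utils.synchronized, a decorator that serializes callers holding the same lock name while keeping the wrapped function's __name__ and __doc__. Typical usage, as in the test above:

    from nova.utils import synchronized

    @synchronized('mylock')
    def critical_section():
        """Only one caller at a time runs under the 'mylock' lock."""
        pass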
@@ -29,11 +29,153 @@ from nova import log as logging
 from nova import test
 from nova import utils
 from nova.auth import manager
+from nova.network import linux_net

 FLAGS = flags.FLAGS
 LOG = logging.getLogger('nova.tests.network')


+class IptablesManagerTestCase(test.TestCase):
+    sample_filter = ['#Generated by iptables-save on Fri Feb 18 15:17:05 2011',
+                     '*filter',
+                     ':INPUT ACCEPT [2223527:305688874]',
+                     ':FORWARD ACCEPT [0:0]',
+                     ':OUTPUT ACCEPT [2172501:140856656]',
+                     ':nova-compute-FORWARD - [0:0]',
+                     ':nova-compute-INPUT - [0:0]',
+                     ':nova-compute-local - [0:0]',
+                     ':nova-compute-OUTPUT - [0:0]',
+                     ':nova-filter-top - [0:0]',
+                     '-A FORWARD -j nova-filter-top ',
+                     '-A OUTPUT -j nova-filter-top ',
+                     '-A nova-filter-top -j nova-compute-local ',
+                     '-A INPUT -j nova-compute-INPUT ',
+                     '-A OUTPUT -j nova-compute-OUTPUT ',
+                     '-A FORWARD -j nova-compute-FORWARD ',
+                     '-A INPUT -i virbr0 -p udp -m udp --dport 53 -j ACCEPT ',
+                     '-A INPUT -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT ',
+                     '-A INPUT -i virbr0 -p udp -m udp --dport 67 -j ACCEPT ',
+                     '-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
+                     '-A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
+                     '-A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
+                     '-A FORWARD -o virbr0 -j REJECT --reject-with '
+                     'icmp-port-unreachable ',
+                     '-A FORWARD -i virbr0 -j REJECT --reject-with '
+                     'icmp-port-unreachable ',
+                     'COMMIT',
+                     '# Completed on Fri Feb 18 15:17:05 2011']
+
+    sample_nat = ['# Generated by iptables-save on Fri Feb 18 15:17:05 2011',
+                  '*nat',
+                  ':PREROUTING ACCEPT [3936:762355]',
+                  ':INPUT ACCEPT [2447:225266]',
+                  ':OUTPUT ACCEPT [63491:4191863]',
+                  ':POSTROUTING ACCEPT [63112:4108641]',
+                  ':nova-compute-OUTPUT - [0:0]',
+                  ':nova-compute-floating-ip-snat - [0:0]',
+                  ':nova-compute-SNATTING - [0:0]',
+                  ':nova-compute-PREROUTING - [0:0]',
+                  ':nova-compute-POSTROUTING - [0:0]',
+                  ':nova-postrouting-bottom - [0:0]',
+                  '-A PREROUTING -j nova-compute-PREROUTING ',
+                  '-A OUTPUT -j nova-compute-OUTPUT ',
+                  '-A POSTROUTING -j nova-compute-POSTROUTING ',
+                  '-A POSTROUTING -j nova-postrouting-bottom ',
+                  '-A nova-postrouting-bottom -j nova-compute-SNATTING ',
+                  '-A nova-compute-SNATTING -j nova-compute-floating-ip-snat ',
+                  'COMMIT',
+                  '# Completed on Fri Feb 18 15:17:05 2011']
+
+    def setUp(self):
+        super(IptablesManagerTestCase, self).setUp()
+        self.manager = linux_net.IptablesManager()
+
+    def test_filter_rules_are_wrapped(self):
+        current_lines = self.sample_filter
+
+        table = self.manager.ipv4['filter']
+        table.add_rule('FORWARD', '-s 1.2.3.4/5 -j DROP')
+        new_lines = self.manager._modify_rules(current_lines, table)
+        self.assertTrue('-A run_tests.py-FORWARD '
+                        '-s 1.2.3.4/5 -j DROP' in new_lines)
+
+        table.remove_rule('FORWARD', '-s 1.2.3.4/5 -j DROP')
+        new_lines = self.manager._modify_rules(current_lines, table)
+        self.assertTrue('-A run_tests.py-FORWARD '
+                        '-s 1.2.3.4/5 -j DROP' not in new_lines)
+
+    def test_nat_rules(self):
+        current_lines = self.sample_nat
+        new_lines = self.manager._modify_rules(current_lines,
+                                               self.manager.ipv4['nat'])
+
+        for line in [':nova-compute-OUTPUT - [0:0]',
+                     ':nova-compute-floating-ip-snat - [0:0]',
+                     ':nova-compute-SNATTING - [0:0]',
+                     ':nova-compute-PREROUTING - [0:0]',
+                     ':nova-compute-POSTROUTING - [0:0]']:
+            self.assertTrue(line in new_lines, "One of nova-compute's chains "
+                                               "went missing.")
+
+        seen_lines = set()
+        for line in new_lines:
+            line = line.strip()
+            self.assertTrue(line not in seen_lines,
+                            "Duplicate line: %s" % line)
+            seen_lines.add(line)
+
+        last_postrouting_line = ''
+
+        for line in new_lines:
+            if line.startswith('-A POSTROUTING'):
+                last_postrouting_line = line
+
+        self.assertTrue('-j nova-postrouting-bottom' in last_postrouting_line,
+                        "Last POSTROUTING rule does not jump to "
+                        "nova-postrouting-bottom: %s" % last_postrouting_line)
+
+        for chain in ['POSTROUTING', 'PREROUTING', 'OUTPUT']:
+            self.assertTrue('-A %s -j run_tests.py-%s' \
+                            % (chain, chain) in new_lines,
+                            "Built-in chain %s not wrapped" % (chain,))
+
+    def test_filter_rules(self):
+        current_lines = self.sample_filter
+        new_lines = self.manager._modify_rules(current_lines,
+                                               self.manager.ipv4['filter'])
+
+        for line in [':nova-compute-FORWARD - [0:0]',
+                     ':nova-compute-INPUT - [0:0]',
+                     ':nova-compute-local - [0:0]',
+                     ':nova-compute-OUTPUT - [0:0]']:
+            self.assertTrue(line in new_lines, "One of nova-compute's chains"
+                                               " went missing.")
+
+        seen_lines = set()
+        for line in new_lines:
+            line = line.strip()
+            self.assertTrue(line not in seen_lines,
+                            "Duplicate line: %s" % line)
+            seen_lines.add(line)
+
+        for chain in ['FORWARD', 'OUTPUT']:
+            for line in new_lines:
+                if line.startswith('-A %s' % chain):
+                    self.assertTrue('-j nova-filter-top' in line,
+                                    "First %s rule does not "
+                                    "jump to nova-filter-top" % chain)
+                    break
+
+        self.assertTrue('-A nova-filter-top '
+                        '-j run_tests.py-local' in new_lines,
+                        "nova-filter-top does not jump to wrapped local chain")
+
+        for chain in ['INPUT', 'OUTPUT', 'FORWARD']:
+            self.assertTrue('-A %s -j run_tests.py-%s' \
+                            % (chain, chain) in new_lines,
+                            "Built-in chain %s not wrapped" % (chain,))
+
+
 class NetworkTestCase(test.TestCase):
     """Test cases for network code"""
     def setUp(self):
@@ -343,13 +485,13 @@ def lease_ip(private_ip):
                                             private_ip)
     instance_ref = db.fixed_ip_get_instance(context.get_admin_context(),
                                             private_ip)
-    cmd = "%s add %s %s fake" % (binpath('nova-dhcpbridge'),
-                                 instance_ref['mac_address'],
-                                 private_ip)
+    cmd = (binpath('nova-dhcpbridge'), 'add',
+           instance_ref['mac_address'],
+           private_ip, 'fake')
     env = {'DNSMASQ_INTERFACE': network_ref['bridge'],
            'TESTING': '1',
            'FLAGFILE': FLAGS.dhcpbridge_flagfile}
-    (out, err) = utils.execute(cmd, addl_env=env)
+    (out, err) = utils.execute(*cmd, addl_env=env)
     LOG.debug("ISSUE_IP: %s, %s ", out, err)


@@ -359,11 +501,11 @@ def release_ip(private_ip):
                                             private_ip)
     instance_ref = db.fixed_ip_get_instance(context.get_admin_context(),
                                             private_ip)
-    cmd = "%s del %s %s fake" % (binpath('nova-dhcpbridge'),
-                                 instance_ref['mac_address'],
-                                 private_ip)
+    cmd = (binpath('nova-dhcpbridge'), 'del',
+           instance_ref['mac_address'],
+           private_ip, 'fake')
     env = {'DNSMASQ_INTERFACE': network_ref['bridge'],
            'TESTING': '1',
            'FLAGFILE': FLAGS.dhcpbridge_flagfile}
-    (out, err) = utils.execute(cmd, addl_env=env)
+    (out, err) = utils.execute(*cmd, addl_env=env)
     LOG.debug("RELEASE_IP: %s, %s ", out, err)

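Two changes run through this file: utils.execute now takes the command as separate arguments instead of a single shell string, and iptables rules are managed through linux_net.IptablesManager. A sketch of both, following the calls above (the 'ls' command is illustrative):

    # Old style (removed): utils.execute("ls -l /tmp")
    out, err = utils.execute('ls', '-l', '/tmp')

    manager = linux_net.IptablesManager()
    manager.ipv4['filter'].add_rule('FORWARD', '-s 1.2.3.4/5 -j DROP')
    # manager.apply() would then rewrite the wrapped chains in one pass.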
@@ -20,11 +20,12 @@ from nova import compute
 from nova import context
 from nova import db
 from nova import flags
+from nova import network
 from nova import quota
 from nova import test
 from nova import utils
+from nova import volume
 from nova.auth import manager
-from nova.api.ec2 import cloud
 from nova.compute import instance_types


@@ -41,7 +42,6 @@ class QuotaTestCase(test.TestCase):
                    quota_gigabytes=20,
                    quota_floating_ips=1)

-        self.cloud = cloud.CloudController()
         self.manager = manager.AuthManager()
         self.user = self.manager.create_user('admin', 'admin', 'admin', True)
         self.project = self.manager.create_project('admin', 'admin', 'admin')
@@ -57,7 +57,7 @@ class QuotaTestCase(test.TestCase):
     def _create_instance(self, cores=2):
         """Create a test instance"""
         inst = {}
-        inst['image_id'] = 'ami-test'
+        inst['image_id'] = 1
         inst['reservation_id'] = 'r-fakeres'
         inst['user_id'] = self.user.id
         inst['project_id'] = self.project.id
@@ -118,12 +118,12 @@ class QuotaTestCase(test.TestCase):
         for i in range(FLAGS.quota_instances):
             instance_id = self._create_instance()
             instance_ids.append(instance_id)
-        self.assertRaises(quota.QuotaError, self.cloud.run_instances,
+        self.assertRaises(quota.QuotaError, compute.API().create,
                                             self.context,
                                             min_count=1,
                                             max_count=1,
                                             instance_type='m1.small',
-                                            image_id='fake')
+                                            image_id=1)
         for instance_id in instance_ids:
             db.instance_destroy(self.context, instance_id)

@@ -131,12 +131,12 @@ class QuotaTestCase(test.TestCase):
         instance_ids = []
         instance_id = self._create_instance(cores=4)
         instance_ids.append(instance_id)
-        self.assertRaises(quota.QuotaError, self.cloud.run_instances,
+        self.assertRaises(quota.QuotaError, compute.API().create,
                                             self.context,
                                             min_count=1,
                                             max_count=1,
                                             instance_type='m1.small',
-                                            image_id='fake')
+                                            image_id=1)
         for instance_id in instance_ids:
             db.instance_destroy(self.context, instance_id)

@@ -145,9 +145,12 @@ class QuotaTestCase(test.TestCase):
         for i in range(FLAGS.quota_volumes):
             volume_id = self._create_volume()
             volume_ids.append(volume_id)
-        self.assertRaises(quota.QuotaError, self.cloud.create_volume,
-                          self.context,
-                          size=10)
+        self.assertRaises(quota.QuotaError,
+                          volume.API().create,
+                          self.context,
+                          size=10,
+                          name='',
+                          description='')
         for volume_id in volume_ids:
             db.volume_destroy(self.context, volume_id)

@@ -156,9 +159,11 @@ class QuotaTestCase(test.TestCase):
         volume_id = self._create_volume(size=20)
         volume_ids.append(volume_id)
         self.assertRaises(quota.QuotaError,
-                          self.cloud.create_volume,
+                          volume.API().create,
                           self.context,
-                          size=10)
+                          size=10,
+                          name='',
+                          description='')
         for volume_id in volume_ids:
             db.volume_destroy(self.context, volume_id)

@@ -172,7 +177,8 @@ class QuotaTestCase(test.TestCase):
         # make an rpc.call, the test just finishes with OK.  It
         # appears to be something in the magic inline callbacks
         # that is breaking.
-        self.assertRaises(quota.QuotaError, self.cloud.allocate_address,
+        self.assertRaises(quota.QuotaError,
+                          network.API().allocate_floating_ip,
                           self.context)
         db.floating_ip_destroy(context.get_admin_context(), address)

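The quota assertions above now target the service-layer APIs directly rather than the EC2 cloud controller. The call shapes, sketched (ctxt stands for a request context built as in the tests):

    from nova import compute
    from nova import network
    from nova import volume

    # Each call raises quota.QuotaError once the project is over its limit.
    compute.API().create(ctxt, min_count=1, max_count=1,
                         instance_type='m1.small', image_id=1)
    volume.API().create(ctxt, size=10, name='', description='')
    network.API().allocate_floating_ip(ctxt)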
@@ -155,7 +155,7 @@ class SimpleDriverTestCase(test.TestCase):
     def _create_instance(self, **kwargs):
         """Create a test instance"""
         inst = {}
-        inst['image_id'] = 'ami-test'
+        inst['image_id'] = 1
         inst['reservation_id'] = 'r-fakeres'
         inst['user_id'] = self.user.id
         inst['project_id'] = self.project.id
@@ -169,8 +169,6 @@ class SimpleDriverTestCase(test.TestCase):
     def _create_volume(self):
         """Create a test volume"""
         vol = {}
-        vol['image_id'] = 'ami-test'
-        vol['reservation_id'] = 'r-fakeres'
         vol['size'] = 1
         vol['availability_zone'] = 'test'
         return db.volume_create(self.context, vol)['id']

@@ -14,6 +14,10 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+import re
+import os
+
+import eventlet
 from xml.etree.ElementTree import fromstring as xml_to_tree
 from xml.dom.minidom import parseString as xml_to_dom

@@ -30,6 +34,70 @@ FLAGS = flags.FLAGS
 flags.DECLARE('instances_path', 'nova.compute.manager')


+def _concurrency(wait, done, target):
+    wait.wait()
+    done.send()
+
+
+class CacheConcurrencyTestCase(test.TestCase):
+    def setUp(self):
+        super(CacheConcurrencyTestCase, self).setUp()
+
+        def fake_exists(fname):
+            basedir = os.path.join(FLAGS.instances_path, '_base')
+            if fname == basedir:
+                return True
+            return False
+
+        def fake_execute(*args, **kwargs):
+            pass
+
+        self.stubs.Set(os.path, 'exists', fake_exists)
+        self.stubs.Set(utils, 'execute', fake_execute)
+
+    def test_same_fname_concurrency(self):
+        """Ensures that the same fname cache runs sequentially"""
+        conn = libvirt_conn.LibvirtConnection
+        wait1 = eventlet.event.Event()
+        done1 = eventlet.event.Event()
+        eventlet.spawn(conn._cache_image, _concurrency,
+                       'target', 'fname', False, wait1, done1)
+        wait2 = eventlet.event.Event()
+        done2 = eventlet.event.Event()
+        eventlet.spawn(conn._cache_image, _concurrency,
+                       'target', 'fname', False, wait2, done2)
+        wait2.send()
+        eventlet.sleep(0)
+        try:
+            self.assertFalse(done2.ready())
+            self.assertTrue('fname' in conn._image_sems)
+        finally:
+            wait1.send()
+            done1.wait()
+        eventlet.sleep(0)
+        self.assertTrue(done2.ready())
+        self.assertFalse('fname' in conn._image_sems)
+
+    def test_different_fname_concurrency(self):
+        """Ensures that two different fname caches are concurrent"""
+        conn = libvirt_conn.LibvirtConnection
+        wait1 = eventlet.event.Event()
+        done1 = eventlet.event.Event()
+        eventlet.spawn(conn._cache_image, _concurrency,
+                       'target', 'fname2', False, wait1, done1)
+        wait2 = eventlet.event.Event()
+        done2 = eventlet.event.Event()
+        eventlet.spawn(conn._cache_image, _concurrency,
+                       'target', 'fname1', False, wait2, done2)
+        wait2.send()
+        eventlet.sleep(0)
+        try:
+            self.assertTrue(done2.ready())
+        finally:
+            wait1.send()
+            eventlet.sleep(0)
+
+
 class LibvirtConnTestCase(test.TestCase):
     def setUp(self):
         super(LibvirtConnTestCase, self).setUp()
@@ -234,16 +302,22 @@ class IptablesFirewallTestCase(test.TestCase):
         self.manager.delete_user(self.user)
         super(IptablesFirewallTestCase, self).tearDown()

-    in_rules = [
+    in_nat_rules = [
+      '# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
+      '*nat',
+      ':PREROUTING ACCEPT [1170:189210]',
+      ':INPUT ACCEPT [844:71028]',
+      ':OUTPUT ACCEPT [5149:405186]',
+      ':POSTROUTING ACCEPT [5063:386098]',
+    ]
+
+    in_filter_rules = [
       '# Generated by iptables-save v1.4.4 on Mon Dec  6 11:54:13 2010',
       '*filter',
       ':INPUT ACCEPT [969615:281627771]',
       ':FORWARD ACCEPT [0:0]',
       ':OUTPUT ACCEPT [915599:63811649]',
       ':nova-block-ipv4 - [0:0]',
-      '-A INPUT -i virbr0 -p udp -m udp --dport 53 -j ACCEPT ',
-      '-A INPUT -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT ',
-      '-A INPUT -i virbr0 -p udp -m udp --dport 67 -j ACCEPT ',
       '-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
       '-A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
       ',ESTABLISHED -j ACCEPT ',
@@ -255,7 +329,7 @@ class IptablesFirewallTestCase(test.TestCase):
       '# Completed on Mon Dec  6 11:54:13 2010',
     ]

-    in6_rules = [
+    in6_filter_rules = [
       '# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
       '*filter',
       ':INPUT ACCEPT [349155:75810423]',
@@ -315,23 +389,34 @@ class IptablesFirewallTestCase(test.TestCase):
         instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])

         # self.fw.add_instance(instance_ref)
-        def fake_iptables_execute(cmd, process_input=None):
-            if cmd == 'sudo ip6tables-save -t filter':
-                return '\n'.join(self.in6_rules), None
-            if cmd == 'sudo iptables-save -t filter':
-                return '\n'.join(self.in_rules), None
-            if cmd == 'sudo iptables-restore':
-                self.out_rules = process_input.split('\n')
+        def fake_iptables_execute(*cmd, **kwargs):
+            process_input = kwargs.get('process_input', None)
+            if cmd == ('sudo', 'ip6tables-save', '-t', 'filter'):
+                return '\n'.join(self.in6_filter_rules), None
+            if cmd == ('sudo', 'iptables-save', '-t', 'filter'):
+                return '\n'.join(self.in_filter_rules), None
+            if cmd == ('sudo', 'iptables-save', '-t', 'nat'):
+                return '\n'.join(self.in_nat_rules), None
+            if cmd == ('sudo', 'iptables-restore'):
+                lines = process_input.split('\n')
+                if '*filter' in lines:
+                    self.out_rules = lines
                 return '', ''
-            if cmd == 'sudo ip6tables-restore':
-                self.out6_rules = process_input.split('\n')
+            if cmd == ('sudo', 'ip6tables-restore'):
+                lines = process_input.split('\n')
+                if '*filter' in lines:
+                    self.out6_rules = lines
                 return '', ''
-        self.fw.execute = fake_iptables_execute
+            print cmd, kwargs
+
+        from nova.network import linux_net
+        linux_net.iptables_manager.execute = fake_iptables_execute

         self.fw.prepare_instance_filter(instance_ref)
         self.fw.apply_instance_filter(instance_ref)

-        in_rules = filter(lambda l: not l.startswith('#'), self.in_rules)
+        in_rules = filter(lambda l: not l.startswith('#'),
+                          self.in_filter_rules)
         for rule in in_rules:
             if not 'nova' in rule:
                 self.assertTrue(rule in self.out_rules,
@@ -354,17 +439,18 @@ class IptablesFirewallTestCase(test.TestCase):
         self.assertTrue(security_group_chain,
                         "The security group chain wasn't added")

-        self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -j ACCEPT' % \
-                        security_group_chain in self.out_rules,
+        regex = re.compile('-A .* -p icmp -s 192.168.11.0/24 -j ACCEPT')
+        self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
                         "ICMP acceptance rule wasn't added")

-        self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -m icmp --icmp-type '
-                        '8 -j ACCEPT' % security_group_chain in self.out_rules,
+        regex = re.compile('-A .* -p icmp -s 192.168.11.0/24 -m icmp '
+                           '--icmp-type 8 -j ACCEPT')
+        self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
                         "ICMP Echo Request acceptance rule wasn't added")

-        self.assertTrue('-A %s -p tcp -s 192.168.10.0/24 -m multiport '
-                        '--dports 80:81 -j ACCEPT' % security_group_chain \
-                        in self.out_rules,
+        regex = re.compile('-A .* -p tcp -s 192.168.10.0/24 -m multiport '
+                           '--dports 80:81 -j ACCEPT')
+        self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
                         "TCP port 80/81 acceptance rule wasn't added")
         db.instance_destroy(admin_ctxt, instance_ref['id'])

|||||||
@@ -99,7 +99,7 @@ class VolumeTestCase(test.TestCase):
     def test_run_attach_detach_volume(self):
         """Make sure volume can be attached and detached from instance."""
         inst = {}
-        inst['image_id'] = 'ami-test'
+        inst['image_id'] = 1
         inst['reservation_id'] = 'r-fakeres'
         inst['launch_time'] = '10'
         inst['user_id'] = 'fake'
172
nova/tests/test_zones.py
Normal file
@@ -0,0 +1,172 @@
+# Copyright 2010 United States Government as represented by the
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For ZoneManager
+"""
+
+import datetime
+import mox
+import novaclient
+
+from nova import context
+from nova import db
+from nova import flags
+from nova import service
+from nova import test
+from nova import rpc
+from nova import utils
+from nova.auth import manager as auth_manager
+from nova.scheduler import zone_manager
+
+FLAGS = flags.FLAGS
+
+
+class FakeZone:
+    """Represents a fake zone from the db"""
+    def __init__(self, *args, **kwargs):
+        for k, v in kwargs.iteritems():
+            setattr(self, k, v)
+
+
+def exploding_novaclient(zone):
+    """Used when we want to simulate a novaclient call failing."""
+    raise Exception("kaboom")
+
+
+class ZoneManagerTestCase(test.TestCase):
+    """Test case for zone manager"""
+    def test_ping(self):
+        zm = zone_manager.ZoneManager()
+        self.mox.StubOutWithMock(zm, '_refresh_from_db')
+        self.mox.StubOutWithMock(zm, '_poll_zones')
+        zm._refresh_from_db(mox.IgnoreArg())
+        zm._poll_zones(mox.IgnoreArg())
+
+        self.mox.ReplayAll()
+        zm.ping(None)
+        self.mox.VerifyAll()
+
+    def test_refresh_from_db_new(self):
+        zm = zone_manager.ZoneManager()
+
+        self.mox.StubOutWithMock(db, 'zone_get_all')
+        db.zone_get_all(mox.IgnoreArg()).AndReturn([
+            FakeZone(id=1, api_url='http://foo.com', username='user1',
+                     password='pass1'),
+        ])
+
+        self.assertEquals(len(zm.zone_states), 0)
+
+        self.mox.ReplayAll()
+        zm._refresh_from_db(None)
+        self.mox.VerifyAll()
+
+        self.assertEquals(len(zm.zone_states), 1)
+        self.assertEquals(zm.zone_states[1].username, 'user1')
+
+    def test_refresh_from_db_replace_existing(self):
+        zm = zone_manager.ZoneManager()
+        zone_state = zone_manager.ZoneState()
+        zone_state.update_credentials(FakeZone(id=1, api_url='http://foo.com',
+                                      username='user1', password='pass1'))
+        zm.zone_states[1] = zone_state
+
+        self.mox.StubOutWithMock(db, 'zone_get_all')
+        db.zone_get_all(mox.IgnoreArg()).AndReturn([
+            FakeZone(id=1, api_url='http://foo.com', username='user2',
+                     password='pass2'),
+        ])
+
+        self.assertEquals(len(zm.zone_states), 1)
+
+        self.mox.ReplayAll()
+        zm._refresh_from_db(None)
+        self.mox.VerifyAll()
+
+        self.assertEquals(len(zm.zone_states), 1)
+        self.assertEquals(zm.zone_states[1].username, 'user2')
+
+    def test_refresh_from_db_missing(self):
+        zm = zone_manager.ZoneManager()
+        zone_state = zone_manager.ZoneState()
+        zone_state.update_credentials(FakeZone(id=1, api_url='http://foo.com',
+                                      username='user1', password='pass1'))
+        zm.zone_states[1] = zone_state
+
+        self.mox.StubOutWithMock(db, 'zone_get_all')
+        db.zone_get_all(mox.IgnoreArg()).AndReturn([])
+
+        self.assertEquals(len(zm.zone_states), 1)
+
+        self.mox.ReplayAll()
+        zm._refresh_from_db(None)
+        self.mox.VerifyAll()
+
+        self.assertEquals(len(zm.zone_states), 0)
+
+    def test_refresh_from_db_add_and_delete(self):
+        zm = zone_manager.ZoneManager()
+        zone_state = zone_manager.ZoneState()
+        zone_state.update_credentials(FakeZone(id=1, api_url='http://foo.com',
+                                      username='user1', password='pass1'))
+        zm.zone_states[1] = zone_state
+
+        self.mox.StubOutWithMock(db, 'zone_get_all')
+
+        db.zone_get_all(mox.IgnoreArg()).AndReturn([
+            FakeZone(id=2, api_url='http://foo.com', username='user2',
+                     password='pass2'),
+        ])
+        self.assertEquals(len(zm.zone_states), 1)
+
+        self.mox.ReplayAll()
+        zm._refresh_from_db(None)
+        self.mox.VerifyAll()
+
+        self.assertEquals(len(zm.zone_states), 1)
+        self.assertEquals(zm.zone_states[2].username, 'user2')
+
+    def test_poll_zone(self):
+        self.mox.StubOutWithMock(zone_manager, '_call_novaclient')
+        zone_manager._call_novaclient(mox.IgnoreArg()).AndReturn(
+                        dict(name='zohan', capabilities='hairdresser'))
+
+        zone_state = zone_manager.ZoneState()
+        zone_state.update_credentials(FakeZone(id=2,
+                       api_url='http://foo.com', username='user2',
+                       password='pass2'))
+        zone_state.attempt = 1
+
+        self.mox.ReplayAll()
+        zone_manager._poll_zone(zone_state)
+        self.mox.VerifyAll()
+        self.assertEquals(zone_state.attempt, 0)
+        self.assertEquals(zone_state.name, 'zohan')
+
+    def test_poll_zone_fails(self):
+        self.stubs.Set(zone_manager, "_call_novaclient", exploding_novaclient)
+
+        zone_state = zone_manager.ZoneState()
+        zone_state.update_credentials(FakeZone(id=2,
+                       api_url='http://foo.com', username='user2',
+                       password='pass2'))
+        zone_state.attempt = FLAGS.zone_failures_to_offline - 1
+
+        self.mox.ReplayAll()
+        zone_manager._poll_zone(zone_state)
+        self.mox.VerifyAll()
+        self.assertEquals(zone_state.attempt, 3)
+        self.assertFalse(zone_state.is_active)
+        self.assertEquals(zone_state.name, None)
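
Note: the poll tests pin down the ZoneState bookkeeping: a successful poll stores the zone's name and resets the failure counter, while consecutive failures accumulate until FLAGS.zone_failures_to_offline marks the zone inactive. A simplified sketch of the behaviour the tests assume (the real code lives in nova.scheduler.zone_manager; this helper is illustrative only):

    def _poll_zone_sketch(zone_state):
        try:
            data = zone_manager._call_novaclient(zone_state)
            zone_state.name = data.get('name')
            zone_state.attempt = 0            # success clears the counter
        except Exception:
            zone_state.attempt += 1
            if zone_state.attempt >= FLAGS.zone_failures_to_offline:
                zone_state.is_active = False  # zone taken offline
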
107
nova/utils.py
@@ -23,10 +23,14 @@ System-level utilities and helper functions.
 
 import base64
 import datetime
+import functools
 import inspect
 import json
+import lockfile
+import netaddr
 import os
 import random
+import re
 import socket
 import string
 import struct
@@ -34,20 +38,20 @@ import sys
 import time
 import types
 from xml.sax import saxutils
-import re
-import netaddr
 
 from eventlet import event
 from eventlet import greenthread
 from eventlet.green import subprocess
+None
 from nova import exception
 from nova.exception import ProcessExecutionError
+from nova import flags
 from nova import log as logging
 
 
 LOG = logging.getLogger("nova.utils")
 TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
+FLAGS = flags.FLAGS
 
 
 def import_class(import_str):
@@ -125,40 +129,59 @@ def fetchfile(url, target):
 #    c.perform()
 #    c.close()
 #    fp.close()
-    execute("curl --fail %s -o %s" % (url, target))
+    execute("curl", "--fail", url, "-o", target)
 
 
-def execute(cmd, process_input=None, addl_env=None, check_exit_code=True):
-    LOG.debug(_("Running cmd (subprocess): %s"), cmd)
-    env = os.environ.copy()
-    if addl_env:
-        env.update(addl_env)
-    obj = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
-                           stdout=subprocess.PIPE, stderr=subprocess.PIPE,
-                           env=env)
-    result = None
-    if process_input != None:
-        result = obj.communicate(process_input)
-    else:
-        result = obj.communicate()
-    obj.stdin.close()
-    if obj.returncode:
-        LOG.debug(_("Result was %s") % obj.returncode)
-        if check_exit_code and obj.returncode != 0:
-            (stdout, stderr) = result
-            raise ProcessExecutionError(exit_code=obj.returncode,
-                                        stdout=stdout,
-                                        stderr=stderr,
-                                        cmd=cmd)
-    # NOTE(termie): this appears to be necessary to let the subprocess call
-    #               clean something up in between calls, without it two
-    #               execute calls in a row hangs the second one
-    greenthread.sleep(0)
-    return result
+def execute(*cmd, **kwargs):
+    process_input = kwargs.get('process_input', None)
+    addl_env = kwargs.get('addl_env', None)
+    check_exit_code = kwargs.get('check_exit_code', 0)
+    stdin = kwargs.get('stdin', subprocess.PIPE)
+    stdout = kwargs.get('stdout', subprocess.PIPE)
+    stderr = kwargs.get('stderr', subprocess.PIPE)
+    attempts = kwargs.get('attempts', 1)
+    cmd = map(str, cmd)
+
+    while attempts > 0:
+        attempts -= 1
+        try:
+            LOG.debug(_("Running cmd (subprocess): %s"), ' '.join(cmd))
+            env = os.environ.copy()
+            if addl_env:
+                env.update(addl_env)
+            obj = subprocess.Popen(cmd, stdin=stdin,
+                                   stdout=stdout, stderr=stderr, env=env)
+            result = None
+            if process_input != None:
+                result = obj.communicate(process_input)
+            else:
+                result = obj.communicate()
+            obj.stdin.close()
+            if obj.returncode:
+                LOG.debug(_("Result was %s") % obj.returncode)
+                if type(check_exit_code) == types.IntType \
+                        and obj.returncode != check_exit_code:
+                    (stdout, stderr) = result
+                    raise ProcessExecutionError(exit_code=obj.returncode,
+                                                stdout=stdout,
+                                                stderr=stderr,
+                                                cmd=' '.join(cmd))
+            # NOTE(termie): this appears to be necessary to let the subprocess
+            #               call clean something up in between calls, without
+            #               it two execute calls in a row hangs the second one
+            greenthread.sleep(0)
+            return result
+        except ProcessExecutionError:
+            if not attempts:
+                raise
+            else:
+                LOG.debug(_("%r failed. Retrying."), cmd)
+                greenthread.sleep(random.randint(20, 200) / 100.0)
 
 
 def ssh_execute(ssh, cmd, process_input=None,
                 addl_env=None, check_exit_code=True):
-    LOG.debug(_("Running cmd (SSH): %s"), cmd)
+    LOG.debug(_("Running cmd (SSH): %s"), ' '.join(cmd))
     if addl_env:
         raise exception.Error("Environment not supported over SSH")
 
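
Note: execute() now takes the command as discrete arguments and never invokes a shell, which removes a whole class of quoting and injection bugs; behaviour knobs travel as keyword arguments, including the new 'attempts' retry budget. A migration sketch (src/dst are placeholder variables):

    from nova import utils

    # Before: one shell-interpolated string.
    #   utils.execute("cp %s %s" % (src, dst))
    # After: an argument vector, retried up to three times on failure.
    out, err = utils.execute('cp', src, dst, attempts=3)
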
@@ -187,7 +210,7 @@ def ssh_execute(ssh, cmd, process_input=None,
         raise exception.ProcessExecutionError(exit_code=exit_status,
                                               stdout=stdout,
                                               stderr=stderr,
-                                              cmd=cmd)
+                                              cmd=' '.join(cmd))
 
     return (stdout, stderr)
 
@@ -220,9 +243,9 @@ def debug(arg):
     return arg
 
 
-def runthis(prompt, cmd, check_exit_code=True):
-    LOG.debug(_("Running %s"), (cmd))
-    rv, err = execute(cmd, check_exit_code=check_exit_code)
+def runthis(prompt, *cmd, **kwargs):
+    LOG.debug(_("Running %s"), (" ".join(cmd)))
+    rv, err = execute(*cmd, **kwargs)
 
 
 def generate_uid(topic, size=8):
@@ -254,7 +277,7 @@ def last_octet(address):
 
 def get_my_linklocal(interface):
     try:
-        if_str = execute("ip -f inet6 -o addr show %s" % interface)
+        if_str = execute("ip", "-f", "inet6", "-o", "addr", "show", interface)
         condition = "\s+inet6\s+([0-9a-f:]+)/\d+\s+scope\s+link"
         links = [re.search(condition, x) for x in if_str[0].split('\n')]
         address = [w.group(1) for w in links if w is not None]
@@ -491,6 +514,18 @@ def loads(s):
     return json.loads(s)
 
 
+def synchronized(name):
+    def wrap(f):
+        @functools.wraps(f)
+        def inner(*args, **kwargs):
+            lock = lockfile.FileLock(os.path.join(FLAGS.lock_path,
+                                                  'nova-%s.lock' % name))
+            with lock:
+                return f(*args, **kwargs)
+        return inner
+    return wrap
+
+
 def ensure_b64_encoding(val):
     """Safety method to ensure that values expected to be base64-encoded
     actually are. If they are, the value is returned unchanged. Otherwise,
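
Note: synchronized() provides cross-process mutual exclusion by taking a lockfile.FileLock named after the decorated resource under FLAGS.lock_path. Illustrative use (the decorated function is hypothetical, not from this commit):

    @synchronized('nbd-allocate')
    def allocate_nbd_device():
        # Only one nova process at a time may run this body; the others
        # block on the 'nova-nbd-allocate.lock' file until it is released.
        pass
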
@@ -49,10 +49,10 @@ def extend(image, size):
     file_size = os.path.getsize(image)
     if file_size >= size:
         return
-    utils.execute('truncate -s %s %s' % (size, image))
+    utils.execute('truncate', '-s', size, image)
     # NOTE(vish): attempts to resize filesystem
-    utils.execute('e2fsck -fp %s' % image, check_exit_code=False)
-    utils.execute('resize2fs %s' % image, check_exit_code=False)
+    utils.execute('e2fsck', '-fp', image, check_exit_code=False)
+    utils.execute('resize2fs', image, check_exit_code=False)
 
 
 def inject_data(image, key=None, net=None, partition=None, nbd=False):
@@ -68,7 +68,7 @@ def inject_data(image, key=None, net=None, partition=None, nbd=False):
     try:
         if not partition is None:
             # create partition
-            out, err = utils.execute('sudo kpartx -a %s' % device)
+            out, err = utils.execute('sudo', 'kpartx', '-a', device)
             if err:
                 raise exception.Error(_('Failed to load partition: %s') % err)
             mapped_device = '/dev/mapper/%sp%s' % (device.split('/')[-1],
@@ -84,13 +84,14 @@ def inject_data(image, key=None, net=None, partition=None, nbd=False):
                                                    mapped_device)
 
         # Configure ext2fs so that it doesn't auto-check every N boots
-        out, err = utils.execute('sudo tune2fs -c 0 -i 0 %s' % mapped_device)
+        out, err = utils.execute('sudo', 'tune2fs',
+                                 '-c', 0, '-i', 0, mapped_device)
 
         tmpdir = tempfile.mkdtemp()
         try:
             # mount loopback to dir
             out, err = utils.execute(
-                    'sudo mount %s %s' % (mapped_device, tmpdir))
+                    'sudo', 'mount', mapped_device, tmpdir)
             if err:
                 raise exception.Error(_('Failed to mount filesystem: %s')
                                       % err)
@@ -103,13 +104,13 @@ def inject_data(image, key=None, net=None, partition=None, nbd=False):
                 _inject_net_into_fs(net, tmpdir)
             finally:
                 # unmount device
-                utils.execute('sudo umount %s' % mapped_device)
+                utils.execute('sudo', 'umount', mapped_device)
         finally:
             # remove temporary directory
-            utils.execute('rmdir %s' % tmpdir)
+            utils.execute('rmdir', tmpdir)
             if not partition is None:
                 # remove partitions
-                utils.execute('sudo kpartx -d %s' % device)
+                utils.execute('sudo', 'kpartx', '-d', device)
     finally:
         _unlink_device(device, nbd)
 
@@ -118,7 +119,7 @@ def _link_device(image, nbd):
     """Link image to device using loopback or nbd"""
     if nbd:
         device = _allocate_device()
-        utils.execute('sudo qemu-nbd -c %s %s' % (device, image))
+        utils.execute('sudo', 'qemu-nbd', '-c', device, image)
         # NOTE(vish): this forks into another process, so give it a chance
         #             to set up before continuuing
         for i in xrange(FLAGS.timeout_nbd):
@@ -127,7 +128,7 @@ def _link_device(image, nbd):
             time.sleep(1)
         raise exception.Error(_('nbd device %s did not show up') % device)
     else:
-        out, err = utils.execute('sudo losetup --find --show %s' % image)
+        out, err = utils.execute('sudo', 'losetup', '--find', '--show', image)
         if err:
             raise exception.Error(_('Could not attach image to loopback: %s')
                                   % err)
@@ -137,10 +138,10 @@ def _link_device(image, nbd):
 def _unlink_device(device, nbd):
     """Unlink image from device using loopback or nbd"""
     if nbd:
-        utils.execute('sudo qemu-nbd -d %s' % device)
+        utils.execute('sudo', 'qemu-nbd', '-d', device)
         _free_device(device)
     else:
-        utils.execute('sudo losetup --detach %s' % device)
+        utils.execute('sudo', 'losetup', '--detach', device)
 
 
 _DEVICES = ['/dev/nbd%s' % i for i in xrange(FLAGS.max_nbd_devices)]
@@ -170,11 +171,12 @@ def _inject_key_into_fs(key, fs):
     fs is the path to the base of the filesystem into which to inject the key.
     """
     sshdir = os.path.join(fs, 'root', '.ssh')
-    utils.execute('sudo mkdir -p %s' % sshdir)  # existing dir doesn't matter
-    utils.execute('sudo chown root %s' % sshdir)
-    utils.execute('sudo chmod 700 %s' % sshdir)
+    utils.execute('sudo', 'mkdir', '-p', sshdir)  # existing dir doesn't matter
+    utils.execute('sudo', 'chown', 'root', sshdir)
+    utils.execute('sudo', 'chmod', '700', sshdir)
     keyfile = os.path.join(sshdir, 'authorized_keys')
-    utils.execute('sudo tee -a %s' % keyfile, '\n' + key.strip() + '\n')
+    utils.execute('sudo', 'tee', '-a', keyfile,
+                  process_input='\n' + key.strip() + '\n')
 
 
 def _inject_net_into_fs(net, fs):
@@ -183,8 +185,8 @@ def _inject_net_into_fs(net, fs):
     net is the contents of /etc/network/interfaces.
     """
     netdir = os.path.join(os.path.join(fs, 'etc'), 'network')
-    utils.execute('sudo mkdir -p %s' % netdir)  # existing dir doesn't matter
-    utils.execute('sudo chown root:root %s' % netdir)
-    utils.execute('sudo chmod 755 %s' % netdir)
+    utils.execute('sudo', 'mkdir', '-p', netdir)  # existing dir doesn't matter
+    utils.execute('sudo', 'chown', 'root:root', netdir)
+    utils.execute('sudo', 'chmod', 755, netdir)
     netfile = os.path.join(netdir, 'interfaces')
-    utils.execute('sudo tee %s' % netfile, net)
+    utils.execute('sudo', 'tee', netfile, net)
@@ -28,29 +28,32 @@ import time
 import urllib2
 import urlparse
 
+from nova import context
 from nova import flags
 from nova import log as logging
 from nova import utils
 from nova.auth import manager
 from nova.auth import signer
-from nova.objectstore import image
 
 
 FLAGS = flags.FLAGS
-flags.DEFINE_bool('use_s3', True,
-                  'whether to get images from s3 or use local copy')
 
 LOG = logging.getLogger('nova.virt.images')
 
 
-def fetch(image, path, user, project):
-    if FLAGS.use_s3:
-        f = _fetch_s3_image
-    else:
-        f = _fetch_local_image
-    return f(image, path, user, project)
+def fetch(image_id, path, _user, _project):
+    # TODO(vish): Improve context handling and add owner and auth data
+    #             when it is added to glance. Right now there is no
+    #             auth checking in glance, so we assume that access was
+    #             checked before we got here.
+    image_service = utils.import_object(FLAGS.image_service)
+    with open(path, "wb") as image_file:
+        elevated = context.get_admin_context()
+        metadata = image_service.get(elevated, image_id, image_file)
+    return metadata
 
 
+# NOTE(vish): The methods below should be unnecessary, but I'm leaving
+#             them in case the glance client does not work on windows.
 def _fetch_image_no_curl(url, path, headers):
     request = urllib2.Request(url)
     for (k, v) in headers.iteritems():
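
Note: image retrieval no longer branches on a use_s3 flag; it always goes through the configured FLAGS.image_service, whose get() streams the image bytes into the supplied file object and returns the image metadata. A minimal caller sketch (the id and path are illustrative):

    from nova.virt import images

    metadata = images.fetch(1, '/tmp/image-0001', None, None)
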
@@ -94,8 +97,7 @@ def _fetch_s3_image(image, path, user, project):
         cmd += ['-H', '\'%s: %s\'' % (k, v)]
 
     cmd += ['-o', path]
-    cmd_out = ' '.join(cmd)
-    return utils.execute(cmd_out)
+    return utils.execute(*cmd)
 
 
 def _fetch_local_image(image, path, user, project):
@@ -103,13 +105,15 @@ def _fetch_local_image(image, path, user, project):
     if sys.platform.startswith('win'):
         return shutil.copy(source, path)
     else:
-        return utils.execute('cp %s %s' % (source, path))
+        return utils.execute('cp', source, path)
 
 
 def _image_path(path):
     return os.path.join(FLAGS.images_path, path)
 
 
+# TODO(vish): xenapi should use the glance client code directly instead
+#             of retrieving the image using this method.
 def image_url(image):
     if FLAGS.image_service == "nova.image.glance.GlanceImageService":
         return "http://%s:%s/images/%s" % (FLAGS.glance_host,
@@ -44,9 +44,8 @@ import uuid
 from xml.dom import minidom
 
 
-from eventlet import greenthread
-from eventlet import event
 from eventlet import tpool
+from eventlet import semaphore
 
 import IPy
 
@@ -57,7 +56,6 @@ from nova import flags
 from nova import log as logging
 #from nova import test
 from nova import utils
-#from nova.api import context
 from nova.auth import manager
 from nova.compute import instance_types
 from nova.compute import power_state
@@ -439,8 +437,10 @@ class LibvirtConnection(object):
 
         if virsh_output.startswith('/dev/'):
             LOG.info(_("cool, it's a device"))
-            out, err = utils.execute("sudo dd if=%s iflag=nonblock" %
-                                     virsh_output, check_exit_code=False)
+            out, err = utils.execute('sudo', 'dd',
+                                     "if=%s" % virsh_output,
+                                     'iflag=nonblock',
+                                     check_exit_code=False)
             return out
         else:
             return ''
@@ -462,11 +462,11 @@ class LibvirtConnection(object):
         console_log = os.path.join(FLAGS.instances_path, instance['name'],
                                    'console.log')
 
-        utils.execute('sudo chown %d %s' % (os.getuid(), console_log))
+        utils.execute('sudo', 'chown', os.getuid(), console_log)
 
         if FLAGS.libvirt_type == 'xen':
             # Xen is special
-            virsh_output = utils.execute("virsh ttyconsole %s" %
+            virsh_output = utils.execute('virsh', 'ttyconsole',
                                          instance['name'])
             data = self._flush_xen_console(virsh_output)
             fpath = self._append_to_file(data, console_log)
@@ -483,9 +483,10 @@ class LibvirtConnection(object):
             port = random.randint(int(start_port), int(end_port))
             # netcat will exit with 0 only if the port is in use,
             # so a nonzero return value implies it is unused
-            cmd = 'netcat 0.0.0.0 %s -w 1 </dev/null || echo free' % (port)
-            stdout, stderr = utils.execute(cmd)
-            if stdout.strip() == 'free':
+            cmd = 'netcat', '0.0.0.0', port, '-w', '1'
+            try:
+                stdout, stderr = utils.execute(*cmd, process_input='')
+            except ProcessExecutionError:
                 return port
         raise Exception(_('Unable to find an open port'))
 
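
Note: the port probe logic is inverted by this change: netcat exits non-zero when nothing is listening, so with exit-code checking enabled a free port now surfaces as ProcessExecutionError, which is caught and treated as "port available". Roughly (assuming ProcessExecutionError is imported in this module):

    try:
        utils.execute('netcat', '0.0.0.0', port, '-w', '1',
                      process_input='')
    except ProcessExecutionError:
        return port    # netcat failed to connect: the port is free
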
@@ -512,7 +513,10 @@ class LibvirtConnection(object):
         subprocess.Popen(cmd, shell=True)
         return {'token': token, 'host': host, 'port': port}
 
-    def _cache_image(self, fn, target, fname, cow=False, *args, **kwargs):
+    _image_sems = {}
+
+    @staticmethod
+    def _cache_image(fn, target, fname, cow=False, *args, **kwargs):
         """Wrapper for a method that creates an image that caches the image.
 
         This wrapper will save the image into a common store and create a
@@ -531,14 +535,21 @@ class LibvirtConnection(object):
         if not os.path.exists(base_dir):
             os.mkdir(base_dir)
         base = os.path.join(base_dir, fname)
-        if not os.path.exists(base):
-            fn(target=base, *args, **kwargs)
+
+        if fname not in LibvirtConnection._image_sems:
+            LibvirtConnection._image_sems[fname] = semaphore.Semaphore()
+        with LibvirtConnection._image_sems[fname]:
+            if not os.path.exists(base):
+                fn(target=base, *args, **kwargs)
+        if not LibvirtConnection._image_sems[fname].locked():
+            del LibvirtConnection._image_sems[fname]
 
         if cow:
-            utils.execute('qemu-img create -f qcow2 -o '
-                          'cluster_size=2M,backing_file=%s %s'
-                          % (base, target))
+            utils.execute('qemu-img', 'create', '-f', 'qcow2', '-o',
+                          'cluster_size=2M,backing_file=%s' % base,
+                          target)
         else:
-            utils.execute('cp %s %s' % (base, target))
+            utils.execute('cp', base, target)
 
     def _fetch_image(self, target, image_id, user, project, size=None):
         """Grab image and optionally attempt to resize it"""
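
Note: the per-fname semaphore makes concurrent requests for the same base image serialize on the download while distinct images still fetch in parallel, and the dictionary entry is dropped once no greenthread holds it. The core pattern, reduced to an illustrative helper (not part of the commit):

    from eventlet import semaphore

    _sems = {}

    def run_once_per_key(key, fn):
        # First caller for a key creates the semaphore; later callers block.
        if key not in _sems:
            _sems[key] = semaphore.Semaphore()
        with _sems[key]:
            fn()                      # only one runner per key at a time
        if not _sems[key].locked():
            del _sems[key]            # last holder cleans up the entry
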
@@ -548,7 +559,7 @@ class LibvirtConnection(object):
 
     def _create_local(self, target, local_gb):
         """Create a blank image of specified size"""
-        utils.execute('truncate %s -s %dG' % (target, local_gb))
+        utils.execute('truncate', target, '-s', "%dG" % local_gb)
         # TODO(vish): should we format disk by default?
 
     def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None):
@@ -559,7 +570,7 @@ class LibvirtConnection(object):
                                 fname + suffix)
 
         # ensure directories exist and are writable
-        utils.execute('mkdir -p %s' % basepath(suffix=''))
+        utils.execute('mkdir', '-p', basepath(suffix=''))
 
         LOG.info(_('instance %s: Creating image'), inst['name'])
         f = open(basepath('libvirt.xml'), 'w')
@@ -579,21 +590,23 @@ class LibvirtConnection(object):
                            'ramdisk_id': inst['ramdisk_id']}
 
         if disk_images['kernel_id']:
+            fname = '%08x' % int(disk_images['kernel_id'])
             self._cache_image(fn=self._fetch_image,
                               target=basepath('kernel'),
-                              fname=disk_images['kernel_id'],
+                              fname=fname,
                               image_id=disk_images['kernel_id'],
                               user=user,
                               project=project)
             if disk_images['ramdisk_id']:
+                fname = '%08x' % int(disk_images['ramdisk_id'])
                 self._cache_image(fn=self._fetch_image,
                                   target=basepath('ramdisk'),
-                                  fname=disk_images['ramdisk_id'],
+                                  fname=fname,
                                   image_id=disk_images['ramdisk_id'],
                                   user=user,
                                   project=project)
 
-        root_fname = disk_images['image_id']
+        root_fname = '%08x' % int(disk_images['image_id'])
         size = FLAGS.minimum_root_size
         if inst['instance_type'] == 'm1.tiny' or suffix == '.rescue':
             size = None
@@ -659,7 +672,7 @@ class LibvirtConnection(object):
                         ' data into image %(img_id)s (%(e)s)') % locals())
 
         if FLAGS.libvirt_type == 'uml':
-            utils.execute('sudo chown root %s' % basepath('disk'))
+            utils.execute('sudo', 'chown', 'root', basepath('disk'))
 
     def to_xml(self, instance, rescue=False):
         # TODO(termie): cache?
@@ -1208,10 +1221,14 @@ class NWFilterFirewall(FirewallDriver):
 
 class IptablesFirewallDriver(FirewallDriver):
     def __init__(self, execute=None, **kwargs):
-        self.execute = execute or utils.execute
+        from nova.network import linux_net
+        self.iptables = linux_net.iptables_manager
         self.instances = {}
         self.nwfilter = NWFilterFirewall(kwargs['get_connection'])
+
+        self.iptables.ipv4['filter'].add_chain('sg-fallback')
+        self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP')
 
     def setup_basic_filtering(self, instance):
         """Use NWFilter from libvirt for this."""
         return self.nwfilter.setup_basic_filtering(instance)
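
Note: the driver no longer shells out on its own; it registers chains and rules with the shared linux_net.iptables_manager, which serializes all changes through a single iptables-save/iptables-restore cycle. The 'sg-fallback' chain added in __init__ is the terminal DROP target every per-instance chain falls through to. Conceptual use of the manager API exercised by this diff:

    iptables = linux_net.iptables_manager
    iptables.ipv4['filter'].add_chain('sg-fallback')
    iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP')
    iptables.apply()    # flush the accumulated rules to the kernel
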
@@ -1220,126 +1237,97 @@ class IptablesFirewallDriver(FirewallDriver):
         """No-op. Everything is done in prepare_instance_filter"""
         pass
 
-    def remove_instance(self, instance):
+    def unfilter_instance(self, instance):
         if instance['id'] in self.instances:
             del self.instances[instance['id']]
+            self.remove_filters_for_instance(instance)
+            self.iptables.apply()
         else:
             LOG.info(_('Attempted to unfilter instance %s which is not '
                        'filtered'), instance['id'])
 
-    def add_instance(self, instance):
-        self.instances[instance['id']] = instance
-
-    def unfilter_instance(self, instance):
-        self.remove_instance(instance)
-        self.apply_ruleset()
-
     def prepare_instance_filter(self, instance):
-        self.add_instance(instance)
-        self.apply_ruleset()
+        self.instances[instance['id']] = instance
+        self.add_filters_for_instance(instance)
+        self.iptables.apply()
 
-    def apply_ruleset(self):
-        current_filter, _ = self.execute('sudo iptables-save -t filter')
-        current_lines = current_filter.split('\n')
-        new_filter = self.modify_rules(current_lines, 4)
-        self.execute('sudo iptables-restore',
-                     process_input='\n'.join(new_filter))
-        if(FLAGS.use_ipv6):
-            current_filter, _ = self.execute('sudo ip6tables-save -t filter')
-            current_lines = current_filter.split('\n')
-            new_filter = self.modify_rules(current_lines, 6)
-            self.execute('sudo ip6tables-restore',
-                         process_input='\n'.join(new_filter))
+    def add_filters_for_instance(self, instance):
+        chain_name = self._instance_chain_name(instance)
+
+        self.iptables.ipv4['filter'].add_chain(chain_name)
+        ipv4_address = self._ip_for_instance(instance)
+        self.iptables.ipv4['filter'].add_rule('local',
+                                              '-d %s -j $%s' %
+                                              (ipv4_address, chain_name))
+
+        if FLAGS.use_ipv6:
+            self.iptables.ipv6['filter'].add_chain(chain_name)
+            ipv6_address = self._ip_for_instance_v6(instance)
+            self.iptables.ipv6['filter'].add_rule('local',
+                                                  '-d %s -j $%s' %
+                                                  (ipv6_address,
+                                                   chain_name))
+
+        ipv4_rules, ipv6_rules = self.instance_rules(instance)
+
+        for rule in ipv4_rules:
+            self.iptables.ipv4['filter'].add_rule(chain_name, rule)
+
+        if FLAGS.use_ipv6:
+            for rule in ipv6_rules:
+                self.iptables.ipv6['filter'].add_rule(chain_name, rule)
+
+    def remove_filters_for_instance(self, instance):
+        chain_name = self._instance_chain_name(instance)
+
+        self.iptables.ipv4['filter'].remove_chain(chain_name)
+        if FLAGS.use_ipv6:
+            self.iptables.ipv6['filter'].remove_chain(chain_name)
 
-    def modify_rules(self, current_lines, ip_version=4):
+    def instance_rules(self, instance):
         ctxt = context.get_admin_context()
-        # Remove any trace of nova rules.
-        new_filter = filter(lambda l: 'nova-' not in l, current_lines)
 
-        seen_chains = False
-        for rules_index in range(len(new_filter)):
-            if not seen_chains:
-                if new_filter[rules_index].startswith(':'):
-                    seen_chains = True
-            elif seen_chains == 1:
-                if not new_filter[rules_index].startswith(':'):
-                    break
+        ipv4_rules = []
+        ipv6_rules = []
 
-        our_chains = [':nova-fallback - [0:0]']
-        our_rules = ['-A nova-fallback -j DROP']
+        # Always drop invalid packets
+        ipv4_rules += ['-m state --state ' 'INVALID -j DROP']
+        ipv6_rules += ['-m state --state ' 'INVALID -j DROP']
 
-        our_chains += [':nova-local - [0:0]']
-        our_rules += ['-A FORWARD -j nova-local']
-        our_rules += ['-A OUTPUT -j nova-local']
+        # Allow established connections
+        ipv4_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
+        ipv6_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
 
-        security_groups = {}
-        # Add our chains
-        # First, we add instance chains and rules
-        for instance_id in self.instances:
-            instance = self.instances[instance_id]
-            chain_name = self._instance_chain_name(instance)
-            if(ip_version == 4):
-                ip_address = self._ip_for_instance(instance)
-            elif(ip_version == 6):
-                ip_address = self._ip_for_instance_v6(instance)
-
-            our_chains += [':%s - [0:0]' % chain_name]
-
-            # Jump to the per-instance chain
-            our_rules += ['-A nova-local -d %s -j %s' % (ip_address,
-                                                         chain_name)]
-
-            # Always drop invalid packets
-            our_rules += ['-A %s -m state --state '
-                          'INVALID -j DROP' % (chain_name,)]
-
-            # Allow established connections
-            our_rules += ['-A %s -m state --state '
-                          'ESTABLISHED,RELATED -j ACCEPT' % (chain_name,)]
-
-            # Jump to each security group chain in turn
-            for security_group in \
-                    db.security_group_get_by_instance(ctxt,
-                                                      instance['id']):
-                security_groups[security_group['id']] = security_group
-
-                sg_chain_name = self._security_group_chain_name(
-                                                      security_group['id'])
-
-                our_rules += ['-A %s -j %s' % (chain_name, sg_chain_name)]
-
-            if(ip_version == 4):
-                # Allow DHCP responses
-                dhcp_server = self._dhcp_server_for_instance(instance)
-                our_rules += ['-A %s -s %s -p udp --sport 67 --dport 68 '
-                              '-j ACCEPT ' % (chain_name, dhcp_server)]
-                #Allow project network traffic
-                if (FLAGS.allow_project_net_traffic):
-                    cidr = self._project_cidr_for_instance(instance)
-                    our_rules += ['-A %s -s %s -j ACCEPT' % (chain_name, cidr)]
-            elif(ip_version == 6):
-                # Allow RA responses
-                ra_server = self._ra_server_for_instance(instance)
-                if ra_server:
-                    our_rules += ['-A %s -s %s -p icmpv6 -j ACCEPT' %
-                                  (chain_name, ra_server + "/128")]
-                #Allow project network traffic
-                if (FLAGS.allow_project_net_traffic):
-                    cidrv6 = self._project_cidrv6_for_instance(instance)
-                    our_rules += ['-A %s -s %s -j ACCEPT' %
-                                  (chain_name, cidrv6)]
-
-            # If nothing matches, jump to the fallback chain
-            our_rules += ['-A %s -j nova-fallback' % (chain_name,)]
+        dhcp_server = self._dhcp_server_for_instance(instance)
+        ipv4_rules += ['-s %s -p udp --sport 67 --dport 68 '
+                       '-j ACCEPT' % (dhcp_server,)]
+
+        #Allow project network traffic
+        if FLAGS.allow_project_net_traffic:
+            cidr = self._project_cidr_for_instance(instance)
+            ipv4_rules += ['-s %s -j ACCEPT' % (cidr,)]
+
+        # We wrap these in FLAGS.use_ipv6 because they might cause
+        # a DB lookup. The other ones are just list operations, so
+        # they're not worth the clutter.
+        if FLAGS.use_ipv6:
+            # Allow RA responses
+            ra_server = self._ra_server_for_instance(instance)
+            if ra_server:
+                ipv6_rules += ['-s %s/128 -p icmpv6 -j ACCEPT' % (ra_server,)]
+
+            #Allow project network traffic
+            if FLAGS.allow_project_net_traffic:
+                cidrv6 = self._project_cidrv6_for_instance(instance)
+                ipv6_rules += ['-s %s -j ACCEPT' % (cidrv6,)]
+
+        security_groups = db.security_group_get_by_instance(ctxt,
+                                                            instance['id'])
 
         # then, security group chains and rules
-        for security_group_id in security_groups:
-            chain_name = self._security_group_chain_name(security_group_id)
-            our_chains += [':%s - [0:0]' % chain_name]
-
-            rules = \
-                db.security_group_rule_get_by_security_group(ctxt,
-                                                          security_group_id)
+        for security_group in security_groups:
+            rules = db.security_group_rule_get_by_security_group(ctxt,
+                                                          security_group['id'])
 
             for rule in rules:
                 logging.info('%r', rule)
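
Note: instance_rules() is now a pure function from an instance to an (ipv4_rules, ipv6_rules) pair with no iptables state of its own, which is what makes the remove/re-add cycle in refresh_security_group_rules() cheap. The '$' prefix in jump targets like '-j $sg-fallback' is assumed to be expanded by iptables_manager into its wrapped chain name when the ruleset is rendered for iptables-restore:

    # Illustrative expansion (assumed behaviour, not shown in this diff):
    # '-d 10.0.0.2 -j $inst-42'  would render as something like
    # '-A nova-compute-local -d 10.0.0.2 -j nova-compute-inst-42'
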
@@ -1350,14 +1338,16 @@ class IptablesFirewallDriver(FirewallDriver):
                     continue
 
                 version = _get_ip_version(rule.cidr)
-                if version != ip_version:
-                    continue
+                if version == 4:
+                    rules = ipv4_rules
+                else:
+                    rules = ipv6_rules
 
                 protocol = rule.protocol
                 if version == 6 and rule.protocol == 'icmp':
                     protocol = 'icmpv6'
 
-                args = ['-A', chain_name, '-p', protocol, '-s', rule.cidr]
+                args = ['-p', protocol, '-s', rule.cidr]
 
                 if rule.protocol in ['udp', 'tcp']:
                     if rule.from_port == rule.to_port:
@@ -1378,32 +1368,39 @@ class IptablesFirewallDriver(FirewallDriver):
                             icmp_type_arg += '/%s' % icmp_code
 
                         if icmp_type_arg:
-                            if(ip_version == 4):
+                            if version == 4:
                                 args += ['-m', 'icmp', '--icmp-type',
                                          icmp_type_arg]
-                            elif(ip_version == 6):
+                            elif version == 6:
                                 args += ['-m', 'icmp6', '--icmpv6-type',
                                          icmp_type_arg]
 
                 args += ['-j ACCEPT']
-                our_rules += [' '.join(args)]
+                rules += [' '.join(args)]
 
-        new_filter[rules_index:rules_index] = our_rules
-        new_filter[rules_index:rules_index] = our_chains
-        logging.info('new_filter: %s', '\n'.join(new_filter))
-        return new_filter
+        ipv4_rules += ['-j $sg-fallback']
+        ipv6_rules += ['-j $sg-fallback']
+
+        return ipv4_rules, ipv6_rules
 
     def refresh_security_group_members(self, security_group):
         pass
 
     def refresh_security_group_rules(self, security_group):
-        self.apply_ruleset()
+        for instance in self.instances.values():
+            # We use the semaphore to make sure noone applies the rule set
+            # after we've yanked the existing rules but before we've put in
+            # the new ones.
+            with self.iptables.semaphore:
+                self.remove_filters_for_instance(instance)
+                self.add_filters_for_instance(instance)
+                self.iptables.apply()
 
     def _security_group_chain_name(self, security_group_id):
         return 'nova-sg-%s' % (security_group_id,)
 
     def _instance_chain_name(self, instance):
-        return 'nova-inst-%s' % (instance['id'],)
+        return 'inst-%s' % (instance['id'],)
 
     def _ip_for_instance(self, instance):
         return db.instance_get_fixed_address(context.get_admin_context(),
@@ -917,14 +917,13 @@ def _write_partition(virtual_size, dev):
     LOG.debug(_('Writing partition table %(primary_first)d %(primary_last)d'
                 ' to %(dest)s...') % locals())
 
-    def execute(cmd, process_input=None, check_exit_code=True):
-        return utils.execute(cmd=cmd,
-                             process_input=process_input,
-                             check_exit_code=check_exit_code)
+    def execute(*cmd, **kwargs):
+        return utils.execute(*cmd, **kwargs)
 
-    execute('parted --script %s mklabel msdos' % dest)
-    execute('parted --script %s mkpart primary %ds %ds' %
-            (dest, primary_first, primary_last))
+    execute('parted', '--script', dest, 'mklabel', 'msdos')
+    execute('parted', '--script', dest, 'mkpart', 'primary',
+            '%ds' % primary_first,
+            '%ds' % primary_last)
 
     LOG.debug(_('Writing partition table %s done.'), dest)
 
@@ -65,14 +65,14 @@ class VolumeDriver(object):
         self._execute = execute
         self._sync_exec = sync_exec
 
-    def _try_execute(self, command):
+    def _try_execute(self, *command):
         # NOTE(vish): Volume commands can partially fail due to timing, but
         #             running them a second time on failure will usually
         #             recover nicely.
         tries = 0
         while True:
             try:
-                self._execute(command)
+                self._execute(*command)
                 return True
             except exception.ProcessExecutionError:
                 tries = tries + 1
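
Note: _try_execute() keeps its retry-on-ProcessExecutionError loop but now forwards the argument vector unchanged, so volume commands get shell-free execution plus retries. For example (the size and volume name are illustrative):

    self._try_execute('sudo', 'lvcreate', '-L', '1G',
                      '-n', 'volume-0001', FLAGS.volume_group)
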
@@ -84,7 +84,7 @@ class VolumeDriver(object):
 
     def check_for_setup_error(self):
         """Returns an error if prerequisites aren't met"""
-        out, err = self._execute("sudo vgs --noheadings -o name")
+        out, err = self._execute('sudo', 'vgs', '--noheadings', '-o', 'name')
         volume_groups = out.split()
         if not FLAGS.volume_group in volume_groups:
             raise exception.Error(_("volume group %s doesn't exist")
@@ -97,22 +97,22 @@ class VolumeDriver(object):
             sizestr = '100M'
         else:
             sizestr = '%sG' % volume['size']
-        self._try_execute("sudo lvcreate -L %s -n %s %s" %
-                          (sizestr,
+        self._try_execute('sudo', 'lvcreate', '-L', sizestr, '-n',
                           volume['name'],
-                           FLAGS.volume_group))
+                          FLAGS.volume_group)
 
     def delete_volume(self, volume):
         """Deletes a logical volume."""
         try:
-            self._try_execute("sudo lvdisplay %s/%s" %
+            self._try_execute('sudo', 'lvdisplay',
+                              '%s/%s' %
                               (FLAGS.volume_group,
                                volume['name']))
         except Exception as e:
             # If the volume isn't present, then don't attempt to delete
             return True
 
-        self._try_execute("sudo lvremove -f %s/%s" %
+        self._try_execute('sudo', 'lvremove', '-f', "%s/%s" %
                           (FLAGS.volume_group,
                            volume['name']))
 
@@ -168,12 +168,13 @@ class AOEDriver(VolumeDriver):
          blade_id) = self.db.volume_allocate_shelf_and_blade(context,
                                                              volume['id'])
         self._try_execute(
-                "sudo vblade-persist setup %s %s %s /dev/%s/%s" %
-                (shelf_id,
+                'sudo', 'vblade-persist', 'setup',
+                shelf_id,
                 blade_id,
                 FLAGS.aoe_eth_dev,
-                 FLAGS.volume_group,
-                 volume['name']))
+                "/dev/%s/%s" %
+                    (FLAGS.volume_group,
+                     volume['name']))
         # NOTE(vish): The standard _try_execute does not work here
         #             because these methods throw errors if other
         #             volumes on this host are in the process of
@@ -182,9 +183,9 @@ class AOEDriver(VolumeDriver):
         #             just wait a bit for the current volume to
         #             be ready and ignore any errors.
         time.sleep(2)
-        self._execute("sudo vblade-persist auto all",
+        self._execute('sudo', 'vblade-persist', 'auto', 'all',
                       check_exit_code=False)
-        self._execute("sudo vblade-persist start all",
+        self._execute('sudo', 'vblade-persist', 'start', 'all',
                       check_exit_code=False)
 
     def remove_export(self, context, volume):
@@ -192,15 +193,15 @@ class AOEDriver(VolumeDriver):
         (shelf_id,
          blade_id) = self.db.volume_get_shelf_and_blade(context,
                                                         volume['id'])
-        self._try_execute("sudo vblade-persist stop %s %s" %
-                          (shelf_id, blade_id))
-        self._try_execute("sudo vblade-persist destroy %s %s" %
-                          (shelf_id, blade_id))
+        self._try_execute('sudo', 'vblade-persist', 'stop',
+                          shelf_id, blade_id)
+        self._try_execute('sudo', 'vblade-persist', 'destroy',
+                          shelf_id, blade_id)
 
     def discover_volume(self, _volume):
         """Discover volume on a remote host."""
-        self._execute("sudo aoe-discover")
-        self._execute("sudo aoe-stat", check_exit_code=False)
+        self._execute('sudo', 'aoe-discover')
+        self._execute('sudo', 'aoe-stat', check_exit_code=False)
 
     def undiscover_volume(self, _volume):
         """Undiscover volume on a remote host."""
@@ -252,13 +253,16 @@ class ISCSIDriver(VolumeDriver):
 
         iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
         volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name'])
-        self._sync_exec("sudo ietadm --op new "
-                        "--tid=%s --params Name=%s" %
-                        (iscsi_target, iscsi_name),
+        self._sync_exec('sudo', 'ietadm', '--op', 'new',
+                        "--tid=%s" % iscsi_target,
+                        '--params',
+                        "Name=%s" % iscsi_name,
                         check_exit_code=False)
-        self._sync_exec("sudo ietadm --op new --tid=%s "
-                        "--lun=0 --params Path=%s,Type=fileio" %
-                        (iscsi_target, volume_path),
+        self._sync_exec('sudo', 'ietadm', '--op', 'new',
+                        "--tid=%s" % iscsi_target,
+                        '--lun=0',
+                        '--params',
+                        "Path=%s,Type=fileio" % volume_path,
                         check_exit_code=False)
 
     def _ensure_iscsi_targets(self, context, host):
@@ -279,12 +283,13 @@ class ISCSIDriver(VolumeDriver):
                                                           volume['host'])
         iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
         volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name'])
-        self._execute("sudo ietadm --op new "
-                      "--tid=%s --params Name=%s" %
+        self._execute('sudo', 'ietadm', '--op', 'new',
+                      '--tid=%s --params Name=%s' %
                       (iscsi_target, iscsi_name))
-        self._execute("sudo ietadm --op new --tid=%s "
-                      "--lun=0 --params Path=%s,Type=fileio" %
-                      (iscsi_target, volume_path))
+        self._execute('sudo', 'ietadm', '--op', 'new',
+                      '--tid=%s' % iscsi_target,
+                      '--lun=0', '--params',
+                      'Path=%s,Type=fileio' % volume_path)
 
     def remove_export(self, context, volume):
         """Removes an export for a logical volume."""
@@ -299,16 +304,18 @@ class ISCSIDriver(VolumeDriver):
         try:
             # ietadm show will exit with an error if
             # this export has already been removed
-            self._execute("sudo ietadm --op show --tid=%s " % iscsi_target)
+            self._execute('sudo', 'ietadm', '--op', 'show',
+                          '--tid=%s' % iscsi_target)
         except Exception as e:
             LOG.info(_("Skipping remove_export. No iscsi_target " +
                        "is presently exported for volume: %d"), volume['id'])
             return
 
-        self._execute("sudo ietadm --op delete --tid=%s "
-                      "--lun=0" % iscsi_target)
-        self._execute("sudo ietadm --op delete --tid=%s" %
-                      iscsi_target)
+        self._execute('sudo', 'ietadm', '--op', 'delete',
+                      '--tid=%s' % iscsi_target,
+                      '--lun=0')
+        self._execute('sudo', 'ietadm', '--op', 'delete',
+                      '--tid=%s' % iscsi_target)
 
     def _do_iscsi_discovery(self, volume):
         #TODO(justinsb): Deprecate discovery and use stored info
@@ -317,8 +324,8 @@ class ISCSIDriver(VolumeDriver):
 
         volume_name = volume['name']
 
-        (out, _err) = self._execute("sudo iscsiadm -m discovery -t "
-                                    "sendtargets -p %s" % (volume['host']))
+        (out, _err) = self._execute('sudo', 'iscsiadm', '-m', 'discovery',
+                                    '-t', 'sendtargets', '-p', volume['host'])
         for target in out.splitlines():
             if FLAGS.iscsi_ip_prefix in target and volume_name in target:
                 return target
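For context, `_do_iscsi_discovery` depends on the line-oriented output of `iscsiadm -m discovery`: one `ip:port,tpgt iqn` entry per target. A small sketch of the filtering, with made-up sample output:

    SAMPLE = ("10.0.2.15:3260,1 iqn.2010-10.org.openstack:volume-00000001\n"
              "192.168.1.9:3260,1 iqn.2010-10.org.openstack:volume-00000002\n")

    def pick_target(out, ip_prefix, volume_name):
        # Keep the first discovery line matching both the configured
        # IP prefix and the volume name, as the driver does above.
        for target in out.splitlines():
            if ip_prefix in target and volume_name in target:
                return target

    assert pick_target(SAMPLE, '10.0.', 'volume-00000001') == \
        '10.0.2.15:3260,1 iqn.2010-10.org.openstack:volume-00000001'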
@@ -478,7 +485,7 @@ class RBDDriver(VolumeDriver):
 
     def check_for_setup_error(self):
         """Returns an error if prerequisites aren't met"""
-        (stdout, stderr) = self._execute("rados lspools")
+        (stdout, stderr) = self._execute('rados', 'lspools')
         pools = stdout.split("\n")
         if not FLAGS.rbd_pool in pools:
             raise exception.Error(_("rbd has no pool %s") %
@@ -490,16 +497,13 @@ class RBDDriver(VolumeDriver):
             size = 100
         else:
             size = int(volume['size']) * 1024
-        self._try_execute("rbd --pool %s --size %d create %s" %
-                          (FLAGS.rbd_pool,
-                           size,
-                           volume['name']))
+        self._try_execute('rbd', '--pool', FLAGS.rbd_pool,
+                          '--size', size, 'create', volume['name'])
 
     def delete_volume(self, volume):
         """Deletes a logical volume."""
-        self._try_execute("rbd --pool %s rm %s" %
-                          (FLAGS.rbd_pool,
-                           volume['name']))
+        self._try_execute('rbd', '--pool', FLAGS.rbd_pool,
+                          'rm', volume['name'])
 
     def local_path(self, volume):
         """Returns the path of the rbd volume."""
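Note the unit conversion feeding that `--size` argument: `rbd` takes megabytes while nova stores volume sizes in gigabytes, hence the `* 1024`, with a 100 MB floor presumably used when no real size was requested. Restated as a one-liner:

    def rbd_size_mb(volume):
        # rbd --size is in MB; nova volume sizes are in GB
        # (a zero size falls back to a 100 MB volume).
        return 100 if int(volume['size']) == 0 else int(volume['size']) * 1024

    assert rbd_size_mb({'size': 0}) == 100
    assert rbd_size_mb({'size': 2}) == 2048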
@@ -534,7 +538,7 @@ class SheepdogDriver(VolumeDriver):
     def check_for_setup_error(self):
         """Returns an error if prerequisites aren't met"""
         try:
-            (out, err) = self._execute("collie cluster info")
+            (out, err) = self._execute('collie', 'cluster', 'info')
             if not out.startswith('running'):
                 raise exception.Error(_("Sheepdog is not working: %s") % out)
         except exception.ProcessExecutionError:
@@ -546,12 +550,13 @@ class SheepdogDriver(VolumeDriver):
             sizestr = '100M'
         else:
             sizestr = '%sG' % volume['size']
-        self._try_execute("qemu-img create sheepdog:%s %s" %
-                          (volume['name'], sizestr))
+        self._try_execute('qemu-img', 'create',
+                          "sheepdog:%s" % volume['name'],
+                          sizestr)
 
     def delete_volume(self, volume):
         """Deletes a logical volume"""
-        self._try_execute("collie vdi delete %s" % volume['name'])
+        self._try_execute('collie', 'vdi', 'delete', volume['name'])
 
     def local_path(self, volume):
         return "sheepdog:%s" % volume['name']
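The Sheepdog path keeps the size symbolic, since `qemu-img` accepts suffixed sizes ('100M', '2G'). A tiny sketch of the argv list it ends up building (values illustrative):

    def sheepdog_args(volume, sizestr):
        # qemu-img create sheepdog:<vdi-name> <size>, one argv entry each
        return ['qemu-img', 'create', 'sheepdog:%s' % volume['name'], sizestr]

    assert sheepdog_args({'name': 'volume-00000001'}, '1G') == \
        ['qemu-img', 'create', 'sheepdog:volume-00000001', '1G']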
134
nova/wsgi.py
@@ -36,6 +36,7 @@ import webob.exc
 
 from paste import deploy
 
+from nova import exception
 from nova import flags
 from nova import log as logging
 from nova import utils
@@ -82,6 +83,35 @@ class Server(object):
                       log=WritableLogger(logger))
 
 
+class Request(webob.Request):
+
+    def best_match_content_type(self):
+        """
+        Determine the most acceptable content-type based on the
+        query extension then the Accept header
+        """
+
+        parts = self.path.rsplit(".", 1)
+
+        if len(parts) > 1:
+            format = parts[1]
+            if format in ["json", "xml"]:
+                return "application/{0}".format(parts[1])
+
+        ctypes = ["application/json", "application/xml"]
+        bm = self.accept.best_match(ctypes)
+
+        return bm or "application/json"
+
+    def get_content_type(self):
+        try:
+            ct = self.headers["Content-Type"]
+            assert ct in ("application/xml", "application/json")
+            return ct
+        except Exception:
+            raise webob.exc.HTTPBadRequest("Invalid content type")
+
+
 class Application(object):
     """Base WSGI application wrapper. Subclasses need to implement __call__."""
 
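A quick illustration of the negotiation order the new `Request` class implements — URL extension first, then the Accept header, with JSON as the final fallback. The paths here are hypothetical and the behaviour assumes webob's `Request.blank`:

    from nova import wsgi

    req = wsgi.Request.blank('/v1.0/servers.xml')
    assert req.best_match_content_type() == 'application/xml'

    req = wsgi.Request.blank('/v1.0/servers')
    req.accept = 'application/xml'
    assert req.best_match_content_type() == 'application/xml'

    req = wsgi.Request.blank('/v1.0/servers')  # no extension, no Accept
    assert req.best_match_content_type() == 'application/json'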
@@ -113,7 +143,7 @@ class Application(object):
     def __call__(self, environ, start_response):
         r"""Subclasses will probably want to implement __call__ like this:
 
-        @webob.dec.wsgify
+        @webob.dec.wsgify(RequestClass=Request)
         def __call__(self, req):
           # Any of the following objects work as responses:
 
@@ -199,7 +229,7 @@ class Middleware(Application):
         """Do whatever you'd like to the response."""
         return response
 
-    @webob.dec.wsgify
+    @webob.dec.wsgify(RequestClass=Request)
     def __call__(self, req):
         response = self.process_request(req)
         if response:
@@ -212,7 +242,7 @@ class Debug(Middleware):
     """Helper class that can be inserted into any WSGI application chain
     to get information about the request and response."""
 
-    @webob.dec.wsgify
+    @webob.dec.wsgify(RequestClass=Request)
     def __call__(self, req):
         print ("*" * 40) + " REQUEST ENVIRON"
         for key, value in req.environ.items():
@@ -276,7 +306,7 @@ class Router(object):
         self._router = routes.middleware.RoutesMiddleware(self._dispatch,
                                                           self.map)
 
-    @webob.dec.wsgify
+    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
         """
         Route the incoming request to a controller based on self.map.
@@ -285,7 +315,7 @@ class Router(object):
         return self._router
 
     @staticmethod
-    @webob.dec.wsgify
+    @webob.dec.wsgify(RequestClass=Request)
     def _dispatch(req):
         """
         Called by self._router after matching the incoming request to a route
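All of these decorators gain `RequestClass=Request` so the object handed to `__call__` is nova's `Request` subclass rather than a bare `webob.Request`. A self-contained sketch (toy classes, not nova code) of what that buys:

    import webob
    import webob.dec

    class Request(webob.Request):
        def best_match_content_type(self):
            return 'application/json'  # stand-in for the real negotiation

    class Echo(object):
        @webob.dec.wsgify(RequestClass=Request)
        def __call__(self, req):
            # req is our subclass, so the helper is available here
            return 'negotiated: %s' % req.best_match_content_type()

    resp = webob.Request.blank('/').get_response(Echo())
    print(resp.body)  # 'negotiated: application/json'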
@@ -304,11 +334,11 @@ class Controller(object):
     WSGI app that reads routing information supplied by RoutesMiddleware
     and calls the requested action method upon itself. All action methods
     must, in addition to their normal parameters, accept a 'req' argument
-    which is the incoming webob.Request. They raise a webob.exc exception,
+    which is the incoming wsgi.Request. They raise a webob.exc exception,
     or return a dict which will be serialized by requested content type.
     """
 
-    @webob.dec.wsgify
+    @webob.dec.wsgify(RequestClass=Request)
     def __call__(self, req):
         """
         Call the method specified in req.environ by RoutesMiddleware.
@@ -318,32 +348,45 @@ class Controller(object):
         method = getattr(self, action)
         del arg_dict['controller']
         del arg_dict['action']
+        if 'format' in arg_dict:
+            del arg_dict['format']
         arg_dict['req'] = req
         result = method(**arg_dict)
 
         if type(result) is dict:
-            return self._serialize(result, req)
+            content_type = req.best_match_content_type()
+            body = self._serialize(result, content_type)
+
+            response = webob.Response()
+            response.headers["Content-Type"] = content_type
+            response.body = body
+            return response
+
         else:
             return result
 
-    def _serialize(self, data, request):
+    def _serialize(self, data, content_type):
         """
-        Serialize the given dict to the response type requested in request.
+        Serialize the given dict to the provided content_type.
         Uses self._serialization_metadata if it exists, which is a dict mapping
         MIME types to information needed to serialize to that type.
         """
         _metadata = getattr(type(self), "_serialization_metadata", {})
-        serializer = Serializer(request.environ, _metadata)
-        return serializer.to_content_type(data)
+        serializer = Serializer(_metadata)
+        try:
+            return serializer.serialize(data, content_type)
+        except exception.InvalidContentType:
+            raise webob.exc.HTTPNotAcceptable()
 
-    def _deserialize(self, data, request):
+    def _deserialize(self, data, content_type):
         """
-        Deserialize the request body to the response type requested in request.
+        Deserialize the request body to the specified content type.
         Uses self._serialization_metadata if it exists, which is a dict mapping
         MIME types to information needed to serialize to that type.
         """
         _metadata = getattr(type(self), "_serialization_metadata", {})
-        serializer = Serializer(request.environ, _metadata)
-        return serializer.deserialize(data)
+        serializer = Serializer(_metadata)
+        return serializer.deserialize(data, content_type)
 
 
 class Serializer(object):
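Two details worth spelling out in that `Controller.__call__` hunk. First, the new `del arg_dict['format']`: when a route captures an optional `.json`/`.xml` extension, RoutesMiddleware puts a `format` key into the match dict, and since actions take no `format` parameter, `method(**arg_dict)` would fail with an unexpected keyword. Illustrative values, not from this commit:

    # What RoutesMiddleware might deliver for GET /servers/42.json:
    arg_dict = {'controller': 'servers', 'action': 'show',
                'id': '42', 'format': 'json'}

    def show(req, id):          # no 'format' parameter on actions
        return {'server': {'id': id}}

    for key in ('controller', 'action', 'format'):
        arg_dict.pop(key, None)  # the same cleanup as the dels above

Second, dict results are no longer serialized straight into the return value; the controller now builds an explicit `webob.Response` whose `Content-Type` header carries the negotiated type, so body and header can never disagree.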
@@ -351,50 +394,53 @@ class Serializer(object):
     Serializes and deserializes dictionaries to certain MIME types.
     """
 
-    def __init__(self, environ, metadata=None):
+    def __init__(self, metadata=None):
         """
-        Create a serializer based on the given WSGI environment.
+        Create a serializer, given optional serialization metadata.
         'metadata' is an optional dict mapping MIME types to information
         needed to serialize a dictionary to that type.
         """
         self.metadata = metadata or {}
-        req = webob.Request.blank('', environ)
-        suffix = req.path_info.split('.')[-1].lower()
-        if suffix == 'json':
-            self.handler = self._to_json
-        elif suffix == 'xml':
-            self.handler = self._to_xml
-        elif 'application/json' in req.accept:
-            self.handler = self._to_json
-        elif 'application/xml' in req.accept:
-            self.handler = self._to_xml
-        else:
-            # This is the default
-            self.handler = self._to_json
 
-    def to_content_type(self, data):
+    def _get_serialize_handler(self, content_type):
+        handlers = {
+            "application/json": self._to_json,
+            "application/xml": self._to_xml,
+        }
+
+        try:
+            return handlers[content_type]
+        except Exception:
+            raise exception.InvalidContentType()
+
+    def serialize(self, data, content_type):
         """
-        Serialize a dictionary into a string.
-
-        The format of the string will be decided based on the Content Type
-        requested in self.environ: by Accept: header, or by URL suffix.
+        Serialize a dictionary into a string of the specified content type.
         """
-        return self.handler(data)
+        return self._get_serialize_handler(content_type)(data)
 
-    def deserialize(self, datastring):
+    def deserialize(self, datastring, content_type):
         """
         Deserialize a string to a dictionary.
 
         The string must be in the format of a supported MIME type.
         """
-        datastring = datastring.strip()
+        return self.get_deserialize_handler(content_type)(datastring)
+
+    def get_deserialize_handler(self, content_type):
+        handlers = {
+            "application/json": self._from_json,
+            "application/xml": self._from_xml,
+        }
+
         try:
-            is_xml = (datastring[0] == '<')
-            if not is_xml:
-                return utils.loads(datastring)
-            return self._from_xml(datastring)
-        except:
-            return None
+            return handlers[content_type]
+        except Exception:
+            raise exception.InvalidContentType(_("Invalid content type %s")
+                                               % content_type)
+
+    def _from_json(self, datastring):
+        return utils.loads(datastring)
 
     def _from_xml(self, datastring):
         xmldata = self.metadata.get('application/xml', {})
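With the WSGI environment gone from its constructor, `Serializer` is a plain utility keyed by content type, and unsupported types now raise `InvalidContentType` instead of silently falling back to JSON or returning None. A usage sketch — the metadata dict is illustrative of the `_serialization_metadata` shape, not taken from this commit:

    from nova import exception
    from nova import wsgi

    metadata = {'application/xml': {'attributes': {'server': ['id', 'name']}}}
    serializer = wsgi.Serializer(metadata)

    data = {'server': {'id': 1, 'name': 'test'}}
    body = serializer.serialize(data, 'application/json')
    assert serializer.deserialize(body, 'application/json') == data

    try:
        serializer.serialize(data, 'text/html')
    except exception.InvalidContentType:
        pass  # callers map this to HTTP 406 (see Controller._serialize)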
@@ -30,13 +30,15 @@ import simplejson as json
 
 
 def main(dom_id, command, only_this_vif=None):
-    xsls = execute("/usr/bin/xenstore-ls /local/domain/%s/vm-data/networking" \
-                   % dom_id, True)
+    xsls = execute('/usr/bin/xenstore-ls',
+                   '/local/domain/%s/vm-data/networking' % dom_id,
+                   return_stdout=True)
     macs = [line.split("=")[0].strip() for line in xsls.splitlines()]
 
     for mac in macs:
-        xsr = "/usr/bin/xenstore-read /local/domain/%s/vm-data/networking/%s"
-        xsread = execute(xsr % (dom_id, mac), True)
+        xsread = execute('/usr/bin/xenstore-read',
+                         '/local/domain/%s/vm-data/networking/%s' %
+                         (dom_id, mac), return_stdout=True)
         data = json.loads(xsread)
         for ip in data['ips']:
             if data["label"] == "public":
@@ -51,9 +53,10 @@ def main(dom_id, command, only_this_vif=None):
     apply_iptables_rules(command, params)
 
 
-def execute(command, return_stdout=False):
+def execute(*command, **kwargs):
+    return_stdout = kwargs.get('return_stdout', False)
     devnull = open(os.devnull, 'w')
-    proc = subprocess.Popen(command, shell=True, close_fds=True,
+    proc = subprocess.Popen(command, close_fds=True,
                             stdout=subprocess.PIPE, stderr=devnull)
     devnull.close()
     if return_stdout:
@@ -67,45 +70,69 @@ def execute(command, return_stdout=False):
 
 
 def apply_iptables_rules(command, params):
-    iptables = lambda rule: execute("/sbin/iptables %s" % rule)
+    iptables = lambda *rule: execute('/sbin/iptables', *rule)
 
-    iptables("-D FORWARD -m physdev --physdev-in %(VIF)s -s %(IP)s \
-             -j ACCEPT" % params)
+    iptables('-D', 'FORWARD', '-m', 'physdev',
+             '--physdev-in', '%(VIF)s' % params,
+             '-s', '%(IP)s' % params,
+             '-j', 'ACCEPT')
     if command == 'online':
-        iptables("-A FORWARD -m physdev --physdev-in %(VIF)s -s %(IP)s \
-                 -j ACCEPT" % params)
+        iptables('-A', 'FORWARD', '-m', 'physdev',
+                 '--physdev-in', '%(VIF)s' % params,
+                 '-s', '%(IP)s' % params,
+                 '-j', 'ACCEPT')
 
 
 def apply_arptables_rules(command, params):
-    arptables = lambda rule: execute("/sbin/arptables %s" % rule)
+    arptables = lambda *rule: execute('/sbin/arptables', *rule)
 
-    arptables("-D FORWARD --opcode Request --in-interface %(VIF)s \
-              --source-ip %(IP)s --source-mac %(MAC)s -j ACCEPT" % params)
-    arptables("-D FORWARD --opcode Reply --in-interface %(VIF)s \
-              --source-ip %(IP)s --source-mac %(MAC)s -j ACCEPT" % params)
+    arptables('-D', 'FORWARD', '--opcode', 'Request',
+              '--in-interface', '%(VIF)s' % params,
+              '--source-ip', '%(IP)s' % params,
+              '--source-mac', '%(MAC)s' % params,
+              '-j', 'ACCEPT')
+    arptables('-D', 'FORWARD', '--opcode', 'Reply',
+              '--in-interface', '%(VIF)s' % params,
+              '--source-ip', '%(IP)s' % params,
+              '--source-mac', '%(MAC)s' % params,
+              '-j', 'ACCEPT')
    if command == 'online':
-        arptables("-A FORWARD --opcode Request --in-interface %(VIF)s \
-                  --source-ip %(IP)s --source-mac %(MAC)s -j ACCEPT" % params)
-        arptables("-A FORWARD --opcode Reply --in-interface %(VIF)s \
-                  --source-ip %(IP)s --source-mac %(MAC)s -j ACCEPT" % params)
+        arptables('-A', 'FORWARD', '--opcode', 'Request',
+                  '--in-interface', '%(VIF)s' % params,
+                  '--source-ip', '%(IP)s' % params,
+                  '--source-mac', '%(MAC)s' % params,
+                  '-j', 'ACCEPT')
+        arptables('-A', 'FORWARD', '--opcode', 'Reply',
+                  '--in-interface', '%(VIF)s' % params,
+                  '--source-ip', '%(IP)s' % params,
+                  '--source-mac', '%(MAC)s' % params,
+                  '-j', 'ACCEPT')
 
 
 def apply_ebtables_rules(command, params):
-    ebtables = lambda rule: execute("/sbin/ebtables %s" % rule)
+    ebtables = lambda *rule: execute("/sbin/ebtables", *rule)
 
-    ebtables("-D FORWARD -p 0806 -o %(VIF)s --arp-ip-dst %(IP)s -j ACCEPT" %
-             params)
-    ebtables("-D FORWARD -p 0800 -o %(VIF)s --ip-dst %(IP)s -j ACCEPT" %
-             params)
+    ebtables('-D', 'FORWARD', '-p', '0806', '-o', params['VIF'],
+             '--arp-ip-dst', params['IP'],
+             '-j', 'ACCEPT')
+    ebtables('-D', 'FORWARD', '-p', '0800', '-o',
+             params['VIF'], '--ip-dst', params['IP'],
+             '-j', 'ACCEPT')
    if command == 'online':
-        ebtables("-A FORWARD -p 0806 -o %(VIF)s --arp-ip-dst %(IP)s \
-                 -j ACCEPT" % params)
-        ebtables("-A FORWARD -p 0800 -o %(VIF)s --ip-dst %(IP)s \
-                 -j ACCEPT" % params)
+        ebtables('-A', 'FORWARD', '-p', '0806',
+                 '-o', params['VIF'],
+                 '--arp-ip-dst', params['IP'],
+                 '-j', 'ACCEPT')
+        ebtables('-A', 'FORWARD', '-p', '0800',
+                 '-o', params['VIF'],
+                 '--ip-dst', params['IP'],
+                 '-j', 'ACCEPT')
 
-    ebtables("-D FORWARD -s ! %(MAC)s -i %(VIF)s -j DROP" % params)
+    ebtables('-D', 'FORWARD', '-s', '!', params['MAC'],
+             '-i', params['VIF'], '-j', 'DROP')
     if command == 'online':
-        ebtables("-I FORWARD 1 -s ! %(MAC)s -i %(VIF)s -j DROP" % params)
+        ebtables('-I', 'FORWARD', '1', '-s', '!', params['MAC'],
+                 '-i', params['VIF'], '-j', 'DROP')
 
 
 if __name__ == "__main__":
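One pattern to note in these rule helpers: each option fragment is its own argv entry, and `'%(KEY)s' % params` expands from the params dict before the list is built. A quick check with made-up values:

    params = {'VIF': 'vif1.0', 'IP': '10.0.0.2', 'MAC': '02:16:3e:00:00:01'}
    assert '%(VIF)s' % params == 'vif1.0'
    # so iptables('-D', 'FORWARD', '-m', 'physdev',
    #             '--physdev-in', '%(VIF)s' % params, ...)
    # invokes ['/sbin/iptables', '-D', 'FORWARD', '-m', 'physdev',
    #          '--physdev-in', 'vif1.0', ...] with no shell in between.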
@@ -35,7 +35,7 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
 import boto
 import nova
 from boto.ec2.connection import EC2Connection
-from euca2ools import Euca2ool, InstanceValidationError, Util, ConnectionFailed
+from euca2ools import Euca2ool, InstanceValidationError, Util
 
 usage_string = """
 Retrieves a url to an ajax console terminal
@@ -147,7 +147,7 @@ def main():
 
     try:
         euca_conn = euca.make_connection()
-    except ConnectionFailed, e:
+    except Exception, e:
         print e.message
         sys.exit(1)
     try:
@@ -10,6 +10,7 @@ boto==1.9b
 carrot==0.10.5
 eventlet==0.9.12
 lockfile==0.8
+python-novaclient==2.3
 python-daemon==1.5.5
 python-gflags==1.3
 redis==2.0.0