merged to trunk rev781

Kei Masumoto 2011-03-10 15:23:13 +09:00
commit 0dcd680771
62 changed files with 1802 additions and 753 deletions

View File

@@ -36,51 +36,15 @@ gettext.install('nova', unicode=1)
from nova import flags
from nova import log as logging
from nova import service
from nova import utils
from nova import version
from nova import wsgi
LOG = logging.getLogger('nova.api')
FLAGS = flags.FLAGS
flags.DEFINE_string('paste_config', "api-paste.ini",
'File name for the paste.deploy config for nova-api')
flags.DEFINE_string('ec2_listen', "0.0.0.0",
'IP address for EC2 API to listen')
flags.DEFINE_integer('ec2_listen_port', 8773, 'port for ec2 api to listen')
flags.DEFINE_string('osapi_listen', "0.0.0.0",
'IP address for OpenStack API to listen')
flags.DEFINE_integer('osapi_listen_port', 8774, 'port for os api to listen')
flags.DEFINE_flag(flags.HelpFlag())
flags.DEFINE_flag(flags.HelpshortFlag())
flags.DEFINE_flag(flags.HelpXMLFlag())
API_ENDPOINTS = ['ec2', 'osapi']
def run_app(paste_config_file):
LOG.debug(_("Using paste.deploy config at: %s"), paste_config_file)
apps = []
for api in API_ENDPOINTS:
config = wsgi.load_paste_configuration(paste_config_file, api)
if config is None:
LOG.debug(_("No paste configuration for app: %s"), api)
continue
LOG.debug(_("App Config: %(api)s\n%(config)r") % locals())
LOG.info(_("Running %s API"), api)
app = wsgi.load_paste_app(paste_config_file, api)
apps.append((app, getattr(FLAGS, "%s_listen_port" % api),
getattr(FLAGS, "%s_listen" % api)))
if len(apps) == 0:
LOG.error(_("No known API applications configured in %s."),
paste_config_file)
return
server = wsgi.Server()
for app in apps:
server.start(*app)
server.wait()
if __name__ == '__main__':
utils.default_flagfile()
@@ -92,9 +56,6 @@ if __name__ == '__main__':
for flag in FLAGS:
flag_get = FLAGS.get(flag, None)
LOG.debug("%(flag)s : %(flag_get)s" % locals())
conf = wsgi.paste_config_file(FLAGS.paste_config)
if conf:
run_app(conf)
else:
LOG.error(_("No paste configuration found for: %s"),
FLAGS.paste_config)
service = service.serve_wsgi(service.ApiService)
service.wait()
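
This hunk removes the hand-rolled paste loading in bin/nova-api; that logic moves behind nova.service, and serve_wsgi() takes the service class directly. A rough sketch of the pattern, with purely illustrative internals (the real implementation lives in nova/service.py and may differ):

def serve_wsgi(cls, conf=None):
    # Illustrative only: create() and start() internals are assumptions.
    svc = cls.create(conf)   # load the paste config and build the WSGI apps
    svc.start()              # bind each app to its configured host/port
    return svc

The entry point then shrinks to the two lines kept above: serve_wsgi(service.ApiService) followed by wait().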

View File

@@ -55,6 +55,8 @@
import datetime
import gettext
import glob
import json
import os
import re
import sys
@@ -81,7 +83,7 @@ from nova import log as logging
from nova import quota
from nova import rpc
from nova import utils
from nova.api.ec2.cloud import ec2_id_to_id
from nova.api.ec2 import ec2utils
from nova.auth import manager
from nova.cloudpipe import pipelib
from nova.compute import instance_types
@@ -94,6 +96,7 @@ flags.DECLARE('network_size', 'nova.network.manager')
flags.DECLARE('vlan_start', 'nova.network.manager')
flags.DECLARE('vpn_start', 'nova.network.manager')
flags.DECLARE('fixed_range_v6', 'nova.network.manager')
flags.DECLARE('images_path', 'nova.image.local')
flags.DEFINE_flag(flags.HelpFlag())
flags.DEFINE_flag(flags.HelpshortFlag())
flags.DEFINE_flag(flags.HelpXMLFlag())
@@ -104,7 +107,7 @@ def param2id(object_id):
args: [object_id], e.g. 'vol-0000000a' or 'volume-0000000a' or '10'
"""
if '-' in object_id:
return ec2_id_to_id(object_id)
return ec2utils.ec2_id_to_id(object_id)
else:
return int(object_id)
@@ -545,6 +548,15 @@ class NetworkCommands(object):
network.dhcp_start,
network.dns)
def delete(self, fixed_range):
"""Deletes a network"""
network = db.network_get_by_cidr(context.get_admin_context(), \
fixed_range)
if network.project_id is not None:
raise ValueError(_('Network must be disassociated from project %s'
' before delete' % network.project_id))
db.network_delete_safe(context.get_admin_context(), network.id)
class VmCommands(object):
"""Class for mangaging VM instances."""
@ -822,6 +834,155 @@ class InstanceTypeCommands(object):
self._print_instance_types(name, inst_types)
class ImageCommands(object):
"""Methods for dealing with a cloud in an odd state"""
def __init__(self, *args, **kwargs):
self.image_service = utils.import_object(FLAGS.image_service)
def _register(self, image_type, disk_format, container_format,
path, owner, name=None, is_public='T',
architecture='x86_64', kernel_id=None, ramdisk_id=None):
meta = {'is_public': True,
'name': name,
'disk_format': disk_format,
'container_format': container_format,
'properties': {'image_state': 'available',
'owner': owner,
'type': image_type,
'architecture': architecture,
'image_location': 'local',
'is_public': (is_public == 'T')}}
print image_type, meta
if kernel_id:
meta['properties']['kernel_id'] = int(kernel_id)
if ramdisk_id:
meta['properties']['ramdisk_id'] = int(ramdisk_id)
elevated = context.get_admin_context()
try:
with open(path) as ifile:
image = self.image_service.create(elevated, meta, ifile)
new = image['id']
print _("Image registered to %(new)s (%(new)08x).") % locals()
return new
except Exception as exc:
print _("Failed to register %(path)s: %(exc)s") % locals()
def all_register(self, image, kernel, ramdisk, owner, name=None,
is_public='T', architecture='x86_64'):
"""Uploads an image, kernel, and ramdisk into the image_service
arguments: image kernel ramdisk owner [name] [is_public='T']
[architecture='x86_64']"""
kernel_id = self.kernel_register(kernel, owner, None,
is_public, architecture)
ramdisk_id = self.ramdisk_register(ramdisk, owner, None,
is_public, architecture)
self.image_register(image, owner, name, is_public,
architecture, kernel_id, ramdisk_id)
def image_register(self, path, owner, name=None, is_public='T',
architecture='x86_64', kernel_id=None, ramdisk_id=None,
disk_format='ami', container_format='ami'):
"""Uploads an image into the image_service
arguments: path owner [name] [is_public='T'] [architecture='x86_64']
[kernel_id=None] [ramdisk_id=None]
[disk_format='ami'] [container_format='ami']"""
return self._register('machine', disk_format, container_format, path,
owner, name, is_public, architecture,
kernel_id, ramdisk_id)
def kernel_register(self, path, owner, name=None, is_public='T',
architecture='x86_64'):
"""Uploads a kernel into the image_service
arguments: path owner [name] [is_public='T'] [architecture='x86_64']
"""
return self._register('kernel', 'aki', 'aki', path, owner, name,
is_public, architecture)
def ramdisk_register(self, path, owner, name=None, is_public='T',
architecture='x86_64'):
"""Uploads a ramdisk into the image_service
arguments: path owner [name] [is_public='T'] [architecture='x86_64']
"""
return self._register('ramdisk', 'ari', 'ari', path, owner, name,
is_public, architecture)
def _lookup(self, old_image_id):
try:
internal_id = ec2utils.ec2_id_to_id(old_image_id)
image = self.image_service.show(context, internal_id)
except exception.NotFound:
image = self.image_service.show_by_name(context, old_image_id)
return image['id']
def _old_to_new(self, old):
mapping = {'machine': 'ami',
'kernel': 'aki',
'ramdisk': 'ari'}
container_format = mapping[old['type']]
disk_format = container_format
new = {'disk_format': disk_format,
'container_format': container_format,
'is_public': True,
'name': old['imageId'],
'properties': {'image_state': old['imageState'],
'owner': old['imageOwnerId'],
'architecture': old['architecture'],
'type': old['type'],
'image_location': old['imageLocation'],
'is_public': old['isPublic']}}
if old.get('kernelId'):
new['properties']['kernel_id'] = self._lookup(old['kernelId'])
if old.get('ramdiskId'):
new['properties']['ramdisk_id'] = self._lookup(old['ramdiskId'])
return new
def _convert_images(self, images):
elevated = context.get_admin_context()
for image_path, image_metadata in images.iteritems():
meta = self._old_to_new(image_metadata)
old = meta['name']
try:
with open(image_path) as ifile:
image = self.image_service.create(elevated, meta, ifile)
new = image['id']
print _("Image %(old)s converted to " \
"%(new)s (%(new)08x).") % locals()
except Exception as exc:
print _("Failed to convert %(old)s: %(exc)s") % locals()
def convert(self, directory):
"""Uploads old objectstore images in directory to new service
arguments: directory"""
machine_images = {}
other_images = {}
directory = os.path.abspath(directory)
# NOTE(vish): If we're importing from the images path dir, attempt
# to move the files out of the way before importing
# so we aren't writing to the same directory. This
# may fail if the dir was a mountpoint.
if (FLAGS.image_service == 'nova.image.local.LocalImageService'
and directory == os.path.abspath(FLAGS.images_path)):
new_dir = "%s_bak" % directory
os.rename(directory, new_dir)
os.mkdir(directory)
directory = new_dir
for fn in glob.glob("%s/*/info.json" % directory):
try:
image_path = os.path.join(fn.rpartition('/')[0], 'image')
with open(fn) as metadata_file:
image_metadata = json.load(metadata_file)
if image_metadata['type'] == 'machine':
machine_images[image_path] = image_metadata
else:
other_images[image_path] = image_metadata
except Exception as exc:
print _("Failed to load %(fn)s.") % locals()
# NOTE(vish): do kernels and ramdisks first so machine images
#             can reference them
self._convert_images(other_images)
self._convert_images(machine_images)
CATEGORIES = [
('user', UserCommands),
('project', ProjectCommands),
@@ -837,6 +998,7 @@ CATEGORIES = [
('db', DbCommands),
('volume', VolumeCommands),
('instance_type', InstanceTypeCommands),
('image', ImageCommands),
('flavor', InstanceTypeCommands)]
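
To illustrate the convert path added above, here is roughly how _old_to_new() maps a Bexar-era info.json record into the new image-service metadata; the values are an assumed example, not taken from a real image:

# Assumed example of an old objectstore info.json entry:
old = {'imageId': 'ami-00000003',
       'imageState': 'available',
       'imageOwnerId': 'proj1',
       'architecture': 'x86_64',
       'type': 'machine',
       'imageLocation': 'mybucket/image.manifest.xml',
       'isPublic': True,
       'kernelId': 'aki-00000001'}
# _old_to_new(old) then produces approximately:
new = {'disk_format': 'ami',
       'container_format': 'ami',
       'is_public': True,
       'name': 'ami-00000003',
       'properties': {'image_state': 'available',
                      'owner': 'proj1',
                      'architecture': 'x86_64',
                      'type': 'machine',
                      'image_location': 'mybucket/image.manifest.xml',
                      'is_public': True,
                      'kernel_id': 1}}  # resolved via _lookup('aki-00000001')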

View File

@@ -167,9 +167,6 @@ NOVA_CONF_EOF
$NOVA_DIR/bin/nova-manage user admin admin admin admin
# create a project called 'admin' with project manager of 'admin'
$NOVA_DIR/bin/nova-manage project create admin admin
# export environment variables for project 'admin' and user 'admin'
$NOVA_DIR/bin/nova-manage project zipfile admin admin $NOVA_DIR/nova.zip
unzip -o $NOVA_DIR/nova.zip -d $NOVA_DIR/
# create a small network
$NOVA_DIR/bin/nova-manage network create 10.0.0.0/8 1 32
@@ -185,6 +182,11 @@ NOVA_CONF_EOF
screen_it scheduler "$NOVA_DIR/bin/nova-scheduler"
screen_it volume "$NOVA_DIR/bin/nova-volume"
screen_it ajax_console_proxy "$NOVA_DIR/bin/nova-ajax-console-proxy"
sleep 2
# export environment variables for project 'admin' and user 'admin'
$NOVA_DIR/bin/nova-manage project zipfile admin admin $NOVA_DIR/nova.zip
unzip -o $NOVA_DIR/nova.zip -d $NOVA_DIR/
screen_it test ". $NOVA_DIR/novarc"
screen -S nova -x
fi

View File

@@ -8,5 +8,6 @@ from nova import utils
def setup(app):
rootdir = os.path.abspath(app.srcdir + '/..')
print "**Autodocumenting from %s" % rootdir
rv = utils.execute('cd %s && ./generate_autodoc_index.sh' % rootdir)
os.chdir(rootdir)
rv = utils.execute('./generate_autodoc_index.sh')
print rv[0]

View File

@@ -173,7 +173,10 @@ Nova Floating IPs
``nova-manage floating create <host> <ip_range>``
Creates floating IP addresses for the named host by the given range.
floating delete <ip_range> Deletes floating IP addresses in the range given.
``nova-manage floating delete <ip_range>``
Deletes floating IP addresses in the range given.
``nova-manage floating list``
@@ -193,7 +196,7 @@ Nova Flavor
``nova-manage flavor create <name> <memory> <vCPU> <local_storage> <flavorID> <(optional) swap> <(optional) RXTX Quota> <(optional) RXTX Cap>``
creates a flavor with the following positional arguments:
* memory (expressed in megabytes)
* vcpu(s) (integer)
* local storage (expressed in gigabytes)
* flavorid (unique integer)
@@ -209,12 +212,33 @@ Nova Flavor
Purges the flavor with the name <name>. This removes this flavor from the database.
Nova Instance_type
~~~~~~~~~~~~~~~~~~
The instance_type command is provided as an alias for the flavor command. All the same subcommands and arguments from nova-manage flavor can be used.
Nova Images
~~~~~~~~~~~
``nova-manage image image_register <path> <owner>``
Registers an image with the image service.
``nova-manage image kernel_register <path> <owner>``
Registers a kernel with the image service.
``nova-manage image ramdisk_register <path> <owner>``
Registers a ramdisk with the image service.
``nova-manage image all_register <image_path> <kernel_path> <ramdisk_path> <owner>``
Registers an image kernel and ramdisk with the image service.
``nova-manage image convert <directory>``
Converts all images in directory from the old (Bexar) format to the new format.
FILES
========

View File

@@ -182,6 +182,29 @@ Nova Floating IPs
Displays a list of all floating IP addresses.
Nova Images
~~~~~~~~~~~
``nova-manage image image_register <path> <owner>``
Registers an image with the image service.
``nova-manage image kernel_register <path> <owner>``
Registers a kernel with the image service.
``nova-manage image ramdisk_register <path> <owner>``
Registers a ramdisk with the image service.
``nova-manage image all_register <image_path> <kernel_path> <ramdisk_path> <owner>``
Registers an image, kernel, and ramdisk with the image service.
``nova-manage image convert <directory>``
Converts all images in directory from the old (Bexar) format to the new format.
Concept: Flags
--------------

View File

@@ -187,7 +187,7 @@ class ServiceWrapper(wsgi.Controller):
def __init__(self, service_handle):
self.service_handle = service_handle
@webob.dec.wsgify
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
arg_dict = req.environ['wsgiorg.routing_args'][1]
action = arg_dict['action']
@@ -206,7 +206,7 @@ class ServiceWrapper(wsgi.Controller):
params = dict([(str(k), v) for (k, v) in params.iteritems()])
result = method(context, **params)
if type(result) is dict or type(result) is list:
return self._serialize(result, req)
return self._serialize(result, req.best_match_content_type())
else:
return result
@@ -218,7 +218,7 @@ class Proxy(object):
self.prefix = prefix
def __do_request(self, path, context, **kwargs):
req = webob.Request.blank(path)
req = wsgi.Request.blank(path)
req.method = 'POST'
req.body = urllib.urlencode({'json': utils.dumps(kwargs)})
req.environ['openstack.context'] = context

View File

@@ -53,7 +53,7 @@ flags.DEFINE_list('lockout_memcached_servers', None,
class RequestLogging(wsgi.Middleware):
"""Access-Log akin logging for all EC2 API requests."""
@webob.dec.wsgify
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
start = utils.utcnow()
rv = req.get_response(self.application)
@@ -112,7 +112,7 @@ class Lockout(wsgi.Middleware):
debug=0)
super(Lockout, self).__init__(application)
@webob.dec.wsgify
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
access_key = str(req.params['AWSAccessKeyId'])
failures_key = "authfailures-%s" % access_key
@@ -141,7 +141,7 @@ class Authenticate(wsgi.Middleware):
"""Authenticate an EC2 request and add 'ec2.context' to WSGI environ."""
@webob.dec.wsgify
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
# Read request signature and access id.
try:
@@ -190,7 +190,7 @@ class Requestify(wsgi.Middleware):
super(Requestify, self).__init__(app)
self.controller = utils.import_class(controller)()
@webob.dec.wsgify
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
non_args = ['Action', 'Signature', 'AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Version', 'Timestamp']
@@ -275,7 +275,7 @@ class Authorizer(wsgi.Middleware):
},
}
@webob.dec.wsgify
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
context = req.environ['ec2.context']
controller = req.environ['ec2.request'].controller.__class__.__name__
@@ -309,7 +309,7 @@ class Executor(wsgi.Application):
response, or a 400 upon failure.
"""
@webob.dec.wsgify
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
context = req.environ['ec2.context']
api_request = req.environ['ec2.request']
@@ -371,7 +371,7 @@ class Executor(wsgi.Application):
class Versions(wsgi.Application):
@webob.dec.wsgify
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
"""Respond to a request for all EC2 versions."""
# available api versions
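
Every decorator in this file changes from the bare @webob.dec.wsgify to @webob.dec.wsgify(RequestClass=wsgi.Request), so handlers receive nova's Request subclass instead of a plain webob.Request. A minimal sketch of the idea; the helper body is an assumption about what wsgi.Request provides (the diff only shows best_match_content_type() and get_content_type() being called on it):

import webob
import webob.dec

class Request(webob.Request):
    # Assumed helper; nova's wsgi.Request may implement this differently.
    def best_match_content_type(self):
        """Pick a serialization based on the Accept header."""
        offers = ['application/json', 'application/xml']
        return self.accept.best_match(offers) or 'application/json'

@webob.dec.wsgify(RequestClass=Request)
def handler(req):
    # req is a Request here, so the negotiation helper is available.
    return req.best_match_content_type()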

View File

@@ -39,7 +39,9 @@ from nova import log as logging
from nova import network
from nova import utils
from nova import volume
from nova.api.ec2 import ec2utils
from nova.compute import instance_types
from nova.image import s3
FLAGS = flags.FLAGS
@@ -73,30 +75,19 @@ def _gen_key(context, user_id, key_name):
return {'private_key': private_key, 'fingerprint': fingerprint}
def ec2_id_to_id(ec2_id):
"""Convert an ec2 ID (i-[base 16 number]) to an instance id (int)"""
return int(ec2_id.split('-')[-1], 16)
def id_to_ec2_id(instance_id, template='i-%08x'):
"""Convert an instance ID (int) to an ec2 ID (i-[base 16 number])"""
return template % instance_id
class CloudController(object):
""" CloudController provides the critical dispatch between
inbound API calls through the endpoint and messages
sent to the other nodes.
"""
def __init__(self):
self.image_service = utils.import_object(FLAGS.image_service)
self.image_service = s3.S3ImageService()
self.network_api = network.API()
self.volume_api = volume.API()
self.compute_api = compute.API(
network_api=self.network_api,
image_service=self.image_service,
volume_api=self.volume_api,
hostname_factory=id_to_ec2_id)
hostname_factory=ec2utils.id_to_ec2_id)
self.setup()
def __str__(self):
@@ -115,7 +106,7 @@ class CloudController(object):
start = os.getcwd()
os.chdir(FLAGS.ca_path)
# TODO(vish): Do this with M2Crypto instead
utils.runthis(_("Generating root CA: %s"), "sh genrootca.sh")
utils.runthis(_("Generating root CA: %s"), "sh", "genrootca.sh")
os.chdir(start)
def _get_mpi_data(self, context, project_id):
@@ -154,11 +145,14 @@ class CloudController(object):
availability_zone = self._get_availability_zone_by_host(ctxt, host)
floating_ip = db.instance_get_floating_address(ctxt,
instance_ref['id'])
ec2_id = id_to_ec2_id(instance_ref['id'])
ec2_id = ec2utils.id_to_ec2_id(instance_ref['id'])
image_ec2_id = self._image_ec2_id(instance_ref['image_id'], 'machine')
k_ec2_id = self._image_ec2_id(instance_ref['kernel_id'], 'kernel')
r_ec2_id = self._image_ec2_id(instance_ref['ramdisk_id'], 'ramdisk')
data = {
'user-data': base64.b64decode(instance_ref['user_data']),
'meta-data': {
'ami-id': instance_ref['image_id'],
'ami-id': image_ec2_id,
'ami-launch-index': instance_ref['launch_index'],
'ami-manifest-path': 'FIXME',
'block-device-mapping': {
@@ -173,12 +167,12 @@
'instance-type': instance_ref['instance_type'],
'local-hostname': hostname,
'local-ipv4': address,
'kernel-id': instance_ref['kernel_id'],
'kernel-id': k_ec2_id,
'ramdisk-id': r_ec2_id,
'placement': {'availability-zone': availability_zone},
'public-hostname': hostname,
'public-ipv4': floating_ip or '',
'public-keys': keys,
'ramdisk-id': instance_ref['ramdisk_id'],
'reservation-id': instance_ref['reservation_id'],
'security-groups': '',
'mpi': mpi}}
@@ -525,7 +519,7 @@ class CloudController(object):
ec2_id = instance_id[0]
else:
ec2_id = instance_id
instance_id = ec2_id_to_id(ec2_id)
instance_id = ec2utils.ec2_id_to_id(ec2_id)
output = self.compute_api.get_console_output(
context, instance_id=instance_id)
now = datetime.datetime.utcnow()
@@ -535,7 +529,7 @@
def get_ajax_console(self, context, instance_id, **kwargs):
ec2_id = instance_id[0]
instance_id = ec2_id_to_id(ec2_id)
instance_id = ec2utils.ec2_id_to_id(ec2_id)
return self.compute_api.get_ajax_console(context,
instance_id=instance_id)
@@ -543,7 +537,7 @@
if volume_id:
volumes = []
for ec2_id in volume_id:
internal_id = ec2_id_to_id(ec2_id)
internal_id = ec2utils.ec2_id_to_id(ec2_id)
volume = self.volume_api.get(context, internal_id)
volumes.append(volume)
else:
@@ -556,11 +550,11 @@
instance_data = None
if volume.get('instance', None):
instance_id = volume['instance']['id']
instance_ec2_id = id_to_ec2_id(instance_id)
instance_ec2_id = ec2utils.id_to_ec2_id(instance_id)
instance_data = '%s[%s]' % (instance_ec2_id,
volume['instance']['host'])
v = {}
v['volumeId'] = id_to_ec2_id(volume['id'], 'vol-%08x')
v['volumeId'] = ec2utils.id_to_ec2_id(volume['id'], 'vol-%08x')
v['status'] = volume['status']
v['size'] = volume['size']
v['availabilityZone'] = volume['availability_zone']
@@ -578,8 +572,7 @@
'device': volume['mountpoint'],
'instanceId': instance_ec2_id,
'status': 'attached',
'volumeId': id_to_ec2_id(volume['id'],
'vol-%08x')}]
'volumeId': v['volumeId']}]
else:
v['attachmentSet'] = [{}]
@@ -598,12 +591,12 @@
return {'volumeSet': [self._format_volume(context, dict(volume))]}
def delete_volume(self, context, volume_id, **kwargs):
volume_id = ec2_id_to_id(volume_id)
volume_id = ec2utils.ec2_id_to_id(volume_id)
self.volume_api.delete(context, volume_id=volume_id)
return True
def update_volume(self, context, volume_id, **kwargs):
volume_id = ec2_id_to_id(volume_id)
volume_id = ec2utils.ec2_id_to_id(volume_id)
updatable_fields = ['display_name', 'display_description']
changes = {}
for field in updatable_fields:
@@ -614,8 +607,8 @@
return True
def attach_volume(self, context, volume_id, instance_id, device, **kwargs):
volume_id = ec2_id_to_id(volume_id)
instance_id = ec2_id_to_id(instance_id)
volume_id = ec2utils.ec2_id_to_id(volume_id)
instance_id = ec2utils.ec2_id_to_id(instance_id)
msg = _("Attach volume %(volume_id)s to instance %(instance_id)s"
" at %(device)s") % locals()
LOG.audit(msg, context=context)
@@ -626,22 +619,22 @@
volume = self.volume_api.get(context, volume_id)
return {'attachTime': volume['attach_time'],
'device': volume['mountpoint'],
'instanceId': id_to_ec2_id(instance_id),
'instanceId': ec2utils.id_to_ec2_id(instance_id),
'requestId': context.request_id,
'status': volume['attach_status'],
'volumeId': id_to_ec2_id(volume_id, 'vol-%08x')}
'volumeId': ec2utils.id_to_ec2_id(volume_id, 'vol-%08x')}
def detach_volume(self, context, volume_id, **kwargs):
volume_id = ec2_id_to_id(volume_id)
volume_id = ec2utils.ec2_id_to_id(volume_id)
LOG.audit(_("Detach volume %s"), volume_id, context=context)
volume = self.volume_api.get(context, volume_id)
instance = self.compute_api.detach_volume(context, volume_id=volume_id)
return {'attachTime': volume['attach_time'],
'device': volume['mountpoint'],
'instanceId': id_to_ec2_id(instance['id']),
'instanceId': ec2utils.id_to_ec2_id(instance['id']),
'requestId': context.request_id,
'status': volume['attach_status'],
'volumeId': id_to_ec2_id(volume_id, 'vol-%08x')}
'volumeId': ec2utils.id_to_ec2_id(volume_id, 'vol-%08x')}
def _convert_to_set(self, lst, label):
if lst == None or lst == []:
@@ -675,7 +668,7 @@
if instance_id:
instances = []
for ec2_id in instance_id:
internal_id = ec2_id_to_id(ec2_id)
internal_id = ec2utils.ec2_id_to_id(ec2_id)
instance = self.compute_api.get(context,
instance_id=internal_id)
instances.append(instance)
@@ -687,9 +680,9 @@
continue
i = {}
instance_id = instance['id']
ec2_id = id_to_ec2_id(instance_id)
ec2_id = ec2utils.id_to_ec2_id(instance_id)
i['instanceId'] = ec2_id
i['imageId'] = instance['image_id']
i['imageId'] = self._image_ec2_id(instance['image_id'])
i['instanceState'] = {
'code': instance['state'],
'name': instance['state_description']}
@@ -755,7 +748,7 @@
if (floating_ip_ref['fixed_ip']
and floating_ip_ref['fixed_ip']['instance']):
instance_id = floating_ip_ref['fixed_ip']['instance']['id']
ec2_id = id_to_ec2_id(instance_id)
ec2_id = ec2utils.id_to_ec2_id(instance_id)
address_rv = {'public_ip': address,
'instance_id': ec2_id}
if context.is_admin:
@@ -778,7 +771,7 @@
def associate_address(self, context, instance_id, public_ip, **kwargs):
LOG.audit(_("Associate address %(public_ip)s to"
" instance %(instance_id)s") % locals(), context=context)
instance_id = ec2_id_to_id(instance_id)
instance_id = ec2utils.ec2_id_to_id(instance_id)
self.compute_api.associate_floating_ip(context,
instance_id=instance_id,
address=public_ip)
@@ -791,13 +784,19 @@
def run_instances(self, context, **kwargs):
max_count = int(kwargs.get('max_count', 1))
if kwargs.get('kernel_id'):
kernel = self._get_image(context, kwargs['kernel_id'])
kwargs['kernel_id'] = kernel['id']
if kwargs.get('ramdisk_id'):
ramdisk = self._get_image(context, kwargs['ramdisk_id'])
kwargs['ramdisk_id'] = ramdisk['id']
instances = self.compute_api.create(context,
instance_type=instance_types.get_by_type(
kwargs.get('instance_type', None)),
image_id=kwargs['image_id'],
image_id=self._get_image(context, kwargs['image_id'])['id'],
min_count=int(kwargs.get('min_count', max_count)),
max_count=max_count,
kernel_id=kwargs.get('kernel_id', None),
kernel_id=kwargs.get('kernel_id'),
ramdisk_id=kwargs.get('ramdisk_id'),
display_name=kwargs.get('display_name'),
display_description=kwargs.get('display_description'),
@@ -814,7 +813,7 @@
instance_id is a kwarg so its name cannot be modified."""
LOG.debug(_("Going to start terminating instances"))
for ec2_id in instance_id:
instance_id = ec2_id_to_id(ec2_id)
instance_id = ec2utils.ec2_id_to_id(ec2_id)
self.compute_api.delete(context, instance_id=instance_id)
return True
@@ -822,19 +821,19 @@
"""instance_id is a list of instance ids"""
LOG.audit(_("Reboot instance %r"), instance_id, context=context)
for ec2_id in instance_id:
instance_id = ec2_id_to_id(ec2_id)
instance_id = ec2utils.ec2_id_to_id(ec2_id)
self.compute_api.reboot(context, instance_id=instance_id)
return True
def rescue_instance(self, context, instance_id, **kwargs):
"""This is an extension to the normal ec2_api"""
instance_id = ec2_id_to_id(instance_id)
instance_id = ec2utils.ec2_id_to_id(instance_id)
self.compute_api.rescue(context, instance_id=instance_id)
return True
def unrescue_instance(self, context, instance_id, **kwargs):
"""This is an extension to the normal ec2_api"""
instance_id = ec2_id_to_id(instance_id)
instance_id = ec2utils.ec2_id_to_id(instance_id)
self.compute_api.unrescue(context, instance_id=instance_id)
return True
@@ -845,41 +844,80 @@
if field in kwargs:
changes[field] = kwargs[field]
if changes:
instance_id = ec2_id_to_id(instance_id)
instance_id = ec2utils.ec2_id_to_id(instance_id)
self.compute_api.update(context, instance_id=instance_id, **changes)
return True
def _format_image(self, context, image):
_type_prefix_map = {'machine': 'ami',
'kernel': 'aki',
'ramdisk': 'ari'}
def _image_ec2_id(self, image_id, image_type='machine'):
prefix = self._type_prefix_map[image_type]
template = prefix + '-%08x'
return ec2utils.id_to_ec2_id(int(image_id), template=template)
def _get_image(self, context, ec2_id):
try:
internal_id = ec2utils.ec2_id_to_id(ec2_id)
return self.image_service.show(context, internal_id)
except exception.NotFound:
return self.image_service.show_by_name(context, ec2_id)
def _format_image(self, image):
"""Convert from format defined by BaseImageService to S3 format."""
i = {}
i['imageId'] = image.get('id')
i['kernelId'] = image.get('kernel_id')
i['ramdiskId'] = image.get('ramdisk_id')
i['imageOwnerId'] = image.get('owner_id')
i['imageLocation'] = image.get('location')
i['imageState'] = image.get('status')
i['type'] = image.get('type')
i['isPublic'] = image.get('is_public')
i['architecture'] = image.get('architecture')
image_type = image['properties'].get('type')
ec2_id = self._image_ec2_id(image.get('id'), image_type)
name = image.get('name')
if name:
i['imageId'] = "%s (%s)" % (ec2_id, name)
else:
i['imageId'] = ec2_id
kernel_id = image['properties'].get('kernel_id')
if kernel_id:
i['kernelId'] = self._image_ec2_id(kernel_id, 'kernel')
ramdisk_id = image['properties'].get('ramdisk_id')
if ramdisk_id:
i['ramdiskId'] = self._image_ec2_id(ramdisk_id, 'ramdisk')
i['imageOwnerId'] = image['properties'].get('owner_id')
i['imageLocation'] = image['properties'].get('image_location')
i['imageState'] = image['properties'].get('image_state')
i['type'] = image_type
i['isPublic'] = str(image['properties'].get('is_public', '')) == 'True'
i['architecture'] = image['properties'].get('architecture')
return i
def describe_images(self, context, image_id=None, **kwargs):
# NOTE: image_id is a list!
images = self.image_service.index(context)
if image_id:
images = filter(lambda x: x['id'] in image_id, images)
images = [self._format_image(context, i) for i in images]
images = []
for ec2_id in image_id:
try:
image = self._get_image(context, ec2_id)
except exception.NotFound:
raise exception.NotFound(_('Image %s not found') %
ec2_id)
images.append(image)
else:
images = self.image_service.detail(context)
images = [self._format_image(i) for i in images]
return {'imagesSet': images}
def deregister_image(self, context, image_id, **kwargs):
LOG.audit(_("De-registering image %s"), image_id, context=context)
self.image_service.deregister(context, image_id)
image = self._get_image(context, image_id)
internal_id = image['id']
self.image_service.delete(context, internal_id)
return {'imageId': image_id}
def register_image(self, context, image_location=None, **kwargs):
if image_location is None and 'name' in kwargs:
image_location = kwargs['name']
image_id = self.image_service.register(context, image_location)
metadata = {'properties': {'image_location': image_location}}
image = self.image_service.create(context, metadata)
image_id = self._image_ec2_id(image['id'],
image['properties']['type'])
msg = _("Registered image %(image_location)s with"
" id %(image_id)s") % locals()
LOG.audit(msg, context=context)
@@ -890,13 +928,11 @@
raise exception.ApiError(_('attribute not supported: %s')
% attribute)
try:
image = self._format_image(context,
self.image_service.show(context,
image_id))
except IndexError:
raise exception.ApiError(_('invalid id: %s') % image_id)
result = {'image_id': image_id, 'launchPermission': []}
if image['isPublic']:
image = self._get_image(context, image_id)
except exception.NotFound:
raise exception.NotFound(_('Image %s not found') % image_id)
result = {'imageId': image_id, 'launchPermission': []}
if image['properties']['is_public']:
result['launchPermission'].append({'group': 'all'})
return result
@@ -913,8 +949,18 @@
if not operation_type in ['add', 'remove']:
raise exception.ApiError(_('operation_type must be add or remove'))
LOG.audit(_("Updating image %s publicity"), image_id, context=context)
return self.image_service.modify(context, image_id, operation_type)
try:
image = self._get_image(context, image_id)
except exception.NotFound:
raise exception.NotFound(_('Image %s not found') % image_id)
internal_id = image['id']
del(image['id'])
image['properties']['is_public'] = (operation_type == 'add')
return self.image_service.update(context, internal_id, image)
def update_image(self, context, image_id, **kwargs):
result = self.image_service.update(context, image_id, dict(kwargs))
internal_id = ec2utils.ec2_id_to_id(image_id)
result = self.image_service.update(context, internal_id, dict(kwargs))
return result
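
Taken together, _image_ec2_id() and _format_image() turn an internal image-service record into the EC2 response shape. An assumed example record and its approximate output:

image = {'id': 5,
         'name': 'myimage',
         'properties': {'type': 'machine',
                        'kernel_id': 1,
                        'ramdisk_id': 2,
                        'owner_id': 'proj1',
                        'image_location': 'mybucket/image',
                        'image_state': 'available',
                        'is_public': True,
                        'architecture': 'x86_64'}}
# _format_image(image) yields roughly:
# {'imageId': 'ami-00000005 (myimage)',
#  'kernelId': 'aki-00000001',
#  'ramdiskId': 'ari-00000002',
#  'imageOwnerId': 'proj1',
#  'imageLocation': 'mybucket/image',
#  'imageState': 'available',
#  'type': 'machine',
#  'isPublic': True,
#  'architecture': 'x86_64'}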

nova/api/ec2/ec2utils.py (new file, 32 additions)
View File

@@ -0,0 +1,32 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import exception
def ec2_id_to_id(ec2_id):
"""Convert an ec2 ID (i-[base 16 number]) to an instance id (int)"""
try:
return int(ec2_id.split('-')[-1], 16)
except ValueError:
raise exception.NotFound(_("Id %s Not Found") % ec2_id)
def id_to_ec2_id(instance_id, template='i-%08x'):
"""Convert an instance ID (int) to an ec2 ID (i-[base 16 number])"""
return template % instance_id
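
A quick round trip through the two helpers in this new module (the 'vol-%08x' template mirrors its use in cloud.py above):

from nova.api.ec2 import ec2utils

assert ec2utils.ec2_id_to_id('i-0000000a') == 10
assert ec2utils.id_to_ec2_id(10) == 'i-0000000a'
assert ec2utils.id_to_ec2_id(10, 'vol-%08x') == 'vol-0000000a'
# Malformed ids now surface as exception.NotFound rather than a bare
# ValueError, e.g. ec2utils.ec2_id_to_id('i-bogus').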

View File

@@ -65,7 +65,7 @@ class MetadataRequestHandler(wsgi.Application):
data = data[item]
return data
@webob.dec.wsgify
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
cc = cloud.CloudController()
remote_address = req.remote_addr

View File

@@ -47,7 +47,7 @@ flags.DEFINE_bool('allow_admin_api',
class FaultWrapper(wsgi.Middleware):
"""Calls down the middleware stack, making exceptions into faults."""
@webob.dec.wsgify
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
try:
return req.get_response(self.application)
@@ -115,7 +115,7 @@ class APIRouter(wsgi.Router):
class Versions(wsgi.Application):
@webob.dec.wsgify
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
"""Respond to a request for all OpenStack API versions."""
response = {
@@ -124,4 +124,6 @@ class Versions(wsgi.Application):
metadata = {
"application/xml": {
"attributes": dict(version=["status", "id"])}}
return wsgi.Serializer(req.environ, metadata).to_content_type(response)
content_type = req.best_match_content_type()
return wsgi.Serializer(metadata).serialize(response, content_type)
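
The Serializer is now constructed from the metadata alone rather than from the WSGI environ, and the caller resolves the response content type up front. The new calling convention, restated as a sketch:

content_type = req.best_match_content_type()  # 'application/json' or 'application/xml'
serializer = wsgi.Serializer(metadata)
body = serializer.serialize(response, content_type)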

View File

@@ -46,7 +46,7 @@ class AuthMiddleware(wsgi.Middleware):
self.auth = auth.manager.AuthManager()
super(AuthMiddleware, self).__init__(application)
@webob.dec.wsgify
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
if not self.has_authentication(req):
return self.authenticate(req)
@@ -121,7 +121,7 @@ class AuthMiddleware(wsgi.Middleware):
username - string
key - string API key
req - webob.Request object
req - wsgi.Request object
"""
ctxt = context.get_admin_context()
user = self.auth.get_user_from_access_key(key)

View File

@@ -25,7 +25,7 @@ def limited(items, request, max_limit=1000):
Return a slice of items according to requested offset and limit.
@param items: A sliceable entity
@param request: `webob.Request` possibly containing 'offset' and 'limit'
@param request: `wsgi.Request` possibly containing 'offset' and 'limit'
GET variables. 'offset' is where to start in the list,
and 'limit' is the maximum number of items to return. If
'limit' is not specified, 0, or > max_limit, we default
@@ -36,15 +36,18 @@ def limited(items, request, max_limit=1000):
try:
offset = int(request.GET.get('offset', 0))
except ValueError:
offset = 0
raise webob.exc.HTTPBadRequest(_('offset param must be an integer'))
try:
limit = int(request.GET.get('limit', max_limit))
except ValueError:
limit = max_limit
raise webob.exc.HTTPBadRequest(_('limit param must be an integer'))
if offset < 0 or limit < 0:
raise webob.exc.HTTPBadRequest()
if limit < 0:
raise webob.exc.HTTPBadRequest(_('limit param must be positive'))
if offset < 0:
raise webob.exc.HTTPBadRequest(_('offset param must be positive'))
limit = min(max_limit, limit or max_limit)
range_end = offset + limit
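
The net effect of this hunk is that bad paging parameters are rejected with a 400 instead of being silently replaced by defaults. A usage sketch (Request.blank is webob's test helper; limited() is otherwise unchanged):

from webob import Request

items = list(range(20))
req = Request.blank('/servers?offset=10&limit=5')
# limited(items, req) -> [10, 11, 12, 13, 14]

req = Request.blank('/servers?limit=foo')
# limited(items, req) now raises HTTPBadRequest rather than quietly
# falling back to max_limit.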

View File

@@ -65,7 +65,7 @@ class Controller(wsgi.Controller):
def create(self, req, server_id):
"""Creates a new console"""
#info = self._deserialize(req.body, req)
#info = self._deserialize(req.body, req.get_content_type())
self.console_api.create_console(
req.environ['nova.context'],
int(server_id))

View File

@@ -42,7 +42,7 @@ class Fault(webob.exc.HTTPException):
"""Create a Fault for the given webob.exc.exception."""
self.wrapped_exc = exception
@webob.dec.wsgify
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
"""Generate a WSGI response based on the exception passed to ctor."""
# Replace the body with fault details.
@@ -57,6 +57,7 @@ class Fault(webob.exc.HTTPException):
fault_data[fault_name]['retryAfter'] = retry
# 'code' is an attribute on the fault tag itself
metadata = {'application/xml': {'attributes': {fault_name: 'code'}}}
serializer = wsgi.Serializer(req.environ, metadata)
self.wrapped_exc.body = serializer.to_content_type(fault_data)
serializer = wsgi.Serializer(metadata)
content_type = req.best_match_content_type()
self.wrapped_exc.body = serializer.serialize(fault_data, content_type)
return self.wrapped_exc

View File

@@ -151,7 +151,7 @@ class Controller(wsgi.Controller):
def create(self, req):
context = req.environ['nova.context']
env = self._deserialize(req.body, req)
env = self._deserialize(req.body, req.get_content_type())
instance_id = env["image"]["serverId"]
name = env["image"]["name"]

View File

@@ -57,7 +57,7 @@ class RateLimitingMiddleware(wsgi.Middleware):
self.limiter = WSGIAppProxy(service_host)
super(RateLimitingMiddleware, self).__init__(application)
@webob.dec.wsgify
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
"""Rate limit the request.
@@ -183,7 +183,7 @@ class WSGIApp(object):
"""Create the WSGI application using the given Limiter instance."""
self.limiter = limiter
@webob.dec.wsgify
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
parts = req.path_info.split('/')
# format: /limiter/<username>/<urlencoded action>

View File

@@ -98,7 +98,7 @@ class Controller(wsgi.Controller):
'application/xml': {
"attributes": {
"server": ["id", "imageId", "name", "flavorId", "hostId",
"status", "progress"]}}}
"status", "progress", "adminPass"]}}}
def __init__(self):
self.compute_api = compute.API()
@@ -141,7 +141,7 @@ class Controller(wsgi.Controller):
def create(self, req):
""" Creates a new server for a given user """
env = self._deserialize(req.body, req)
env = self._deserialize(req.body, req.get_content_type())
if not env:
return faults.Fault(exc.HTTPUnprocessableEntity())
@@ -178,11 +178,21 @@
key_data=key_pair['public_key'],
metadata=metadata,
onset_files=env.get('onset_files', []))
return _translate_keys(instances[0])
server = _translate_keys(instances[0])
password = "%s%s" % (server['server']['name'][:4],
utils.generate_password(12))
server['server']['adminPass'] = password
self.compute_api.set_admin_password(context, server['server']['id'],
password)
return server
def update(self, req, id):
""" Updates the server name or password """
inst_dict = self._deserialize(req.body, req)
if len(req.body) == 0:
raise exc.HTTPUnprocessableEntity()
inst_dict = self._deserialize(req.body, req.get_content_type())
if not inst_dict:
return faults.Fault(exc.HTTPUnprocessableEntity())
@@ -214,7 +224,7 @@
'rebuild': self._action_rebuild,
}
input_dict = self._deserialize(req.body, req)
input_dict = self._deserialize(req.body, req.get_content_type())
for key in actions.keys():
if key in input_dict:
return actions[key](input_dict, req, id)
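
With the create() change above, the API generates the admin password itself: the first four characters of the server name plus twelve generated characters. An assumed example for a server named 'webserver':

password = '%s%s' % ('webserver'[:4], utils.generate_password(12))
# e.g. 'websQx3zT8rLp1Na' (prefix 'webs' plus 12 generated characters);
# the response carries it as server['server']['adminPass'], and the same
# value is pushed to the guest via compute_api.set_admin_password().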

View File

@@ -67,13 +67,13 @@ class Controller(wsgi.Controller):
def create(self, req):
context = req.environ['nova.context']
env = self._deserialize(req.body, req)
env = self._deserialize(req.body, req.get_content_type())
zone = db.zone_create(context, env["zone"])
return dict(zone=_scrub_zone(zone))
def update(self, req, id):
context = req.environ['nova.context']
env = self._deserialize(req.body, req)
env = self._deserialize(req.body, req.get_content_type())
zone_id = int(id)
zone = db.zone_update(context, zone_id, env["zone"])
return dict(zone=_scrub_zone(zone))

View File

@@ -126,9 +126,9 @@ class API(base.Base):
image = self.image_service.show(context, image_id)
if kernel_id is None:
kernel_id = image.get('kernel_id', None)
kernel_id = image['properties'].get('kernel_id', None)
if ramdisk_id is None:
ramdisk_id = image.get('ramdisk_id', None)
ramdisk_id = image['properties'].get('ramdisk_id', None)
# FIXME(sirp): is there a way we can remove null_kernel?
# No kernel and ramdisk for raw images
if kernel_id == str(FLAGS.null_kernel):
@@ -498,9 +498,10 @@
"""Unrescue the given instance."""
self._cast_compute_message('unrescue_instance', context, instance_id)
def set_admin_password(self, context, instance_id):
def set_admin_password(self, context, instance_id, password=None):
"""Set the root/admin password for the given instance."""
self._cast_compute_message('set_admin_password', context, instance_id)
self._cast_compute_message('set_admin_password', context, instance_id,
password)
def inject_file(self, context, instance_id):
"""Write a file to the given instance."""

View File

@@ -133,10 +133,10 @@ class XVPConsoleProxy(object):
return
logging.debug(_("Starting xvp"))
try:
utils.execute('xvp -p %s -c %s -l %s' %
(FLAGS.console_xvp_pid,
FLAGS.console_xvp_conf,
FLAGS.console_xvp_log))
utils.execute('xvp',
'-p', FLAGS.console_xvp_pid,
'-c', FLAGS.console_xvp_conf,
'-l', FLAGS.console_xvp_log)
except exception.ProcessExecutionError, err:
logging.error(_("Error starting xvp: %s") % err)
@@ -190,5 +190,5 @@
flag = '-x'
#xvp will blow up on passwords that are too long (mdragon)
password = password[:maxlen]
out, err = utils.execute('xvp %s' % flag, process_input=password)
out, err = utils.execute('xvp', flag, process_input=password)
return out.strip()

View File

@@ -105,8 +105,10 @@ def generate_key_pair(bits=1024):
tmpdir = tempfile.mkdtemp()
keyfile = os.path.join(tmpdir, 'temp')
utils.execute('ssh-keygen -q -b %d -N "" -f %s' % (bits, keyfile))
(out, err) = utils.execute('ssh-keygen -q -l -f %s.pub' % (keyfile))
utils.execute('ssh-keygen', '-q', '-b', bits, '-N', '',
'-f', keyfile)
(out, err) = utils.execute('ssh-keygen', '-q', '-l', '-f',
'%s.pub' % (keyfile))
fingerprint = out.split(' ')[1]
private_key = open(keyfile).read()
public_key = open(keyfile + '.pub').read()
@@ -118,7 +120,8 @@ def generate_key_pair(bits=1024):
# bio = M2Crypto.BIO.MemoryBuffer()
# key.save_pub_key_bio(bio)
# public_key = bio.read()
# public_key, err = execute('ssh-keygen -y -f /dev/stdin', private_key)
# public_key, err = execute('ssh-keygen', '-y', '-f',
# '/dev/stdin', private_key)
return (private_key, public_key, fingerprint)
@@ -143,9 +146,10 @@ def revoke_cert(project_id, file_name):
start = os.getcwd()
os.chdir(ca_folder(project_id))
# NOTE(vish): potential race condition here
utils.execute("openssl ca -config ./openssl.cnf -revoke '%s'" % file_name)
utils.execute("openssl ca -gencrl -config ./openssl.cnf -out '%s'" %
FLAGS.crl_file)
utils.execute('openssl', 'ca', '-config', './openssl.cnf', '-revoke',
file_name)
utils.execute('openssl', 'ca', '-gencrl', '-config', './openssl.cnf',
'-out', FLAGS.crl_file)
os.chdir(start)
@@ -193,9 +197,9 @@ def generate_x509_cert(user_id, project_id, bits=1024):
tmpdir = tempfile.mkdtemp()
keyfile = os.path.abspath(os.path.join(tmpdir, 'temp.key'))
csrfile = os.path.join(tmpdir, 'temp.csr')
utils.execute("openssl genrsa -out %s %s" % (keyfile, bits))
utils.execute("openssl req -new -key %s -out %s -batch -subj %s" %
(keyfile, csrfile, subject))
utils.execute('openssl', 'genrsa', '-out', keyfile, str(bits))
utils.execute('openssl', 'req', '-new', '-key', keyfile, '-out', csrfile,
'-batch', '-subj', subject)
private_key = open(keyfile).read()
csr = open(csrfile).read()
shutil.rmtree(tmpdir)
@@ -212,8 +216,8 @@ def _ensure_project_folder(project_id):
if not os.path.exists(ca_path(project_id)):
start = os.getcwd()
os.chdir(ca_folder())
utils.execute("sh geninter.sh %s %s" %
(project_id, _project_cert_subject(project_id)))
utils.execute('sh', 'geninter.sh', project_id,
_project_cert_subject(project_id))
os.chdir(start)
@@ -228,8 +232,8 @@ def generate_vpn_files(project_id):
start = os.getcwd()
os.chdir(ca_folder())
# TODO(vish): the shell scripts could all be done in python
utils.execute("sh genvpn.sh %s %s" %
(project_id, _vpn_cert_subject(project_id)))
utils.execute('sh', 'genvpn.sh',
project_id, _vpn_cert_subject(project_id))
with open(csr_fn, "r") as csrfile:
csr_text = csrfile.read()
(serial, signed_csr) = sign_csr(csr_text, project_id)
@@ -259,9 +263,10 @@ def _sign_csr(csr_text, ca_folder):
start = os.getcwd()
# Change working dir to CA
os.chdir(ca_folder)
utils.execute("openssl ca -batch -out %s -config "
"./openssl.cnf -infiles %s" % (outbound, inbound))
out, _err = utils.execute("openssl x509 -in %s -serial -noout" % outbound)
utils.execute('openssl', 'ca', '-batch', '-out', outbound, '-config',
'./openssl.cnf', '-infiles', inbound)
out, _err = utils.execute('openssl', 'x509', '-in', outbound,
'-serial', '-noout')
serial = out.rpartition("=")[2]
os.chdir(start)
with open(outbound, "r") as crtfile:
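
All of these call sites switch utils.execute() from a single shell string to an argument list, so no shell parses the command and file names with spaces or metacharacters pass through safely. A minimal sketch of the difference using subprocess directly (nova's utils.execute wraps this with logging and ProcessExecutionError handling):

import subprocess

# Shell-string style: /bin/sh re-parses the whole line, so an unusual
# file name can split into extra arguments or invoke shell behavior.
subprocess.check_call("openssl x509 -in %s -serial -noout" % 'cert.pem',
                      shell=True)

# Argument-list style: each element is passed to the program verbatim.
subprocess.check_call(['openssl', 'x509', '-in', 'cert.pem',
                       '-serial', '-noout'])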

View File

@@ -571,6 +571,13 @@ def network_create_safe(context, values):
return IMPL.network_create_safe(context, values)
def network_delete_safe(context, network_id):
"""Delete network with key network_id.
This method assumes that the network is not associated with any project
"""
return IMPL.network_delete_safe(context, network_id)
def network_create_fixed_ips(context, network_id, num_vpn_clients):
"""Create the ips for the network, reserving sepecified ips."""
return IMPL.network_create_fixed_ips(context, network_id, num_vpn_clients)
@@ -607,6 +614,11 @@ def network_get_by_bridge(context, bridge):
return IMPL.network_get_by_bridge(context, bridge)
def network_get_by_cidr(context, cidr):
"""Get a network by cidr or raise if it does not exist"""
return IMPL.network_get_by_cidr(context, cidr)
def network_get_by_instance(context, instance_id):
"""Get a network by instance id or raise if it does not exist."""
return IMPL.network_get_by_instance(context, instance_id)
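
These two additions back the new nova-manage network delete command; usage mirrors NetworkCommands.delete() in the bin/nova-manage hunk above:

from nova import context
from nova import db

ctxt = context.get_admin_context()
network = db.network_get_by_cidr(ctxt, '10.0.0.0/8')
if network.project_id is None:   # must be disassociated first
    db.network_delete_safe(ctxt, network.id)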

View File

@@ -1163,6 +1163,15 @@ def network_create_safe(context, values):
return None
@require_admin_context
def network_delete_safe(context, network_id):
session = get_session()
with session.begin():
network_ref = network_get(context, network_id=network_id, \
session=session)
session.delete(network_ref)
@require_admin_context
def network_disassociate(context, network_id):
network_update(context, network_id, {'project_id': None,
@@ -1236,6 +1245,18 @@ def network_get_by_bridge(context, bridge):
return result
@require_admin_context
def network_get_by_cidr(context, cidr):
session = get_session()
result = session.query(models.Network).\
filter_by(cidr=cidr).first()
if not result:
raise exception.NotFound(_('Network with cidr %s does not exist') %
cidr)
return result
@require_admin_context
def network_get_by_instance(_context, instance_id):
session = get_session()

View File

@@ -88,6 +88,10 @@ class InvalidInputException(Error):
pass
class InvalidContentType(Error):
pass
class TimeoutException(Error):
pass

View File

@@ -321,6 +321,8 @@ DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger')
DEFINE_string('state_path', os.path.join(os.path.dirname(__file__), '../'),
"Top-level directory for maintaining nova's state")
DEFINE_string('lock_path', os.path.join(os.path.dirname(__file__), '../'),
"Directory for lock files")
DEFINE_string('logdir', None, 'output to a per-service log file in named '
'directory')
@@ -346,7 +348,7 @@ DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager',
'Manager for scheduler')
# The service to use for image search and retrieval
DEFINE_string('image_service', 'nova.image.s3.S3ImageService',
DEFINE_string('image_service', 'nova.image.local.LocalImageService',
'The service to use for retrieving and searching for images.')
DEFINE_string('host', socket.gethostname(),
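
This flag flip makes LocalImageService the default image backend. Deployments select another implementation by flag, and consumers load it indirectly, as bin/nova-manage does above; the Glance value here is given as an example:

# e.g. --image_service=nova.image.glance.GlanceImageService
from nova import flags
from nova import utils

FLAGS = flags.FLAGS
image_service = utils.import_object(FLAGS.image_service)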

View File

@@ -17,9 +17,8 @@
"""Implementation of an image service that uses Glance as the backend"""
from __future__ import absolute_import
import httplib
import json
import urlparse
from glance.common import exception as glance_exception
from nova import exception
from nova import flags
@@ -53,31 +52,64 @@ class GlanceImageService(service.BaseImageService):
"""
return self.client.get_images_detailed()
def show(self, context, id):
def show(self, context, image_id):
"""
Returns a dict containing image data for the given opaque image id.
"""
image = self.client.get_image_meta(id)
if image:
return image
raise exception.NotFound
try:
image = self.client.get_image_meta(image_id)
except glance_exception.NotFound:
raise exception.NotFound
return image
def create(self, context, data):
def show_by_name(self, context, name):
"""
Returns a dict containing image data for the given name.
"""
# TODO(vish): replace this with more efficient call when glance
# supports it.
images = self.detail(context)
image = None
for candidate in images:
if name == candidate.get('name'):
image = candidate
break
if image is None:
raise exception.NotFound
return image
def get(self, context, image_id, data):
"""
Calls out to Glance for metadata and data and writes data.
"""
try:
metadata, image_chunks = self.client.get_image(image_id)
except glance_exception.NotFound:
raise exception.NotFound
for chunk in image_chunks:
data.write(chunk)
return metadata
def create(self, context, metadata, data=None):
"""
Store the image data and return the new image id.
:raises AlreadyExists if the image already exist.
"""
return self.client.add_image(image_meta=data)
return self.client.add_image(metadata, data)
def update(self, context, image_id, data):
def update(self, context, image_id, metadata, data=None):
"""Replace the contents of the given image with the new data.
:raises NotFound if the image does not exist.
"""
return self.client.update_image(image_id, data)
try:
result = self.client.update_image(image_id, metadata, data)
except glance_exception.NotFound:
raise exception.NotFound
return result
def delete(self, context, image_id):
"""
@@ -86,7 +118,11 @@ class GlanceImageService(service.BaseImageService):
:raises NotFound if the image does not exist.
"""
return self.client.delete_image(image_id)
try:
result = self.client.delete_image(image_id)
except glance_exception.NotFound:
raise exception.NotFound
return result
def delete_all(self):
"""

View File

@@ -15,57 +15,110 @@
# License for the specific language governing permissions and limitations
# under the License.
import cPickle as pickle
import json
import os.path
import random
import tempfile
import shutil
from nova import flags
from nova import exception
from nova.image import service
class LocalImageService(service.BaseImageService):
FLAGS = flags.FLAGS
flags.DEFINE_string('images_path', '$state_path/images',
'path to decrypted images')
class LocalImageService(service.BaseImageService):
"""Image service storing images to local disk.
It assumes that image_ids are integers.
"""
def __init__(self):
self._path = tempfile.mkdtemp()
self._path = FLAGS.images_path
def _path_to(self, image_id):
return os.path.join(self._path, str(image_id))
def _path_to(self, image_id, fname='info.json'):
if fname:
return os.path.join(self._path, '%08x' % int(image_id), fname)
return os.path.join(self._path, '%08x' % int(image_id))
def _ids(self):
"""The list of all image ids."""
return [int(i) for i in os.listdir(self._path)]
return [int(i, 16) for i in os.listdir(self._path)]
def index(self, context):
return [dict(id=i['id'], name=i['name']) for i in self.detail(context)]
return [dict(image_id=i['id'], name=i.get('name'))
for i in self.detail(context)]
def detail(self, context):
return [self.show(context, id) for id in self._ids()]
images = []
for image_id in self._ids():
try:
image = self.show(context, image_id)
images.append(image)
except exception.NotFound:
continue
return images
def show(self, context, id):
def show(self, context, image_id):
try:
return pickle.load(open(self._path_to(id)))
except IOError:
with open(self._path_to(image_id)) as metadata_file:
return json.load(metadata_file)
except (IOError, ValueError):
raise exception.NotFound
def create(self, context, data):
"""Store the image data and return the new image id."""
id = random.randint(0, 2 ** 31 - 1)
data['id'] = id
self.update(context, id, data)
return id
def show_by_name(self, context, name):
"""Returns a dict containing image data for the given name."""
# NOTE(vish): Not very efficient, but the local image service
# is for testing so it should be fine.
images = self.detail(context)
image = None
for candidate in images:
if name == candidate.get('name'):
image = candidate
break
if image == None:
raise exception.NotFound
return image
def update(self, context, image_id, data):
def get(self, context, image_id, data):
"""Get image and metadata."""
try:
with open(self._path_to(image_id)) as metadata_file:
metadata = json.load(metadata_file)
with open(self._path_to(image_id, 'image')) as image_file:
shutil.copyfileobj(image_file, data)
except (IOError, ValueError):
raise exception.NotFound
return metadata
def create(self, context, metadata, data=None):
"""Store the image data and return the new image."""
image_id = random.randint(0, 2 ** 31 - 1)
image_path = self._path_to(image_id, None)
if not os.path.exists(image_path):
os.mkdir(image_path)
return self.update(context, image_id, metadata, data)
def update(self, context, image_id, metadata, data=None):
"""Replace the contents of the given image with the new data."""
metadata['id'] = image_id
try:
pickle.dump(data, open(self._path_to(image_id), 'w'))
except IOError:
if data:
location = self._path_to(image_id, 'image')
with open(location, 'w') as image_file:
shutil.copyfileobj(data, image_file)
# NOTE(vish): update metadata similarly to glance
metadata['status'] = 'active'
metadata['location'] = location
with open(self._path_to(image_id), 'w') as metadata_file:
json.dump(metadata, metadata_file)
except (IOError, ValueError):
raise exception.NotFound
return metadata
def delete(self, context, image_id):
"""Delete the given image.
@@ -73,18 +126,11 @@ class LocalImageService(service.BaseImageService):
"""
try:
os.unlink(self._path_to(image_id))
except IOError:
shutil.rmtree(self._path_to(image_id, None))
except (IOError, ValueError):
raise exception.NotFound
def delete_all(self):
"""Clears out all images in local directory."""
for id in self._ids():
os.unlink(self._path_to(id))
def delete_imagedir(self):
"""Deletes the local directory.
Raises OSError if directory is not empty.
"""
os.rmdir(self._path)
for image_id in self._ids():
shutil.rmtree(self._path_to(image_id, None))
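
Concretely, under the new scheme an image with internal id 10 lives in a zero-padded hex directory beneath FLAGS.images_path, with metadata and image bits in separate files; the path below assumes a representative images_path:

import json
import os

images_path = '/var/lib/nova/images'                  # FLAGS.images_path
image_dir = os.path.join(images_path, '%08x' % 10)    # .../0000000a
# metadata:   .../0000000a/info.json
# image bits: .../0000000a/image
with open(os.path.join(image_dir, 'info.json')) as metadata_file:
    metadata = json.load(metadata_file)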

View File

@@ -21,8 +21,13 @@ Proxy AMI-related calls from the cloud controller, to the running
objectstore service.
"""
import json
import urllib
import binascii
import eventlet
import os
import shutil
import tarfile
import tempfile
from xml.etree import ElementTree
import boto.s3.connection
@@ -31,84 +36,78 @@ from nova import flags
from nova import utils
from nova.auth import manager
from nova.image import service
from nova.api.ec2 import ec2utils
FLAGS = flags.FLAGS
def map_s3_to_base(image):
"""Convert from S3 format to format defined by BaseImageService."""
i = {}
i['id'] = image.get('imageId')
i['name'] = image.get('imageId')
i['kernel_id'] = image.get('kernelId')
i['ramdisk_id'] = image.get('ramdiskId')
i['location'] = image.get('imageLocation')
i['owner_id'] = image.get('imageOwnerId')
i['status'] = image.get('imageState')
i['type'] = image.get('type')
i['is_public'] = image.get('isPublic')
i['architecture'] = image.get('architecture')
return i
flags.DEFINE_string('image_decryption_dir', '/tmp',
'parent dir for tempdir used for image decryption')
class S3ImageService(service.BaseImageService):
def __init__(self, service=None, *args, **kwargs):
if service == None:
service = utils.import_object(FLAGS.image_service)
self.service = service
self.service.__init__(*args, **kwargs)
def modify(self, context, image_id, operation):
self._conn(context).make_request(
method='POST',
bucket='_images',
query_args=self._qs({'image_id': image_id,
'operation': operation}))
return True
def update(self, context, image_id, attributes):
"""update an image's attributes / info.json"""
attributes.update({"image_id": image_id})
self._conn(context).make_request(
method='POST',
bucket='_images',
query_args=self._qs(attributes))
return True
def register(self, context, image_location):
""" rpc call to register a new image based from a manifest """
image_id = utils.generate_uid('ami')
self._conn(context).make_request(
method='PUT',
bucket='_images',
query_args=self._qs({'image_location': image_location,
'image_id': image_id}))
return image_id
def index(self, context):
"""Return a list of all images that a user can see."""
response = self._conn(context).make_request(
method='GET',
bucket='_images')
images = json.loads(response.read())
return [map_s3_to_base(i) for i in images]
def show(self, context, image_id):
"""return a image object if the context has permissions"""
if FLAGS.connection_type == 'fake':
return {'imageId': 'bar'}
result = self.index(context)
result = [i for i in result if i['id'] == image_id]
if not result:
raise exception.NotFound(_('Image %s could not be found')
% image_id)
image = result[0]
def create(self, context, metadata, data=None):
"""metadata['properties'] should contain image_location"""
image = self._s3_create(context, metadata)
return image
def deregister(self, context, image_id):
""" unregister an image """
self._conn(context).make_request(
method='DELETE',
bucket='_images',
query_args=self._qs({'image_id': image_id}))
def delete(self, context, image_id):
# FIXME(vish): call to show is to check filter
self.show(context, image_id)
self.service.delete(context, image_id)
def _conn(self, context):
def update(self, context, image_id, metadata, data=None):
# FIXME(vish): call to show is to check filter
self.show(context, image_id)
image = self.service.update(context, image_id, metadata, data)
return image
def index(self, context):
images = self.service.index(context)
# FIXME(vish): index doesn't filter so we do it manually
return self._filter(context, images)
def detail(self, context):
images = self.service.detail(context)
# FIXME(vish): detail doesn't filter so we do it manually
return self._filter(context, images)
@classmethod
def _is_visible(cls, context, image):
return (context.is_admin
or context.project_id == image['properties']['owner_id']
or image['properties']['is_public'] == 'True')
@classmethod
def _filter(cls, context, images):
filtered = []
for image in images:
if not cls._is_visible(context, image):
continue
filtered.append(image)
return filtered
def show(self, context, image_id):
image = self.service.show(context, image_id)
if not self._is_visible(context, image):
raise exception.NotFound
return image
def show_by_name(self, context, name):
image = self.service.show_by_name(context, name)
if not self._is_visible(context, image):
raise exception.NotFound
return image
@staticmethod
def _conn(context):
# TODO(vish): is there a better way to get creds to sign
# for the user?
access = manager.AuthManager().get_access_key(context.user,
context.project)
secret = str(context.user.secret)
@@ -120,8 +119,152 @@ class S3ImageService(service.BaseImageService):
port=FLAGS.s3_port,
host=FLAGS.s3_host)
def _qs(self, params):
pairs = []
for key in params.keys():
pairs.append(key + '=' + urllib.quote(params[key]))
return '&'.join(pairs)
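As an illustration, this helper flattens a parameter dict into an S3 query string; a standalone sketch with hypothetical values:

import urllib

def qs_sketch(params):
    # Mirrors S3ImageService._qs: quote each value, join key=value pairs.
    return '&'.join(k + '=' + urllib.quote(v) for k, v in params.items())

print qs_sketch({'image_id': 'ami-1', 'operation': 'add'})
# -> image_id=ami-1&operation=add (pair order follows dict iteration)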
@staticmethod
def _download_file(bucket, filename, local_dir):
key = bucket.get_key(filename)
local_filename = os.path.join(local_dir, filename)
key.get_contents_to_filename(local_filename)
return local_filename
def _s3_create(self, context, metadata):
"""Gets a manifext from s3 and makes an image"""
image_path = tempfile.mkdtemp(dir=FLAGS.image_decryption_dir)
image_location = metadata['properties']['image_location']
bucket_name = image_location.split("/")[0]
manifest_path = image_location[len(bucket_name) + 1:]
bucket = self._conn(context).get_bucket(bucket_name)
key = bucket.get_key(manifest_path)
manifest = key.get_contents_as_string()
manifest = ElementTree.fromstring(manifest)
image_format = 'ami'
image_type = 'machine'
try:
kernel_id = manifest.find("machine_configuration/kernel_id").text
if kernel_id == 'true':
image_format = 'aki'
image_type = 'kernel'
kernel_id = None
except Exception:
kernel_id = None
try:
ramdisk_id = manifest.find("machine_configuration/ramdisk_id").text
if ramdisk_id == 'true':
image_format = 'ari'
image_type = 'ramdisk'
ramdisk_id = None
except Exception:
ramdisk_id = None
try:
arch = manifest.find("machine_configuration/architecture").text
except Exception:
arch = 'x86_64'
properties = metadata['properties']
properties['owner_id'] = context.project_id
properties['architecture'] = arch
if kernel_id:
properties['kernel_id'] = ec2utils.ec2_id_to_id(kernel_id)
if ramdisk_id:
properties['ramdisk_id'] = ec2utils.ec2_id_to_id(ramdisk_id)
properties['is_public'] = False
properties['type'] = image_type
metadata.update({'disk_format': image_format,
'container_format': image_format,
'status': 'queued',
'is_public': True,
'properties': properties})
metadata['properties']['image_state'] = 'pending'
image = self.service.create(context, metadata)
image_id = image['id']
def delayed_create():
"""This handles the fetching and decrypting of the part files."""
parts = []
for fn_element in manifest.find("image").getiterator("filename"):
part = self._download_file(bucket, fn_element.text, image_path)
parts.append(part)
# NOTE(vish): this may be suboptimal, should we use cat?
encrypted_filename = os.path.join(image_path, 'image.encrypted')
with open(encrypted_filename, 'w') as combined:
for filename in parts:
with open(filename) as part:
shutil.copyfileobj(part, combined)
metadata['properties']['image_state'] = 'decrypting'
self.service.update(context, image_id, metadata)
hex_key = manifest.find("image/ec2_encrypted_key").text
encrypted_key = binascii.a2b_hex(hex_key)
hex_iv = manifest.find("image/ec2_encrypted_iv").text
encrypted_iv = binascii.a2b_hex(hex_iv)
# FIXME(vish): grab key from common service so this can run on
# any host.
cloud_pk = os.path.join(FLAGS.ca_path, "private/cakey.pem")
decrypted_filename = os.path.join(image_path, 'image.tar.gz')
self._decrypt_image(encrypted_filename, encrypted_key,
encrypted_iv, cloud_pk, decrypted_filename)
metadata['properties']['image_state'] = 'untarring'
self.service.update(context, image_id, metadata)
unz_filename = self._untarzip_image(image_path, decrypted_filename)
metadata['properties']['image_state'] = 'uploading'
with open(unz_filename) as image_file:
self.service.update(context, image_id, metadata, image_file)
metadata['properties']['image_state'] = 'available'
self.service.update(context, image_id, metadata)
shutil.rmtree(image_path)
eventlet.spawn_n(delayed_create)
return image
@staticmethod
def _decrypt_image(encrypted_filename, encrypted_key, encrypted_iv,
cloud_private_key, decrypted_filename):
key, err = utils.execute(
'openssl rsautl -decrypt -inkey %s' % cloud_private_key,
process_input=encrypted_key,
check_exit_code=False)
if err:
raise exception.Error(_("Failed to decrypt private key: %s")
% err)
iv, err = utils.execute(
'openssl rsautl -decrypt -inkey %s' % cloud_private_key,
process_input=encrypted_iv,
check_exit_code=False)
if err:
raise exception.Error(_("Failed to decrypt initialization "
"vector: %s") % err)
_out, err = utils.execute(
'openssl enc -d -aes-128-cbc -in %s -K %s -iv %s -out %s'
% (encrypted_filename, key, iv, decrypted_filename),
check_exit_code=False)
if err:
raise exception.Error(_("Failed to decrypt image file "
"%(image_file)s: %(err)s") %
{'image_file': encrypted_filename,
'err': err})
@staticmethod
def _untarzip_image(path, filename):
tar_file = tarfile.open(filename, "r|gz")
tar_file.extractall(path)
image_file = tar_file.getnames()[0]
tar_file.close()
return os.path.join(path, image_file)
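Taken together, _decrypt_image and _untarzip_image implement the classic EC2 bundle pipeline: RSA-decrypt the bundle's AES key and IV with the cloud private key, AES-128-CBC decrypt the concatenated parts, then untar. A standalone sketch of the same pipeline using plain subprocess (file names and the key path are placeholders, not nova defaults):

import subprocess


def decrypt_image_sketch(encrypted_filename, encrypted_key, encrypted_iv,
                         private_key, out_filename):
    """Minimal sketch of the openssl pipeline used above."""
    def openssl(args, stdin=None):
        proc = subprocess.Popen(['openssl'] + args,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE)
        out, _ = proc.communicate(stdin)
        if proc.returncode != 0:
            raise RuntimeError('openssl %s failed' % args[0])
        return out

    # The manifest stores the AES key/iv RSA-encrypted to the cloud cert as
    # hex, so the rsautl output is the hex string openssl -K/-iv expects.
    key = openssl(['rsautl', '-decrypt', '-inkey', private_key],
                  encrypted_key)
    iv = openssl(['rsautl', '-decrypt', '-inkey', private_key],
                 encrypted_iv)
    openssl(['enc', '-d', '-aes-128-cbc', '-in', encrypted_filename,
             '-K', key.strip(), '-iv', iv.strip(), '-out', out_filename])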

View File

@@ -56,9 +56,9 @@ class BaseImageService(object):
"""
raise NotImplementedError
def show(self, context, id):
def show(self, context, image_id):
"""
Returns a dict containing image data for the given opaque image id.
Returns a dict containing image metadata for the given opaque image id.
:retval a mapping with the following signature:
@@ -76,17 +76,27 @@ class BaseImageService(object):
"""
raise NotImplementedError
def create(self, context, data):
def get(self, context, data):
"""
Store the image data and return the new image id.
Returns a dict containing image metadata and writes image data to data.
:param data: a file-like object to hold binary image data
:raises NotFound if the image does not exist
"""
raise NotImplementedError
def create(self, context, metadata, data=None):
"""
Store the image metadata and data and return the new image id.
:raises AlreadyExists if the image already exist.
"""
raise NotImplementedError
def update(self, context, image_id, data):
"""Replace the contents of the given image with the new data.
def update(self, context, image_id, metadata, data=None):
"""Update the given image with the new metadata and data.
:raises NotFound if the image does not exist.

View File

@@ -65,113 +65,119 @@ flags.DEFINE_string('dmz_cidr', '10.128.0.0/24',
def metadata_forward():
"""Create forwarding rule for metadata"""
_confirm_rule("PREROUTING", "-t nat -s 0.0.0.0/0 "
"-d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j DNAT "
"--to-destination %s:%s" % (FLAGS.ec2_dmz_host, FLAGS.ec2_port))
_confirm_rule("PREROUTING", '-t', 'nat', '-s', '0.0.0.0/0',
'-d', '169.254.169.254/32', '-p', 'tcp', '-m', 'tcp',
'--dport', '80', '-j', 'DNAT',
'--to-destination',
'%s:%s' % (FLAGS.ec2_dmz_host, FLAGS.ec2_port))
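For clarity, the argv that _confirm_rule ends up assembling for this rule is roughly the following (the DNAT target uses illustrative values for ec2_dmz_host and ec2_port, not anyone's real deployment):

# Illustrative expansion of the metadata rule (values are examples only):
rule = ['sudo', 'iptables', '-t', 'nat', '-I', 'PREROUTING',
        '-s', '0.0.0.0/0', '-d', '169.254.169.254/32',
        '-p', 'tcp', '-m', 'tcp', '--dport', '80',
        '-j', 'DNAT', '--to-destination', '10.0.0.1:8773']
print ' '.join(rule)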
def init_host():
"""Basic networking setup goes here"""
if FLAGS.use_nova_chains:
_execute("sudo iptables -N nova_input", check_exit_code=False)
_execute("sudo iptables -D %s -j nova_input" % FLAGS.input_chain,
_execute('sudo', 'iptables', '-N', 'nova_input', check_exit_code=False)
_execute('sudo', 'iptables', '-D', FLAGS.input_chain,
'-j', 'nova_input',
check_exit_code=False)
_execute("sudo iptables -A %s -j nova_input" % FLAGS.input_chain)
_execute("sudo iptables -N nova_forward", check_exit_code=False)
_execute("sudo iptables -D FORWARD -j nova_forward",
_execute('sudo', 'iptables', '-A', FLAGS.input_chain,
'-j', 'nova_input')
_execute('sudo', 'iptables', '-N', 'nova_forward',
check_exit_code=False)
_execute("sudo iptables -A FORWARD -j nova_forward")
_execute("sudo iptables -N nova_output", check_exit_code=False)
_execute("sudo iptables -D OUTPUT -j nova_output",
_execute('sudo', 'iptables', '-D', 'FORWARD', '-j', 'nova_forward',
check_exit_code=False)
_execute("sudo iptables -A OUTPUT -j nova_output")
_execute("sudo iptables -t nat -N nova_prerouting",
_execute('sudo', 'iptables', '-A', 'FORWARD', '-j', 'nova_forward')
_execute('sudo', 'iptables', '-N', 'nova_output',
check_exit_code=False)
_execute("sudo iptables -t nat -D PREROUTING -j nova_prerouting",
_execute('sudo', 'iptables', '-D', 'OUTPUT', '-j', 'nova_output',
check_exit_code=False)
_execute("sudo iptables -t nat -A PREROUTING -j nova_prerouting")
_execute("sudo iptables -t nat -N nova_postrouting",
_execute('sudo', 'iptables', '-A', 'OUTPUT', '-j', 'nova_output')
_execute('sudo', 'iptables', '-t', 'nat', '-N', 'nova_prerouting',
check_exit_code=False)
_execute("sudo iptables -t nat -D POSTROUTING -j nova_postrouting",
_execute('sudo', 'iptables', '-t', 'nat', '-D', 'PREROUTING',
'-j', 'nova_prerouting', check_exit_code=False)
_execute('sudo', 'iptables', '-t', 'nat', '-A', 'PREROUTING',
'-j', 'nova_prerouting')
_execute('sudo', 'iptables', '-t', 'nat', '-N', 'nova_postrouting',
check_exit_code=False)
_execute("sudo iptables -t nat -A POSTROUTING -j nova_postrouting")
_execute("sudo iptables -t nat -N nova_snatting",
_execute('sudo', 'iptables', '-t', 'nat', '-D', 'POSTROUTING',
'-j', 'nova_postrouting', check_exit_code=False)
_execute('sudo', 'iptables', '-t', 'nat', '-A', 'POSTROUTING',
'-j', 'nova_postrouting')
_execute('sudo', 'iptables', '-t', 'nat', '-N', 'nova_snatting',
check_exit_code=False)
_execute("sudo iptables -t nat -D POSTROUTING -j nova_snatting",
_execute('sudo', 'iptables', '-t', 'nat', '-D', 'POSTROUTING',
'-j', 'nova_snatting', check_exit_code=False)
_execute('sudo', 'iptables', '-t', 'nat', '-A', 'POSTROUTING',
'-j', 'nova_snatting')
_execute('sudo', 'iptables', '-t', 'nat', '-N', 'nova_output',
check_exit_code=False)
_execute("sudo iptables -t nat -A POSTROUTING -j nova_snatting")
_execute("sudo iptables -t nat -N nova_output", check_exit_code=False)
_execute("sudo iptables -t nat -D OUTPUT -j nova_output",
check_exit_code=False)
_execute("sudo iptables -t nat -A OUTPUT -j nova_output")
_execute('sudo', 'iptables', '-t', 'nat', '-D', 'OUTPUT',
'-j', 'nova_output', check_exit_code=False)
_execute('sudo', 'iptables', '-t', 'nat', '-A', 'OUTPUT',
'-j', 'nova_output')
else:
# NOTE(vish): This makes it easy to ensure snatting rules always
# come after the accept rules in the postrouting chain
_execute("sudo iptables -t nat -N SNATTING",
_execute('sudo', 'iptables', '-t', 'nat', '-N', 'SNATTING',
check_exit_code=False)
_execute("sudo iptables -t nat -D POSTROUTING -j SNATTING",
check_exit_code=False)
_execute("sudo iptables -t nat -A POSTROUTING -j SNATTING")
_execute('sudo', 'iptables', '-t', 'nat', '-D', 'POSTROUTING',
'-j', 'SNATTING', check_exit_code=False)
_execute('sudo', 'iptables', '-t', 'nat', '-A', 'POSTROUTING',
'-j', 'SNATTING')
# NOTE(devcamcar): Cloud public SNAT entries and the default
# SNAT rule for outbound traffic.
_confirm_rule("SNATTING", "-t nat -s %s "
"-j SNAT --to-source %s"
% (FLAGS.fixed_range, FLAGS.routing_source_ip), append=True)
_confirm_rule("SNATTING", '-t', 'nat', '-s', FLAGS.fixed_range,
'-j', 'SNAT', '--to-source', FLAGS.routing_source_ip,
append=True)
_confirm_rule("POSTROUTING", "-t nat -s %s -d %s -j ACCEPT" %
(FLAGS.fixed_range, FLAGS.dmz_cidr))
_confirm_rule("POSTROUTING", "-t nat -s %(range)s -d %(range)s -j ACCEPT" %
{'range': FLAGS.fixed_range})
_confirm_rule("POSTROUTING", '-t', 'nat', '-s', FLAGS.fixed_range,
'-d', FLAGS.dmz_cidr, '-j', 'ACCEPT')
_confirm_rule("POSTROUTING", '-t', 'nat', '-s', FLAGS.fixed_range,
'-d', FLAGS.fixed_range, '-j', 'ACCEPT')
def bind_floating_ip(floating_ip, check_exit_code=True):
"""Bind ip to public interface"""
_execute("sudo ip addr add %s dev %s" % (floating_ip,
FLAGS.public_interface),
_execute('sudo', 'ip', 'addr', 'add', floating_ip,
'dev', FLAGS.public_interface,
check_exit_code=check_exit_code)
def unbind_floating_ip(floating_ip):
"""Unbind a public ip from public interface"""
_execute("sudo ip addr del %s dev %s" % (floating_ip,
FLAGS.public_interface))
_execute('sudo', 'ip', 'addr', 'del', floating_ip,
'dev', FLAGS.public_interface)
def ensure_vlan_forward(public_ip, port, private_ip):
"""Sets up forwarding rules for vlan"""
_confirm_rule("FORWARD", "-d %s -p udp --dport 1194 -j ACCEPT" %
private_ip)
_confirm_rule("PREROUTING",
"-t nat -d %s -p udp --dport %s -j DNAT --to %s:1194"
% (public_ip, port, private_ip))
_confirm_rule("FORWARD", '-d', private_ip, '-p', 'udp',
'--dport', '1194', '-j', 'ACCEPT')
_confirm_rule("PREROUTING", '-t', 'nat', '-d', public_ip, '-p', 'udp',
'--dport', port, '-j', 'DNAT', '--to', '%s:1194'
% private_ip)
def ensure_floating_forward(floating_ip, fixed_ip):
"""Ensure floating ip forwarding rule"""
_confirm_rule("PREROUTING", "-t nat -d %s -j DNAT --to %s"
% (floating_ip, fixed_ip))
_confirm_rule("OUTPUT", "-t nat -d %s -j DNAT --to %s"
% (floating_ip, fixed_ip))
_confirm_rule("SNATTING", "-t nat -s %s -j SNAT --to %s"
% (fixed_ip, floating_ip))
_confirm_rule("PREROUTING", '-t', 'nat', '-d', floating_ip, '-j', 'DNAT',
'--to', fixed_ip)
_confirm_rule("OUTPUT", '-t', 'nat', '-d', floating_ip, '-j', 'DNAT',
'--to', fixed_ip)
_confirm_rule("SNATTING", '-t', 'nat', '-s', fixed_ip, '-j', 'SNAT',
'--to', floating_ip)
def remove_floating_forward(floating_ip, fixed_ip):
"""Remove forwarding for floating ip"""
_remove_rule("PREROUTING", "-t nat -d %s -j DNAT --to %s"
% (floating_ip, fixed_ip))
_remove_rule("OUTPUT", "-t nat -d %s -j DNAT --to %s"
% (floating_ip, fixed_ip))
_remove_rule("SNATTING", "-t nat -s %s -j SNAT --to %s"
% (fixed_ip, floating_ip))
_remove_rule("PREROUTING", '-t', 'nat', '-d', floating_ip, '-j', 'DNAT',
'--to', fixed_ip)
_remove_rule("OUTPUT", '-t', 'nat', '-d', floating_ip, '-j', 'DNAT',
'--to', fixed_ip)
_remove_rule("SNATTING", '-t', 'nat', '-s', fixed_ip, '-j', 'SNAT',
'--to', floating_ip)
def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None):
@@ -185,9 +191,9 @@ def ensure_vlan(vlan_num):
interface = "vlan%s" % vlan_num
if not _device_exists(interface):
LOG.debug(_("Starting VLAN inteface %s"), interface)
_execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD")
_execute("sudo vconfig add %s %s" % (FLAGS.vlan_interface, vlan_num))
_execute("sudo ip link set %s up" % interface)
_execute('sudo', 'vconfig', 'set_name_type', 'VLAN_PLUS_VID_NO_PAD')
_execute('sudo', 'vconfig', 'add', FLAGS.vlan_interface, vlan_num)
_execute('sudo', 'ip', 'link', 'set', interface, 'up')
return interface
@@ -206,52 +212,57 @@ def ensure_bridge(bridge, interface, net_attrs=None):
"""
if not _device_exists(bridge):
LOG.debug(_("Starting Bridge interface for %s"), interface)
_execute("sudo brctl addbr %s" % bridge)
_execute("sudo brctl setfd %s 0" % bridge)
_execute('sudo', 'brctl', 'addbr', bridge)
_execute('sudo', 'brctl', 'setfd', bridge, 0)
# _execute("sudo brctl setageing %s 10" % bridge)
_execute("sudo brctl stp %s off" % bridge)
_execute("sudo ip link set %s up" % bridge)
_execute('sudo', 'brctl', 'stp', bridge, 'off')
_execute('sudo', 'ip', 'link', 'set', bridge, 'up')
if net_attrs:
# NOTE(vish): The ip for dnsmasq has to be the first address on the
# bridge for it to respond to requests properly
suffix = net_attrs['cidr'].rpartition('/')[2]
out, err = _execute("sudo ip addr add %s/%s brd %s dev %s" %
(net_attrs['gateway'],
suffix,
net_attrs['broadcast'],
bridge),
out, err = _execute('sudo', 'ip', 'addr', 'add',
"%s/%s" %
(net_attrs['gateway'], suffix),
'brd',
net_attrs['broadcast'],
'dev',
bridge,
check_exit_code=False)
if err and err != "RTNETLINK answers: File exists\n":
raise exception.Error("Failed to add ip: %s" % err)
if(FLAGS.use_ipv6):
_execute("sudo ip -f inet6 addr change %s dev %s" %
(net_attrs['cidr_v6'], bridge))
_execute('sudo', 'ip', '-f', 'inet6', 'addr',
'change', net_attrs['cidr_v6'],
'dev', bridge)
# NOTE(vish): If the public interface is the same as the
# bridge, then the bridge has to be in promiscuous
# to forward packets properly.
if(FLAGS.public_interface == bridge):
_execute("sudo ip link set dev %s promisc on" % bridge)
_execute('sudo', 'ip', 'link', 'set',
'dev', bridge, 'promisc', 'on')
if interface:
# NOTE(vish): This will break if there is already an ip on the
# interface, so we move any ips to the bridge
gateway = None
out, err = _execute("sudo route -n")
out, err = _execute('sudo', 'route', '-n')
for line in out.split("\n"):
fields = line.split()
if fields and fields[0] == "0.0.0.0" and fields[-1] == interface:
gateway = fields[1]
out, err = _execute("sudo ip addr show dev %s scope global" %
interface)
out, err = _execute('sudo', 'ip', 'addr', 'show', 'dev', interface,
'scope', 'global')
for line in out.split("\n"):
fields = line.split()
if fields and fields[0] == "inet":
params = ' '.join(fields[1:-1])
_execute("sudo ip addr del %s dev %s" % (params, fields[-1]))
_execute("sudo ip addr add %s dev %s" % (params, bridge))
_execute('sudo', 'ip', 'addr',
'del', params, 'dev', fields[-1])
_execute('sudo', 'ip', 'addr',
'add', params, 'dev', bridge)
if gateway:
_execute("sudo route add 0.0.0.0 gw %s" % gateway)
out, err = _execute("sudo brctl addif %s %s" %
(bridge, interface),
_execute('sudo', 'route', 'add', '0.0.0.0', 'gw', gateway)
out, err = _execute('sudo', 'brctl', 'addif', bridge, interface,
check_exit_code=False)
if (err and err != "device %s is already a member of a bridge; can't "
@@ -259,18 +270,18 @@ def ensure_bridge(bridge, interface, net_attrs=None):
raise exception.Error("Failed to add interface: %s" % err)
if FLAGS.use_nova_chains:
(out, err) = _execute("sudo iptables -N nova_forward",
(out, err) = _execute('sudo', 'iptables', '-N', 'nova_forward',
check_exit_code=False)
if err != 'iptables: Chain already exists.\n':
# NOTE(vish): chain didn't exist link chain
_execute("sudo iptables -D FORWARD -j nova_forward",
_execute('sudo', 'iptables', '-D', 'FORWARD', '-j', 'nova_forward',
check_exit_code=False)
_execute("sudo iptables -A FORWARD -j nova_forward")
_execute('sudo', 'iptables', '-A', 'FORWARD', '-j', 'nova_forward')
_confirm_rule("FORWARD", "--in-interface %s -j ACCEPT" % bridge)
_confirm_rule("FORWARD", "--out-interface %s -j ACCEPT" % bridge)
_execute("sudo iptables -N nova-local", check_exit_code=False)
_confirm_rule("FORWARD", "-j nova-local")
_confirm_rule("FORWARD", '--in-interface', bridge, '-j', 'ACCEPT')
_confirm_rule("FORWARD", '--out-interface', bridge, '-j', 'ACCEPT')
_execute('sudo', 'iptables', '-N', 'nova-local', check_exit_code=False)
_confirm_rule("FORWARD", '-j', 'nova-local')
def get_dhcp_hosts(context, network_id):
@@ -304,11 +315,11 @@ def update_dhcp(context, network_id):
# if dnsmasq is already running, then tell it to reload
if pid:
out, _err = _execute('cat /proc/%d/cmdline' % pid,
out, _err = _execute('cat', "/proc/%d/cmdline" % pid,
check_exit_code=False)
if conffile in out:
try:
_execute('sudo kill -HUP %d' % pid)
_execute('sudo', 'kill', '-HUP', pid)
return
except Exception as exc: # pylint: disable-msg=W0703
LOG.debug(_("Hupping dnsmasq threw %s"), exc)
@@ -349,11 +360,11 @@ interface %s
# if radvd is already running, then tell it to reload
if pid:
out, _err = _execute('cat /proc/%d/cmdline'
out, _err = _execute('cat', '/proc/%d/cmdline'
% pid, check_exit_code=False)
if conffile in out:
try:
_execute('sudo kill %d' % pid)
_execute('sudo', 'kill', pid)
except Exception as exc: # pylint: disable-msg=W0703
LOG.debug(_("killing radvd threw %s"), exc)
else:
@@ -374,23 +385,24 @@ def _host_dhcp(fixed_ip_ref):
fixed_ip_ref['address'])
def _execute(cmd, *args, **kwargs):
def _execute(*cmd, **kwargs):
"""Wrapper around utils._execute for fake_network"""
if FLAGS.fake_network:
LOG.debug("FAKE NET: %s", cmd)
LOG.debug("FAKE NET: %s", " ".join(map(str, cmd)))
return "fake", 0
else:
return utils.execute(cmd, *args, **kwargs)
return utils.execute(*cmd, **kwargs)
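This wrapper change mirrors the commit-wide move from shell strings to argv tuples: the command is exec'd directly, so untrusted values can never be re-parsed by a shell. A standalone illustration with plain subprocess (not nova code):

import subprocess

dev = 'br100; touch /tmp/pwned'  # hostile value stays one argv element

# Old style handed a single string to a shell, so ';' split the command:
#     subprocess.call('ip link show dev %s' % dev, shell=True)  # unsafe
# New style execs an argv list; the ';' is just part of one argument:
subprocess.call(['ip', 'link', 'show', 'dev', dev])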
def _device_exists(device):
"""Check if ethernet device exists"""
(_out, err) = _execute("ip link show dev %s" % device,
(_out, err) = _execute('ip', 'link', 'show', 'dev', device,
check_exit_code=False)
return not err
def _confirm_rule(chain, cmd, append=False):
def _confirm_rule(chain, *cmd, **kwargs):
"""Delete and re-add iptables rule"""
append = kwargs.get('append', False)
if FLAGS.use_nova_chains:
chain = "nova_%s" % chain.lower()
@@ -398,16 +410,16 @@ def _confirm_rule(chain, cmd, append=False):
loc = "-A"
else:
loc = "-I"
_execute("sudo iptables --delete %s %s" % (chain, cmd),
_execute('sudo', 'iptables', '--delete', chain, *cmd,
check_exit_code=False)
_execute("sudo iptables %s %s %s" % (loc, chain, cmd))
_execute('sudo', 'iptables', loc, chain, *cmd)
def _remove_rule(chain, cmd):
def _remove_rule(chain, *cmd):
"""Remove iptables rule"""
if FLAGS.use_nova_chains:
chain = "%s" % chain.lower()
_execute("sudo iptables --delete %s %s" % (chain, cmd))
_execute('sudo', 'iptables', '--delete', chain, *cmd)
def _dnsmasq_cmd(net):
@@ -444,7 +456,7 @@ def _stop_dnsmasq(network):
if pid:
try:
_execute('sudo kill -TERM %d' % pid)
_execute('sudo', 'kill', '-TERM', pid)
except Exception as exc: # pylint: disable-msg=W0703
LOG.debug(_("Killing dnsmasq threw %s"), exc)

View File

@@ -563,6 +563,16 @@ class VlanManager(NetworkManager):
# NOTE(vish): This makes ports unique across the cloud, a more
# robust solution would be to make them unique per ip
net['vpn_public_port'] = vpn_start + index
network_ref = None
try:
network_ref = db.network_get_by_cidr(context, cidr)
except exception.NotFound:
pass
if network_ref is not None:
raise ValueError(_('Network with cidr %s already exists') %
cidr)
network_ref = self.db.network_create_safe(context, net)
if network_ref:
self._create_fixed_ips(context, network_ref['id'])

View File

@@ -37,8 +37,7 @@ from nova.objectstore import bucket
FLAGS = flags.FLAGS
flags.DEFINE_string('images_path', '$state_path/images',
'path to decrypted images')
flags.DECLARE('images_path', 'nova.image.local')
class Image(object):

View File

@@ -2,6 +2,7 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -39,6 +40,7 @@ from nova import flags
from nova import rpc
from nova import utils
from nova import version
from nova import wsgi
FLAGS = flags.FLAGS
@@ -48,6 +50,14 @@ flags.DEFINE_integer('report_interval', 10,
flags.DEFINE_integer('periodic_interval', 60,
'seconds between running periodic tasks',
lower_bound=1)
flags.DEFINE_string('ec2_listen', "0.0.0.0",
'IP address for EC2 API to listen')
flags.DEFINE_integer('ec2_listen_port', 8773, 'port for ec2 api to listen')
flags.DEFINE_string('osapi_listen', "0.0.0.0",
'IP address for OpenStack API to listen')
flags.DEFINE_integer('osapi_listen_port', 8774, 'port for os api to listen')
flags.DEFINE_string('api_paste_config', "api-paste.ini",
'File name for the paste.deploy config for nova-api')
class Service(object):
@@ -213,6 +223,41 @@ class Service(object):
logging.exception(_("model server went away"))
class WsgiService(object):
"""Base class for WSGI based services.
For each api you define, you must also define these flags:
:<api>_listen: The address on which to listen
:<api>_listen_port: The port on which to listen
"""
def __init__(self, conf, apis):
self.conf = conf
self.apis = apis
self.wsgi_app = None
def start(self):
self.wsgi_app = _run_wsgi(self.conf, self.apis)
def wait(self):
self.wsgi_app.wait()
class ApiService(WsgiService):
"""Class for our nova-api service"""
@classmethod
def create(cls, conf=None):
if not conf:
conf = wsgi.paste_config_file(FLAGS.api_paste_config)
if not conf:
message = (_("No paste configuration found for: %s"),
FLAGS.api_paste_config)
raise exception.Error(message)
api_endpoints = ['ec2', 'osapi']
service = cls(conf, api_endpoints)
return service
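The WsgiService docstring above implies a recipe for adding further endpoints; a hypothetical sketch (the 'admin' api, its flags, and its port are invented for illustration, not part of this commit):

from nova import flags
from nova import wsgi
from nova.service import WsgiService

FLAGS = flags.FLAGS
flags.DEFINE_string('admin_listen', '0.0.0.0',
                    'IP address for the hypothetical admin API to listen')
flags.DEFINE_integer('admin_listen_port', 8775,
                     'port for the hypothetical admin api to listen')


class AdminService(WsgiService):
    """Sketch only: serves an 'admin' paste app if one is configured."""

    @classmethod
    def create(cls, conf=None):
        if not conf:
            conf = wsgi.paste_config_file(FLAGS.api_paste_config)
        return cls(conf, ['admin'])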
def serve(*services):
try:
if not services:
@@ -242,3 +287,46 @@ def serve(*services):
def wait():
while True:
greenthread.sleep(5)
def serve_wsgi(cls, conf=None):
try:
service = cls.create(conf)
except Exception:
logging.exception('in WsgiService.create()')
raise
finally:
# After we've loaded up all our dynamic bits, check
# whether we should print help
flags.DEFINE_flag(flags.HelpFlag())
flags.DEFINE_flag(flags.HelpshortFlag())
flags.DEFINE_flag(flags.HelpXMLFlag())
FLAGS.ParseNewFlags()
service.start()
return service
def _run_wsgi(paste_config_file, apis):
logging.debug(_("Using paste.deploy config at: %s"), paste_config_file)
apps = []
for api in apis:
config = wsgi.load_paste_configuration(paste_config_file, api)
if config is None:
logging.debug(_("No paste configuration for app: %s"), api)
continue
logging.debug(_("App Config: %(api)s\n%(config)r") % locals())
logging.info(_("Running %s API"), api)
app = wsgi.load_paste_app(paste_config_file, api)
apps.append((app, getattr(FLAGS, "%s_listen_port" % api),
getattr(FLAGS, "%s_listen" % api)))
if len(apps) == 0:
logging.error(_("No known API applications configured in %s."),
paste_config_file)
return
server = wsgi.Server()
for app in apps:
server.start(*app)
return server

View File

@@ -28,6 +28,7 @@ def webob_factory(url):
def web_request(url, method=None, body=None):
req = webob.Request.blank("%s%s" % (base_url, url))
if method:
req.content_type = "application/json"
req.method = method
if body:
req.body = json.dumps(body)

View File

@@ -25,6 +25,7 @@ import webob.dec
from paste import urlmap
from glance import client as glance_client
from glance.common import exception as glance_exc
from nova import auth
from nova import context
@@ -149,25 +150,26 @@ def stub_out_glance(stubs, initial_fixtures=None):
for f in self.fixtures:
if f['id'] == image_id:
return f
return None
raise glance_exc.NotFound
def fake_add_image(self, image_meta):
def fake_add_image(self, image_meta, data=None):
id = ''.join(random.choice(string.letters) for _ in range(20))
image_meta['id'] = id
self.fixtures.append(image_meta)
return id
return image_meta
def fake_update_image(self, image_id, image_meta):
def fake_update_image(self, image_id, image_meta, data=None):
f = self.fake_get_image_meta(image_id)
if not f:
raise exc.NotFound
raise glance_exc.NotFound
f.update(image_meta)
return f
def fake_delete_image(self, image_id):
f = self.fake_get_image_meta(image_id)
if not f:
raise exc.NotFound
raise glance_exc.NotFound
self.fixtures.remove(f)

View File

@@ -79,20 +79,14 @@ class LimiterTest(test.TestCase):
Test offset key works with a blank offset.
"""
req = Request.blank('/?offset=')
self.assertEqual(limited(self.tiny, req), self.tiny)
self.assertEqual(limited(self.small, req), self.small)
self.assertEqual(limited(self.medium, req), self.medium)
self.assertEqual(limited(self.large, req), self.large[:1000])
self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req)
def test_limiter_offset_bad(self):
"""
Test offset key works with a BAD offset.
"""
req = Request.blank(u'/?offset=\u0020aa')
self.assertEqual(limited(self.tiny, req), self.tiny)
self.assertEqual(limited(self.small, req), self.small)
self.assertEqual(limited(self.medium, req), self.medium)
self.assertEqual(limited(self.large, req), self.large[:1000])
self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req)
def test_limiter_nothing(self):
"""
@@ -166,18 +160,12 @@ class LimiterTest(test.TestCase):
"""
Test a negative limit.
"""
def _limit_large():
limited(self.large, req, max_limit=2000)
req = Request.blank('/?limit=-3000')
self.assertRaises(webob.exc.HTTPBadRequest, _limit_large)
self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req)
def test_limiter_negative_offset(self):
"""
Test a negative offset.
"""
def _limit_large():
limited(self.large, req, max_limit=2000)
req = Request.blank('/?offset=-30')
self.assertRaises(webob.exc.HTTPBadRequest, _limit_large)
self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req)

View File

@@ -22,6 +22,8 @@ and as a WSGI layer
import json
import datetime
import shutil
import tempfile
import stubout
import webob
@@ -54,7 +56,7 @@ class BaseImageServiceTests(object):
num_images = len(self.service.index(self.context))
id = self.service.create(self.context, fixture)
id = self.service.create(self.context, fixture)['id']
self.assertNotEquals(None, id)
self.assertEquals(num_images + 1,
@@ -71,7 +73,7 @@ class BaseImageServiceTests(object):
num_images = len(self.service.index(self.context))
id = self.service.create(self.context, fixture)
id = self.service.create(self.context, fixture)['id']
self.assertNotEquals(None, id)
@@ -89,7 +91,7 @@ class BaseImageServiceTests(object):
'instance_id': None,
'progress': None}
id = self.service.create(self.context, fixture)
id = self.service.create(self.context, fixture)['id']
fixture['status'] = 'in progress'
@@ -118,7 +120,7 @@ class BaseImageServiceTests(object):
ids = []
for fixture in fixtures:
new_id = self.service.create(self.context, fixture)
new_id = self.service.create(self.context, fixture)['id']
ids.append(new_id)
num_images = len(self.service.index(self.context))
@@ -137,14 +139,15 @@ class LocalImageServiceTest(test.TestCase,
def setUp(self):
super(LocalImageServiceTest, self).setUp()
self.tempdir = tempfile.mkdtemp()
self.flags(images_path=self.tempdir)
self.stubs = stubout.StubOutForTesting()
service_class = 'nova.image.local.LocalImageService'
self.service = utils.import_object(service_class)
self.context = context.RequestContext(None, None)
def tearDown(self):
self.service.delete_all()
self.service.delete_imagedir()
shutil.rmtree(self.tempdir)
self.stubs.UnsetAll()
super(LocalImageServiceTest, self).tearDown()

View File

@@ -188,9 +188,37 @@ class ServersTest(test.TestCase):
self.assertEqual(s.get('imageId', None), None)
i += 1
def test_get_servers_with_limit(self):
req = webob.Request.blank('/v1.0/servers?limit=3')
res = req.get_response(fakes.wsgi_app())
servers = json.loads(res.body)['servers']
self.assertEqual([s['id'] for s in servers], [0, 1, 2])
req = webob.Request.blank('/v1.0/servers?limit=aaa')
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
self.assertTrue('limit' in res.body)
def test_get_servers_with_offset(self):
req = webob.Request.blank('/v1.0/servers?offset=2')
res = req.get_response(fakes.wsgi_app())
servers = json.loads(res.body)['servers']
self.assertEqual([s['id'] for s in servers], [2, 3, 4])
req = webob.Request.blank('/v1.0/servers?offset=aaa')
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
self.assertTrue('offset' in res.body)
def test_get_servers_with_limit_and_offset(self):
req = webob.Request.blank('/v1.0/servers?limit=2&offset=1')
res = req.get_response(fakes.wsgi_app())
servers = json.loads(res.body)['servers']
self.assertEqual([s['id'] for s in servers], [1, 2])
def test_create_instance(self):
def instance_create(context, inst):
return {'id': '1', 'display_name': ''}
return {'id': '1', 'display_name': 'server_test'}
def server_update(context, id, params):
return instance_create(context, id)
@@ -231,9 +259,16 @@
req = webob.Request.blank('/v1.0/servers')
req.method = 'POST'
req.body = json.dumps(body)
req.headers["Content-Type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
server = json.loads(res.body)['server']
self.assertEqual('serv', server['adminPass'][:4])
self.assertEqual(16, len(server['adminPass']))
self.assertEqual('server_test', server['name'])
self.assertEqual('1', server['id'])
self.assertEqual(res.status_int, 200)
def test_update_no_body(self):

View File

@@ -86,24 +86,27 @@ class ZonesTest(test.TestCase):
def test_get_zone_list(self):
req = webob.Request.blank('/v1.0/zones')
req.headers["Content-Type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 200)
res_dict = json.loads(res.body)
self.assertEqual(len(res_dict['zones']), 2)
def test_get_zone_by_id(self):
req = webob.Request.blank('/v1.0/zones/1')
req.headers["Content-Type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 200)
res_dict = json.loads(res.body)
self.assertEqual(res_dict['zone']['id'], 1)
self.assertEqual(res_dict['zone']['api_url'], 'http://foo.com')
self.assertFalse('password' in res_dict['zone'])
self.assertEqual(res.status_int, 200)
def test_zone_delete(self):
req = webob.Request.blank('/v1.0/zones/1')
req.headers["Content-Type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
@@ -112,13 +115,14 @@ class ZonesTest(test.TestCase):
body = dict(zone=dict(api_url='http://blah.zoo', username='fred',
password='fubar'))
req = webob.Request.blank('/v1.0/zones')
req.headers["Content-Type"] = "application/json"
req.method = 'POST'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 200)
res_dict = json.loads(res.body)
self.assertEqual(res_dict['zone']['id'], 1)
self.assertEqual(res_dict['zone']['api_url'], 'http://blah.zoo')
self.assertFalse('username' in res_dict['zone'])
@@ -126,13 +130,14 @@
def test_zone_update(self):
body = dict(zone=dict(username='zeb', password='sneaky'))
req = webob.Request.blank('/v1.0/zones/1')
req.headers["Content-Type"] = "application/json"
req.method = 'PUT'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 200)
res_dict = json.loads(res.body)
self.assertEqual(res_dict['zone']['id'], 1)
self.assertEqual(res_dict['zone']['api_url'], 'http://foo.com')
self.assertFalse('username' in res_dict['zone'])

View File

@@ -21,11 +21,13 @@
Test WSGI basics and provide some helper functions for other WSGI tests.
"""
import json
from nova import test
import routes
import webob
from nova import exception
from nova import wsgi
@@ -66,63 +68,164 @@ class Test(test.TestCase):
result = webob.Request.blank('/bad').get_response(Router())
self.assertNotEqual(result.body, "Router result")
def test_controller(self):
class Controller(wsgi.Controller):
"""Test controller to call from router."""
test = self
class ControllerTest(test.TestCase):
class TestRouter(wsgi.Router):
class TestController(wsgi.Controller):
_serialization_metadata = {
'application/xml': {
"attributes": {
"test": ["id"]}}}
def show(self, req, id): # pylint: disable-msg=W0622,C0103
"""Default action called for requests with an ID."""
self.test.assertEqual(req.path_info, '/tests/123')
self.test.assertEqual(id, '123')
return id
return {"test": {"id": id}}
class Router(wsgi.Router):
"""Test router."""
def __init__(self):
mapper = routes.Mapper()
mapper.resource("test", "tests", controller=self.TestController())
wsgi.Router.__init__(self, mapper)
def __init__(self):
mapper = routes.Mapper()
mapper.resource("test", "tests", controller=Controller())
super(Router, self).__init__(mapper)
def test_show(self):
request = wsgi.Request.blank('/tests/123')
result = request.get_response(self.TestRouter())
self.assertEqual(json.loads(result.body), {"test": {"id": "123"}})
result = webob.Request.blank('/tests/123').get_response(Router())
self.assertEqual(result.body, "123")
result = webob.Request.blank('/test/123').get_response(Router())
self.assertNotEqual(result.body, "123")
def test_response_content_type_from_accept_xml(self):
request = webob.Request.blank('/tests/123')
request.headers["Accept"] = "application/xml"
result = request.get_response(self.TestRouter())
self.assertEqual(result.headers["Content-Type"], "application/xml")
def test_response_content_type_from_accept_json(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/json"
result = request.get_response(self.TestRouter())
self.assertEqual(result.headers["Content-Type"], "application/json")
def test_response_content_type_from_query_extension_xml(self):
request = wsgi.Request.blank('/tests/123.xml')
result = request.get_response(self.TestRouter())
self.assertEqual(result.headers["Content-Type"], "application/xml")
def test_response_content_type_from_query_extension_json(self):
request = wsgi.Request.blank('/tests/123.json')
result = request.get_response(self.TestRouter())
self.assertEqual(result.headers["Content-Type"], "application/json")
def test_response_content_type_default_when_unsupported(self):
request = wsgi.Request.blank('/tests/123.unsupported')
request.headers["Accept"] = "application/unsupported1"
result = request.get_response(self.TestRouter())
self.assertEqual(result.status_int, 200)
self.assertEqual(result.headers["Content-Type"], "application/json")
class RequestTest(test.TestCase):
def test_request_content_type_missing(self):
request = wsgi.Request.blank('/tests/123')
request.body = "<body />"
self.assertRaises(webob.exc.HTTPBadRequest, request.get_content_type)
def test_request_content_type_unsupported(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Content-Type"] = "text/html"
request.body = "asdf<br />"
self.assertRaises(webob.exc.HTTPBadRequest, request.get_content_type)
def test_content_type_from_accept_xml(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/xml"
result = request.best_match_content_type()
self.assertEqual(result, "application/xml")
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/json"
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/xml, application/json"
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = \
"application/json; q=0.3, application/xml; q=0.9"
result = request.best_match_content_type()
self.assertEqual(result, "application/xml")
def test_content_type_from_query_extension(self):
request = wsgi.Request.blank('/tests/123.xml')
result = request.best_match_content_type()
self.assertEqual(result, "application/xml")
request = wsgi.Request.blank('/tests/123.json')
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
request = wsgi.Request.blank('/tests/123.invalid')
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
def test_content_type_accept_and_query_extension(self):
request = wsgi.Request.blank('/tests/123.xml')
request.headers["Accept"] = "application/json"
result = request.best_match_content_type()
self.assertEqual(result, "application/xml")
def test_content_type_accept_default(self):
request = wsgi.Request.blank('/tests/123.unsupported')
request.headers["Accept"] = "application/unsupported1"
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
class SerializerTest(test.TestCase):
def match(self, url, accept, expect):
def test_xml(self):
input_dict = dict(servers=dict(a=(2, 3)))
expected_xml = '<servers><a>(2,3)</a></servers>'
expected_json = '{"servers":{"a":[2,3]}}'
req = webob.Request.blank(url, headers=dict(Accept=accept))
result = wsgi.Serializer(req.environ).to_content_type(input_dict)
serializer = wsgi.Serializer()
result = serializer.serialize(input_dict, "application/xml")
result = result.replace('\n', '').replace(' ', '')
if expect == 'xml':
self.assertEqual(result, expected_xml)
elif expect == 'json':
self.assertEqual(result, expected_json)
else:
raise "Bad expect value"
self.assertEqual(result, expected_xml)
def test_basic(self):
self.match('/servers/4.json', None, expect='json')
self.match('/servers/4', 'application/json', expect='json')
self.match('/servers/4', 'application/xml', expect='xml')
self.match('/servers/4.xml', None, expect='xml')
def test_json(self):
input_dict = dict(servers=dict(a=(2, 3)))
expected_json = '{"servers":{"a":[2,3]}}'
serializer = wsgi.Serializer()
result = serializer.serialize(input_dict, "application/json")
result = result.replace('\n', '').replace(' ', '')
self.assertEqual(result, expected_json)
def test_defaults_to_json(self):
self.match('/servers/4', None, expect='json')
self.match('/servers/4', 'text/html', expect='json')
def test_unsupported_content_type(self):
serializer = wsgi.Serializer()
self.assertRaises(exception.InvalidContentType, serializer.serialize,
{}, "text/null")
def test_suffix_takes_precedence_over_accept_header(self):
self.match('/servers/4.xml', 'application/json', expect='xml')
self.match('/servers/4.xml.', 'application/json', expect='json')
def test_deserialize_json(self):
data = """{"a": {
"a1": "1",
"a2": "2",
"bs": ["1", "2", "3", {"c": {"c1": "1"}}],
"d": {"e": "1"},
"f": "1"}}"""
as_dict = dict(a={
'a1': '1',
'a2': '2',
'bs': ['1', '2', '3', {'c': dict(c1='1')}],
'd': {'e': '1'},
'f': '1'})
metadata = {}
serializer = wsgi.Serializer(metadata)
self.assertEqual(serializer.deserialize(data, "application/json"),
as_dict)
def test_deserialize(self):
def test_deserialize_xml(self):
xml = """
<a a1="1" a2="2">
<bs><b>1</b><b>2</b><b>3</b><b><c c1="1"/></b></bs>
@@ -137,11 +240,13 @@ class SerializerTest(test.TestCase):
'd': {'e': '1'},
'f': '1'})
metadata = {'application/xml': dict(plurals={'bs': 'b', 'ts': 't'})}
serializer = wsgi.Serializer({}, metadata)
self.assertEqual(serializer.deserialize(xml), as_dict)
serializer = wsgi.Serializer(metadata)
self.assertEqual(serializer.deserialize(xml, "application/xml"),
as_dict)
def test_deserialize_empty_xml(self):
xml = """<a></a>"""
as_dict = {"a": {}}
serializer = wsgi.Serializer({})
self.assertEqual(serializer.deserialize(xml), as_dict)
serializer = wsgi.Serializer()
self.assertEqual(serializer.deserialize(xml, "application/xml"),
as_dict)
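For orientation, the serializer API these tests pin down now takes optional metadata at construction and an explicit content type per call; a quick usage sketch (assuming nova is importable):

from nova import wsgi

serializer = wsgi.Serializer()
body = serializer.serialize({'server': {'id': 1}}, 'application/json')
data = serializer.deserialize(body, 'application/json')
assert data == {'server': {'id': 1}}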

View File

@@ -32,6 +32,7 @@ flags.DECLARE('fake_network', 'nova.network.manager')
FLAGS.network_size = 8
FLAGS.num_networks = 2
FLAGS.fake_network = True
FLAGS.image_service = 'nova.image.local.LocalImageService'
flags.DECLARE('num_shelves', 'nova.volume.driver')
flags.DECLARE('blades_per_shelf', 'nova.volume.driver')
flags.DECLARE('iscsi_num_targets', 'nova.volume.driver')

View File

@@ -38,6 +38,8 @@ from nova import test
from nova.auth import manager
from nova.compute import power_state
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
from nova.image import local
from nova.objectstore import image
@@ -76,6 +78,12 @@ class CloudTestCase(test.TestCase):
project=self.project)
host = self.network.get_network_host(self.context.elevated())
def fake_show(meh, context, id):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
self.stubs.Set(local.LocalImageService, 'show', fake_show)
self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show)
def tearDown(self):
network_ref = db.project_get_network(self.context,
self.project.id)
@@ -122,7 +130,7 @@ class CloudTestCase(test.TestCase):
self.cloud.allocate_address(self.context)
inst = db.instance_create(self.context, {'host': self.compute.host})
fixed = self.network.allocate_fixed_ip(self.context, inst['id'])
ec2_id = cloud.id_to_ec2_id(inst['id'])
ec2_id = ec2utils.id_to_ec2_id(inst['id'])
self.cloud.associate_address(self.context,
instance_id=ec2_id,
public_ip=address)
@@ -158,12 +166,12 @@ class CloudTestCase(test.TestCase):
vol2 = db.volume_create(self.context, {})
result = self.cloud.describe_volumes(self.context)
self.assertEqual(len(result['volumeSet']), 2)
volume_id = cloud.id_to_ec2_id(vol2['id'], 'vol-%08x')
volume_id = ec2utils.id_to_ec2_id(vol2['id'], 'vol-%08x')
result = self.cloud.describe_volumes(self.context,
volume_id=[volume_id])
self.assertEqual(len(result['volumeSet']), 1)
self.assertEqual(
cloud.ec2_id_to_id(result['volumeSet'][0]['volumeId']),
ec2utils.ec2_id_to_id(result['volumeSet'][0]['volumeId']),
vol2['id'])
db.volume_destroy(self.context, vol1['id'])
db.volume_destroy(self.context, vol2['id'])
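The ec2utils helpers the tests now import replace the old cloud.py functions; a minimal sketch of the conversion they perform (assumed to match the implementations they replaced):

def id_to_ec2_id(instance_id, template='i-%08x'):
    """10 -> 'i-0000000a' (or 'vol-0000000a' with template='vol-%08x')."""
    return template % instance_id


def ec2_id_to_id(ec2_id):
    """'i-0000000a' -> 10; inverse of id_to_ec2_id."""
    return int(ec2_id.split('-')[-1], 16)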
@@ -188,8 +196,10 @@ class CloudTestCase(test.TestCase):
def test_describe_instances(self):
"""Makes sure describe_instances works and filters results."""
inst1 = db.instance_create(self.context, {'reservation_id': 'a',
'image_id': 1,
'host': 'host1'})
inst2 = db.instance_create(self.context, {'reservation_id': 'a',
'image_id': 1,
'host': 'host2'})
comp1 = db.service_create(self.context, {'host': 'host1',
'availability_zone': 'zone1',
@@ -200,7 +210,7 @@ class CloudTestCase(test.TestCase):
result = self.cloud.describe_instances(self.context)
result = result['reservationSet'][0]
self.assertEqual(len(result['instancesSet']), 2)
instance_id = cloud.id_to_ec2_id(inst2['id'])
instance_id = ec2utils.id_to_ec2_id(inst2['id'])
result = self.cloud.describe_instances(self.context,
instance_id=[instance_id])
result = result['reservationSet'][0]
@@ -215,10 +225,9 @@ class CloudTestCase(test.TestCase):
db.service_destroy(self.context, comp2['id'])
def test_console_output(self):
image_id = FLAGS.default_image
instance_type = FLAGS.default_instance_type
max_count = 1
kwargs = {'image_id': image_id,
kwargs = {'image_id': 'ami-1',
'instance_type': instance_type,
'max_count': max_count}
rv = self.cloud.run_instances(self.context, **kwargs)
@@ -234,8 +243,7 @@ class CloudTestCase(test.TestCase):
greenthread.sleep(0.3)
def test_ajax_console(self):
image_id = FLAGS.default_image
kwargs = {'image_id': image_id}
kwargs = {'image_id': 'ami-1'}
rv = self.cloud.run_instances(self.context, **kwargs)
instance_id = rv['instancesSet'][0]['instanceId']
greenthread.sleep(0.3)
@@ -347,7 +355,7 @@ class CloudTestCase(test.TestCase):
def test_update_of_instance_display_fields(self):
inst = db.instance_create(self.context, {})
ec2_id = cloud.id_to_ec2_id(inst['id'])
ec2_id = ec2utils.id_to_ec2_id(inst['id'])
self.cloud.update_instance(self.context, ec2_id,
display_name='c00l 1m4g3')
inst = db.instance_get(self.context, inst['id'])
@@ -365,7 +373,7 @@ class CloudTestCase(test.TestCase):
def test_update_of_volume_display_fields(self):
vol = db.volume_create(self.context, {})
self.cloud.update_volume(self.context,
cloud.id_to_ec2_id(vol['id'], 'vol-%08x'),
ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x'),
display_name='c00l v0lum3')
vol = db.volume_get(self.context, vol['id'])
self.assertEqual('c00l v0lum3', vol['display_name'])
@@ -374,7 +382,7 @@ class CloudTestCase(test.TestCase):
def test_update_of_volume_wont_update_private_fields(self):
vol = db.volume_create(self.context, {})
self.cloud.update_volume(self.context,
cloud.id_to_ec2_id(vol['id'], 'vol-%08x'),
ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x'),
mountpoint='/not/here')
vol = db.volume_get(self.context, vol['id'])
self.assertEqual(None, vol['mountpoint'])

View File

@@ -36,7 +36,7 @@ from nova.compute import instance_types
from nova.compute import manager as compute_manager
from nova.compute import power_state
from nova.db.sqlalchemy import models
from nova.image import local
LOG = logging.getLogger('nova.tests.compute')
FLAGS = flags.FLAGS
@@ -58,6 +58,11 @@ class ComputeTestCase(test.TestCase):
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.context = context.RequestContext('fake', 'fake', False)
def fake_show(meh, context, id):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
self.stubs.Set(local.LocalImageService, 'show', fake_show)
def tearDown(self):
self.manager.delete_user(self.user)
self.manager.delete_project(self.project)
@@ -66,7 +71,7 @@ class ComputeTestCase(test.TestCase):
def _create_instance(self, params={}):
"""Create a test instance"""
inst = {}
inst['image_id'] = 'ami-test'
inst['image_id'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['launch_time'] = '10'
inst['user_id'] = self.user.id

View File

@@ -57,7 +57,7 @@ class ConsoleTestCase(test.TestCase):
inst = {}
#inst['host'] = self.host
#inst['name'] = 'instance-1234'
inst['image_id'] = 'ami-test'
inst['image_id'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['launch_time'] = '10'
inst['user_id'] = self.user.id

View File

@@ -59,6 +59,7 @@ class DirectTestCase(test.TestCase):
req.headers['X-OpenStack-User'] = 'user1'
req.headers['X-OpenStack-Project'] = 'proj1'
resp = req.get_response(self.auth_router)
self.assertEqual(resp.status_int, 200)
data = json.loads(resp.body)
self.assertEqual(data['user'], 'user1')
self.assertEqual(data['project'], 'proj1')
@@ -69,6 +70,7 @@ class DirectTestCase(test.TestCase):
req.method = 'POST'
req.body = 'json=%s' % json.dumps({'data': 'foo'})
resp = req.get_response(self.router)
self.assertEqual(resp.status_int, 200)
resp_parsed = json.loads(resp.body)
self.assertEqual(resp_parsed['data'], 'foo')
@@ -78,6 +80,7 @@ class DirectTestCase(test.TestCase):
req.method = 'POST'
req.body = 'data=foo'
resp = req.get_response(self.router)
self.assertEqual(resp.status_int, 200)
resp_parsed = json.loads(resp.body)
self.assertEqual(resp_parsed['data'], 'foo')
@@ -90,8 +93,7 @@ class DirectTestCase(test.TestCase):
class DirectCloudTestCase(test_cloud.CloudTestCase):
def setUp(self):
super(DirectCloudTestCase, self).setUp()
compute_handle = compute.API(image_service=self.cloud.image_service,
network_api=self.cloud.network_api,
compute_handle = compute.API(network_api=self.cloud.network_api,
volume_api=self.cloud.volume_api)
direct.register_service('compute', compute_handle)
self.router = direct.JsonParamsMiddleware(direct.Router())

View File

@@ -14,10 +14,12 @@
# License for the specific language governing permissions and limitations
# under the License.
import errno
import os
import select
from nova import test
from nova.utils import parse_mailmap, str_dict_replace
from nova.utils import parse_mailmap, str_dict_replace, synchronized
class ProjectTestCase(test.TestCase):
@@ -55,3 +57,47 @@ class ProjectTestCase(test.TestCase):
'%r not listed in Authors' % missing)
finally:
tree.unlock()
class LockTestCase(test.TestCase):
def test_synchronized_wrapped_function_metadata(self):
@synchronized('whatever')
def foo():
"""Bar"""
pass
self.assertEquals(foo.__doc__, 'Bar', "Wrapped function's docstring "
"got lost")
self.assertEquals(foo.__name__, 'foo', "Wrapped function's name "
"got mangled")
def test_synchronized(self):
rpipe1, wpipe1 = os.pipe()
rpipe2, wpipe2 = os.pipe()
@synchronized('testlock')
def f(rpipe, wpipe):
try:
os.write(wpipe, "foo")
except OSError, e:
self.assertEquals(e.errno, errno.EPIPE)
return
rfds, _, __ = select.select([rpipe], [], [], 1)
self.assertEquals(len(rfds), 0, "The other process, which was"
" supposed to be locked, "
"wrote on its end of the "
"pipe")
os.close(rpipe)
pid = os.fork()
if pid > 0:
os.close(wpipe1)
os.close(rpipe2)
f(rpipe1, wpipe2)
else:
os.close(rpipe1)
os.close(wpipe2)
f(rpipe2, wpipe1)
os._exit(0)
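The fork in this test shows the lock is interprocess, not just per-thread. A minimal sketch of how such a decorator can be built on the lockfile package newly imported by utils (an assumption about the shape of nova's implementation, not a copy of it):

import functools

import lockfile


def synchronized_sketch(name):
    """Interprocess lock decorator sketch."""
    def wrap(f):
        @functools.wraps(f)  # keeps __name__/__doc__, as the first test checks
        def inner(*args, **kwargs):
            with lockfile.FileLock('nova-%s' % name):
                return f(*args, **kwargs)
        return inner
    return wrap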

View File

@@ -343,13 +343,13 @@ def lease_ip(private_ip):
private_ip)
instance_ref = db.fixed_ip_get_instance(context.get_admin_context(),
private_ip)
cmd = "%s add %s %s fake" % (binpath('nova-dhcpbridge'),
instance_ref['mac_address'],
private_ip)
cmd = (binpath('nova-dhcpbridge'), 'add',
instance_ref['mac_address'],
private_ip, 'fake')
env = {'DNSMASQ_INTERFACE': network_ref['bridge'],
'TESTING': '1',
'FLAGFILE': FLAGS.dhcpbridge_flagfile}
(out, err) = utils.execute(cmd, addl_env=env)
(out, err) = utils.execute(*cmd, addl_env=env)
LOG.debug("ISSUE_IP: %s, %s ", out, err)
@@ -359,11 +359,11 @@
private_ip)
instance_ref = db.fixed_ip_get_instance(context.get_admin_context(),
private_ip)
cmd = "%s del %s %s fake" % (binpath('nova-dhcpbridge'),
instance_ref['mac_address'],
private_ip)
cmd = (binpath('nova-dhcpbridge'), 'del',
instance_ref['mac_address'],
private_ip, 'fake')
env = {'DNSMASQ_INTERFACE': network_ref['bridge'],
'TESTING': '1',
'FLAGFILE': FLAGS.dhcpbridge_flagfile}
(out, err) = utils.execute(cmd, addl_env=env)
(out, err) = utils.execute(*cmd, addl_env=env)
LOG.debug("RELEASE_IP: %s, %s ", out, err)

View File

@@ -20,11 +20,12 @@ from nova import compute
from nova import context
from nova import db
from nova import flags
from nova import network
from nova import quota
from nova import test
from nova import utils
from nova import volume
from nova.auth import manager
from nova.api.ec2 import cloud
from nova.compute import instance_types
@@ -41,7 +42,6 @@ class QuotaTestCase(test.TestCase):
quota_gigabytes=20,
quota_floating_ips=1)
self.cloud = cloud.CloudController()
self.manager = manager.AuthManager()
self.user = self.manager.create_user('admin', 'admin', 'admin', True)
self.project = self.manager.create_project('admin', 'admin', 'admin')
@@ -57,7 +57,7 @@ class QuotaTestCase(test.TestCase):
def _create_instance(self, cores=2):
"""Create a test instance"""
inst = {}
inst['image_id'] = 'ami-test'
inst['image_id'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user.id
inst['project_id'] = self.project.id
@@ -118,12 +118,12 @@ class QuotaTestCase(test.TestCase):
for i in range(FLAGS.quota_instances):
instance_id = self._create_instance()
instance_ids.append(instance_id)
self.assertRaises(quota.QuotaError, self.cloud.run_instances,
self.assertRaises(quota.QuotaError, compute.API().create,
self.context,
min_count=1,
max_count=1,
instance_type='m1.small',
image_id='fake')
image_id=1)
for instance_id in instance_ids:
db.instance_destroy(self.context, instance_id)
@@ -131,12 +131,12 @@ class QuotaTestCase(test.TestCase):
instance_ids = []
instance_id = self._create_instance(cores=4)
instance_ids.append(instance_id)
self.assertRaises(quota.QuotaError, self.cloud.run_instances,
self.assertRaises(quota.QuotaError, compute.API().create,
self.context,
min_count=1,
max_count=1,
instance_type='m1.small',
image_id='fake')
image_id=1)
for instance_id in instance_ids:
db.instance_destroy(self.context, instance_id)
@@ -145,9 +145,12 @@ class QuotaTestCase(test.TestCase):
for i in range(FLAGS.quota_volumes):
volume_id = self._create_volume()
volume_ids.append(volume_id)
self.assertRaises(quota.QuotaError, self.cloud.create_volume,
self.context,
size=10)
self.assertRaises(quota.QuotaError,
volume.API().create,
self.context,
size=10,
name='',
description='')
for volume_id in volume_ids:
db.volume_destroy(self.context, volume_id)
@@ -156,9 +159,11 @@ class QuotaTestCase(test.TestCase):
volume_id = self._create_volume(size=20)
volume_ids.append(volume_id)
self.assertRaises(quota.QuotaError,
self.cloud.create_volume,
volume.API().create,
self.context,
size=10)
size=10,
name='',
description='')
for volume_id in volume_ids:
db.volume_destroy(self.context, volume_id)
@@ -172,7 +177,8 @@ class QuotaTestCase(test.TestCase):
# make an rpc.call, the test just finishes with OK. It
# appears to be something in the magic inline callbacks
# that is breaking.
self.assertRaises(quota.QuotaError, self.cloud.allocate_address,
self.assertRaises(quota.QuotaError,
network.API().allocate_floating_ip,
self.context)
db.floating_ip_destroy(context.get_admin_context(), address)

View File

@@ -255,7 +255,7 @@ class SimpleDriverTestCase(test.TestCase):
def _create_instance(self, **kwargs):
"""Create a test instance"""
inst = {}
inst['image_id'] = 'ami-test'
inst['image_id'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user.id
inst['project_id'] = self.project.id
@@ -275,8 +275,6 @@ class SimpleDriverTestCase(test.TestCase):
def _create_volume(self):
"""Create a test volume"""
vol = {}
vol['image_id'] = 'ami-test'
vol['reservation_id'] = 'r-fakeres'
vol['size'] = 1
vol['availability_zone'] = 'test'
return db.volume_create(self.context, vol)['id']

View File

@@ -14,7 +14,9 @@
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
import mox
import os
import sys
from xml.etree.ElementTree import fromstring as xml_to_tree
@@ -38,6 +40,70 @@ FLAGS = flags.FLAGS
flags.DECLARE('instances_path', 'nova.compute.manager')
def _concurrency(wait, done, target):
wait.wait()
done.send()
class CacheConcurrencyTestCase(test.TestCase):
def setUp(self):
super(CacheConcurrencyTestCase, self).setUp()
def fake_exists(fname):
basedir = os.path.join(FLAGS.instances_path, '_base')
if fname == basedir:
return True
return False
def fake_execute(*args, **kwargs):
pass
self.stubs.Set(os.path, 'exists', fake_exists)
self.stubs.Set(utils, 'execute', fake_execute)
def test_same_fname_concurrency(self):
"""Ensures that the same fname cache runs at a sequentially"""
conn = libvirt_conn.LibvirtConnection
wait1 = eventlet.event.Event()
done1 = eventlet.event.Event()
eventlet.spawn(conn._cache_image, _concurrency,
'target', 'fname', False, wait1, done1)
wait2 = eventlet.event.Event()
done2 = eventlet.event.Event()
eventlet.spawn(conn._cache_image, _concurrency,
'target', 'fname', False, wait2, done2)
wait2.send()
eventlet.sleep(0)
try:
self.assertFalse(done2.ready())
self.assertTrue('fname' in conn._image_sems)
finally:
wait1.send()
done1.wait()
eventlet.sleep(0)
self.assertTrue(done2.ready())
self.assertFalse('fname' in conn._image_sems)
def test_different_fname_concurrency(self):
"""Ensures that two different fname caches are concurrent"""
conn = libvirt_conn.LibvirtConnection
wait1 = eventlet.event.Event()
done1 = eventlet.event.Event()
eventlet.spawn(conn._cache_image, _concurrency,
'target', 'fname2', False, wait1, done1)
wait2 = eventlet.event.Event()
done2 = eventlet.event.Event()
eventlet.spawn(conn._cache_image, _concurrency,
'target', 'fname1', False, wait2, done2)
wait2.send()
eventlet.sleep(0)
try:
self.assertTrue(done2.ready())
finally:
wait1.send()
eventlet.sleep(0)
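These tests lean on eventlet's cooperative scheduling; for reference, a minimal standalone sketch of the Event handshake they build on (illustrative only, not part of this commit):

import eventlet

evt = eventlet.event.Event()

def waiter():
    evt.wait()           # parks this green thread until send()
    print 'released'

eventlet.spawn(waiter)
eventlet.sleep(0)        # yield so waiter runs up to its wait()
evt.send()               # wake the waiter
eventlet.sleep(0)        # yield again so 'released' is printed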
class LibvirtConnTestCase(test.TestCase):
def setUp(self):
super(LibvirtConnTestCase, self).setUp()
@ -536,15 +602,16 @@ class IptablesFirewallTestCase(test.TestCase):
instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])
# self.fw.add_instance(instance_ref)
def fake_iptables_execute(cmd, process_input=None):
if cmd == 'sudo ip6tables-save -t filter':
def fake_iptables_execute(*cmd, **kwargs):
process_input = kwargs.get('process_input', None)
if cmd == ('sudo', 'ip6tables-save', '-t', 'filter'):
return '\n'.join(self.in6_rules), None
if cmd == 'sudo iptables-save -t filter':
if cmd == ('sudo', 'iptables-save', '-t', 'filter'):
return '\n'.join(self.in_rules), None
if cmd == 'sudo iptables-restore':
if cmd == ('sudo', 'iptables-restore'):
self.out_rules = process_input.split('\n')
return '', ''
if cmd == 'sudo ip6tables-restore':
if cmd == ('sudo', 'ip6tables-restore'):
self.out6_rules = process_input.split('\n')
return '', ''
self.fw.execute = fake_iptables_execute

View File

@ -101,7 +101,7 @@ class VolumeTestCase(test.TestCase):
def test_run_attach_detach_volume(self):
"""Make sure volume can be attached and detached from instance."""
inst = {}
inst['image_id'] = 'ami-test'
inst['image_id'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['launch_time'] = '10'
inst['user_id'] = 'fake'

View File

@ -23,10 +23,14 @@ System-level utilities and helper functions.
import base64
import datetime
import functools
import inspect
import json
import lockfile
import netaddr
import os
import random
import re
import socket
import string
import struct
@ -34,20 +38,20 @@ import sys
import time
import types
from xml.sax import saxutils
import re
import netaddr
from eventlet import event
from eventlet import greenthread
from eventlet.green import subprocess
from nova import exception
from nova.exception import ProcessExecutionError
from nova import flags
from nova import log as logging
LOG = logging.getLogger("nova.utils")
TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
FLAGS = flags.FLAGS
def import_class(import_str):
@ -125,16 +129,24 @@ def fetchfile(url, target):
# c.perform()
# c.close()
# fp.close()
execute("curl --fail %s -o %s" % (url, target))
execute("curl", "--fail", url, "-o", target)
def execute(cmd, process_input=None, addl_env=None, check_exit_code=True):
LOG.debug(_("Running cmd (subprocess): %s"), cmd)
def execute(*cmd, **kwargs):
process_input = kwargs.get('process_input', None)
addl_env = kwargs.get('addl_env', None)
check_exit_code = kwargs.get('check_exit_code', 0)
stdin = kwargs.get('stdin', subprocess.PIPE)
stdout = kwargs.get('stdout', subprocess.PIPE)
stderr = kwargs.get('stderr', subprocess.PIPE)
cmd = map(str, cmd)
LOG.debug(_("Running cmd (subprocess): %s"), ' '.join(cmd))
env = os.environ.copy()
if addl_env:
env.update(addl_env)
obj = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
obj = subprocess.Popen(cmd, stdin=stdin,
stdout=stdout, stderr=stderr, env=env)
result = None
if process_input != None:
result = obj.communicate(process_input)
@ -143,12 +155,13 @@ def execute(cmd, process_input=None, addl_env=None, check_exit_code=True):
obj.stdin.close()
if obj.returncode:
LOG.debug(_("Result was %s") % obj.returncode)
if check_exit_code and obj.returncode != 0:
if type(check_exit_code) == types.IntType \
and obj.returncode != check_exit_code:
(stdout, stderr) = result
raise ProcessExecutionError(exit_code=obj.returncode,
stdout=stdout,
stderr=stderr,
cmd=cmd)
cmd=' '.join(cmd))
# NOTE(termie): this appears to be necessary to let the subprocess call
# clean something up in between calls, without it two
# execute calls in a row hangs the second one
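With the new argv-style signature, each argument is a separate positional and stdin data must be routed through process_input; a few hedged usage sketches (paths hypothetical):

# stdout and stderr still come back as a tuple
out, err = utils.execute('ls', '-l', '/tmp')

# stdin payloads are now explicit
utils.execute('tee', '/tmp/example', process_input='hello\n')

# a non-integer check_exit_code (e.g. False) skips the return-code
# comparison, since only exact int types are compared
utils.execute('e2fsck', '-fp', '/tmp/example.img', check_exit_code=False)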
@ -158,7 +171,7 @@ def execute(cmd, process_input=None, addl_env=None, check_exit_code=True):
def ssh_execute(ssh, cmd, process_input=None,
addl_env=None, check_exit_code=True):
LOG.debug(_("Running cmd (SSH): %s"), cmd)
LOG.debug(_("Running cmd (SSH): %s"), ' '.join(cmd))
if addl_env:
raise exception.Error("Environment not supported over SSH")
@ -187,7 +200,7 @@ def ssh_execute(ssh, cmd, process_input=None,
raise exception.ProcessExecutionError(exit_code=exit_status,
stdout=stdout,
stderr=stderr,
cmd=cmd)
cmd=' '.join(cmd))
return (stdout, stderr)
@ -220,9 +233,9 @@ def debug(arg):
return arg
def runthis(prompt, cmd, check_exit_code=True):
LOG.debug(_("Running %s"), (cmd))
rv, err = execute(cmd, check_exit_code=check_exit_code)
def runthis(prompt, *cmd, **kwargs):
LOG.debug(_("Running %s"), (" ".join(cmd)))
rv, err = execute(*cmd, **kwargs)
def generate_uid(topic, size=8):
@ -254,7 +267,7 @@ def last_octet(address):
def get_my_linklocal(interface):
try:
if_str = execute("ip -f inet6 -o addr show %s" % interface)
if_str = execute("ip", "-f", "inet6", "-o", "addr", "show", interface)
condition = "\s+inet6\s+([0-9a-f:]+)/\d+\s+scope\s+link"
links = [re.search(condition, x) for x in if_str[0].split('\n')]
address = [w.group(1) for w in links if w is not None]
@ -491,6 +504,18 @@ def loads(s):
return json.loads(s)
def synchronized(name):
def wrap(f):
@functools.wraps(f)
def inner(*args, **kwargs):
lock = lockfile.FileLock(os.path.join(FLAGS.lock_path,
'nova-%s.lock' % name))
with lock:
return f(*args, **kwargs)
return inner
return wrap
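A hypothetical caller of the new decorator; the lock name is arbitrary, and FLAGS.lock_path is assumed to be defined elsewhere in the tree:

@synchronized('iptables')
def apply_rules():
    # only one local process runs this body at a time, serialized
    # through nova-iptables.lock under FLAGS.lock_path
    pass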
def ensure_b64_encoding(val):
"""Safety method to ensure that values expected to be base64-encoded
actually are. If they are, the value is returned unchanged. Otherwise,

View File

@ -49,10 +49,10 @@ def extend(image, size):
file_size = os.path.getsize(image)
if file_size >= size:
return
utils.execute('truncate -s %s %s' % (size, image))
utils.execute('truncate', '-s', size, image)
# NOTE(vish): attempts to resize filesystem
utils.execute('e2fsck -fp %s' % image, check_exit_code=False)
utils.execute('resize2fs %s' % image, check_exit_code=False)
utils.execute('e2fsck', '-fp', image, check_exit_code=False)
utils.execute('resize2fs', image, check_exit_code=False)
def inject_data(image, key=None, net=None, partition=None, nbd=False):
@ -68,7 +68,7 @@ def inject_data(image, key=None, net=None, partition=None, nbd=False):
try:
if not partition is None:
# create partition
out, err = utils.execute('sudo kpartx -a %s' % device)
out, err = utils.execute('sudo', 'kpartx', '-a', device)
if err:
raise exception.Error(_('Failed to load partition: %s') % err)
mapped_device = '/dev/mapper/%sp%s' % (device.split('/')[-1],
@ -84,13 +84,14 @@ def inject_data(image, key=None, net=None, partition=None, nbd=False):
mapped_device)
# Configure ext2fs so that it doesn't auto-check every N boots
out, err = utils.execute('sudo tune2fs -c 0 -i 0 %s' % mapped_device)
out, err = utils.execute('sudo', 'tune2fs',
'-c', 0, '-i', 0, mapped_device)
tmpdir = tempfile.mkdtemp()
try:
# mount loopback to dir
out, err = utils.execute(
'sudo mount %s %s' % (mapped_device, tmpdir))
'sudo', 'mount', mapped_device, tmpdir)
if err:
raise exception.Error(_('Failed to mount filesystem: %s')
% err)
@ -103,13 +104,13 @@ def inject_data(image, key=None, net=None, partition=None, nbd=False):
_inject_net_into_fs(net, tmpdir)
finally:
# unmount device
utils.execute('sudo umount %s' % mapped_device)
utils.execute('sudo', 'umount', mapped_device)
finally:
# remove temporary directory
utils.execute('rmdir %s' % tmpdir)
utils.execute('rmdir', tmpdir)
if not partition is None:
# remove partitions
utils.execute('sudo kpartx -d %s' % device)
utils.execute('sudo', 'kpartx', '-d', device)
finally:
_unlink_device(device, nbd)
@ -118,7 +119,7 @@ def _link_device(image, nbd):
"""Link image to device using loopback or nbd"""
if nbd:
device = _allocate_device()
utils.execute('sudo qemu-nbd -c %s %s' % (device, image))
utils.execute('sudo', 'qemu-nbd', '-c', device, image)
# NOTE(vish): this forks into another process, so give it a chance
# to set up before continuing
for i in xrange(FLAGS.timeout_nbd):
@ -127,7 +128,7 @@ def _link_device(image, nbd):
time.sleep(1)
raise exception.Error(_('nbd device %s did not show up') % device)
else:
out, err = utils.execute('sudo losetup --find --show %s' % image)
out, err = utils.execute('sudo', 'losetup', '--find', '--show', image)
if err:
raise exception.Error(_('Could not attach image to loopback: %s')
% err)
@ -137,10 +138,10 @@ def _link_device(image, nbd):
def _unlink_device(device, nbd):
"""Unlink image from device using loopback or nbd"""
if nbd:
utils.execute('sudo qemu-nbd -d %s' % device)
utils.execute('sudo', 'qemu-nbd', '-d', device)
_free_device(device)
else:
utils.execute('sudo losetup --detach %s' % device)
utils.execute('sudo', 'losetup', '--detach', device)
_DEVICES = ['/dev/nbd%s' % i for i in xrange(FLAGS.max_nbd_devices)]
@ -170,11 +171,12 @@ def _inject_key_into_fs(key, fs):
fs is the path to the base of the filesystem into which to inject the key.
"""
sshdir = os.path.join(fs, 'root', '.ssh')
utils.execute('sudo mkdir -p %s' % sshdir) # existing dir doesn't matter
utils.execute('sudo chown root %s' % sshdir)
utils.execute('sudo chmod 700 %s' % sshdir)
utils.execute('sudo', 'mkdir', '-p', sshdir) # existing dir doesn't matter
utils.execute('sudo', 'chown', 'root', sshdir)
utils.execute('sudo', 'chmod', '700', sshdir)
keyfile = os.path.join(sshdir, 'authorized_keys')
utils.execute('sudo tee -a %s' % keyfile, '\n' + key.strip() + '\n')
utils.execute('sudo', 'tee', '-a', keyfile,
process_input='\n' + key.strip() + '\n')
def _inject_net_into_fs(net, fs):
@ -183,8 +185,8 @@ def _inject_net_into_fs(net, fs):
net is the contents of /etc/network/interfaces.
"""
netdir = os.path.join(os.path.join(fs, 'etc'), 'network')
utils.execute('sudo mkdir -p %s' % netdir) # existing dir doesn't matter
utils.execute('sudo chown root:root %s' % netdir)
utils.execute('sudo chmod 755 %s' % netdir)
utils.execute('sudo', 'mkdir', '-p', netdir) # existing dir doesn't matter
utils.execute('sudo', 'chown', 'root:root', netdir)
utils.execute('sudo', 'chmod', 755, netdir)
netfile = os.path.join(netdir, 'interfaces')
utils.execute('sudo tee %s' % netfile, net)
utils.execute('sudo', 'tee', netfile, process_input=net)
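The tee conversions above are easy to get wrong: under the old string-based execute the payload rode along as a second positional argument, but in argv style a bare positional becomes a command-line argument instead of stdin. An illustration (key_data is a placeholder):

# wrong: key_data becomes an extra argv element for tee
utils.execute('sudo', 'tee', '-a', keyfile, key_data)

# right: key_data is written to tee's stdin
utils.execute('sudo', 'tee', '-a', keyfile, process_input=key_data)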

View File

@ -28,29 +28,32 @@ import time
import urllib2
import urlparse
from nova import context
from nova import flags
from nova import log as logging
from nova import utils
from nova.auth import manager
from nova.auth import signer
from nova.objectstore import image
FLAGS = flags.FLAGS
flags.DEFINE_bool('use_s3', True,
'whether to get images from s3 or use local copy')
LOG = logging.getLogger('nova.virt.images')
def fetch(image, path, user, project):
if FLAGS.use_s3:
f = _fetch_s3_image
else:
f = _fetch_local_image
return f(image, path, user, project)
def fetch(image_id, path, _user, _project):
# TODO(vish): Improve context handling and add owner and auth data
# when it is added to glance. Right now there is no
# auth checking in glance, so we assume that access was
# checked before we got here.
image_service = utils.import_object(FLAGS.image_service)
with open(path, "wb") as image_file:
elevated = context.get_admin_context()
metadata = image_service.get(elevated, image_id, image_file)
return metadata
# NOTE(vish): The methods below should be unnecessary, but I'm leaving
# them in case the glance client does not work on windows.
def _fetch_image_no_curl(url, path, headers):
request = urllib2.Request(url)
for (k, v) in headers.iteritems():
@ -94,8 +97,7 @@ def _fetch_s3_image(image, path, user, project):
cmd += ['-H', '%s: %s' % (k, v)]
cmd += ['-o', path]
cmd_out = ' '.join(cmd)
return utils.execute(cmd_out)
return utils.execute(*cmd)
def _fetch_local_image(image, path, user, project):
@ -103,13 +105,15 @@ def _fetch_local_image(image, path, user, project):
if sys.platform.startswith('win'):
return shutil.copy(source, path)
else:
return utils.execute('cp %s %s' % (source, path))
return utils.execute('cp', source, path)
def _image_path(path):
return os.path.join(FLAGS.images_path, path)
# TODO(vish): xenapi should use the glance client code directly instead
# of retrieving the image using this method.
def image_url(image):
if FLAGS.image_service == "nova.image.glance.GlanceImageService":
return "http://%s:%s/images/%s" % (FLAGS.glance_host,

View File

@ -47,9 +47,8 @@ import uuid
from xml.dom import minidom
from eventlet import greenthread
from eventlet import event
from eventlet import tpool
from eventlet import semaphore
import IPy
@ -455,8 +454,10 @@ class LibvirtConnection(object):
if virsh_output.startswith('/dev/'):
LOG.info(_("cool, it's a device"))
out, err = utils.execute("sudo dd if=%s iflag=nonblock" %
virsh_output, check_exit_code=False)
out, err = utils.execute('sudo', 'dd',
"if=%s" % virsh_output,
'iflag=nonblock',
check_exit_code=False)
return out
else:
return ''
@ -478,11 +479,11 @@ class LibvirtConnection(object):
console_log = os.path.join(FLAGS.instances_path, instance['name'],
'console.log')
utils.execute('sudo chown %d %s' % (os.getuid(), console_log))
utils.execute('sudo', 'chown', os.getuid(), console_log)
if FLAGS.libvirt_type == 'xen':
# Xen is special
virsh_output = utils.execute("virsh ttyconsole %s" %
virsh_output = utils.execute('virsh', 'ttyconsole',
instance['name'])
data = self._flush_xen_console(virsh_output)
fpath = self._append_to_file(data, console_log)
@ -499,9 +500,10 @@ class LibvirtConnection(object):
port = random.randint(int(start_port), int(end_port))
# netcat will exit with 0 only if the port is in use,
# so a nonzero return value implies it is unused
cmd = 'netcat 0.0.0.0 %s -w 1 </dev/null || echo free' % (port)
stdout, stderr = utils.execute(cmd)
if stdout.strip() == 'free':
cmd = 'netcat', '0.0.0.0', port, '-w', '1'
try:
stdout, stderr = utils.execute(*cmd, process_input='')
except ProcessExecutionError:
return port
raise Exception(_('Unable to find an open port'))
@ -528,7 +530,10 @@ class LibvirtConnection(object):
subprocess.Popen(cmd, shell=True)
return {'token': token, 'host': host, 'port': port}
def _cache_image(self, fn, target, fname, cow=False, *args, **kwargs):
_image_sems = {}
@staticmethod
def _cache_image(fn, target, fname, cow=False, *args, **kwargs):
"""Wrapper for a method that creates an image that caches the image.
This wrapper will save the image into a common store and create a
@ -547,14 +552,21 @@ class LibvirtConnection(object):
if not os.path.exists(base_dir):
os.mkdir(base_dir)
base = os.path.join(base_dir, fname)
if not os.path.exists(base):
fn(target=base, *args, **kwargs)
if fname not in LibvirtConnection._image_sems:
LibvirtConnection._image_sems[fname] = semaphore.Semaphore()
with LibvirtConnection._image_sems[fname]:
if not os.path.exists(base):
fn(target=base, *args, **kwargs)
if not LibvirtConnection._image_sems[fname].locked():
del LibvirtConnection._image_sems[fname]
if cow:
utils.execute('qemu-img create -f qcow2 -o '
'cluster_size=2M,backing_file=%s %s'
% (base, target))
utils.execute('qemu-img', 'create', '-f', 'qcow2', '-o',
'cluster_size=2M,backing_file=%s' % base,
target)
else:
utils.execute('cp %s %s' % (base, target))
utils.execute('cp', base, target)
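The per-fname semaphore above is a general idiom: serialize work on the same key while letting different keys proceed concurrently. A minimal standalone sketch (hypothetical names, mirroring the cleanup step in _cache_image):

from eventlet import semaphore

_sems = {}

def with_key_lock(key, work):
    # one green thread per key at a time; distinct keys run in parallel
    if key not in _sems:
        _sems[key] = semaphore.Semaphore()
    with _sems[key]:
        work()
    # drop the semaphore once nobody holds it, so the dict doesn't grow
    if not _sems[key].locked():
        del _sems[key]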
def _fetch_image(self, target, image_id, user, project, size=None):
"""Grab image and optionally attempt to resize it"""
@ -564,7 +576,7 @@ class LibvirtConnection(object):
def _create_local(self, target, local_gb):
"""Create a blank image of specified size"""
utils.execute('truncate %s -s %dG' % (target, local_gb))
utils.execute('truncate', target, '-s', "%dG" % local_gb)
# TODO(vish): should we format disk by default?
def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None):
@ -575,7 +587,7 @@ class LibvirtConnection(object):
fname + suffix)
# ensure directories exist and are writable
utils.execute('mkdir -p %s' % basepath(suffix=''))
utils.execute('mkdir', '-p', basepath(suffix=''))
LOG.info(_('instance %s: Creating image'), inst['name'])
f = open(basepath('libvirt.xml'), 'w')
@ -595,21 +607,23 @@ class LibvirtConnection(object):
'ramdisk_id': inst['ramdisk_id']}
if disk_images['kernel_id']:
fname = '%08x' % int(disk_images['kernel_id'])
self._cache_image(fn=self._fetch_image,
target=basepath('kernel'),
fname=disk_images['kernel_id'],
fname=fname,
image_id=disk_images['kernel_id'],
user=user,
project=project)
if disk_images['ramdisk_id']:
fname = '%08x' % int(disk_images['ramdisk_id'])
self._cache_image(fn=self._fetch_image,
target=basepath('ramdisk'),
fname=disk_images['ramdisk_id'],
fname=fname,
image_id=disk_images['ramdisk_id'],
user=user,
project=project)
root_fname = disk_images['image_id']
root_fname = '%08x' % int(disk_images['image_id'])
size = FLAGS.minimum_root_size
if inst['instance_type'] == 'm1.tiny' or suffix == '.rescue':
size = None
@ -675,7 +689,7 @@ class LibvirtConnection(object):
' data into image %(img_id)s (%(e)s)') % locals())
if FLAGS.libvirt_type == 'uml':
utils.execute('sudo chown root %s' % basepath('disk'))
utils.execute('sudo', 'chown', 'root', basepath('disk'))
def to_xml(self, instance, rescue=False):
# TODO(termie): cache?
@ -1608,16 +1622,18 @@ class IptablesFirewallDriver(FirewallDriver):
self.apply_ruleset()
def apply_ruleset(self):
current_filter, _ = self.execute('sudo iptables-save -t filter')
current_filter, _ = self.execute('sudo', 'iptables-save',
'-t', 'filter')
current_lines = current_filter.split('\n')
new_filter = self.modify_rules(current_lines, 4)
self.execute('sudo iptables-restore',
self.execute('sudo', 'iptables-restore',
process_input='\n'.join(new_filter))
if(FLAGS.use_ipv6):
current_filter, _ = self.execute('sudo ip6tables-save -t filter')
current_filter, _ = self.execute('sudo', 'ip6tables-save',
'-t', 'filter')
current_lines = current_filter.split('\n')
new_filter = self.modify_rules(current_lines, 6)
self.execute('sudo ip6tables-restore',
self.execute('sudo', 'ip6tables-restore',
process_input='\n'.join(new_filter))
def modify_rules(self, current_lines, ip_version=4):

View File

@ -917,14 +917,13 @@ def _write_partition(virtual_size, dev):
LOG.debug(_('Writing partition table %(primary_first)d %(primary_last)d'
' to %(dest)s...') % locals())
def execute(cmd, process_input=None, check_exit_code=True):
return utils.execute(cmd=cmd,
process_input=process_input,
check_exit_code=check_exit_code)
def execute(*cmd, **kwargs):
return utils.execute(*cmd, **kwargs)
execute('parted --script %s mklabel msdos' % dest)
execute('parted --script %s mkpart primary %ds %ds' %
(dest, primary_first, primary_last))
execute('parted', '--script', dest, 'mklabel', 'msdos')
execute('parted', '--script', dest, 'mkpart', 'primary',
'%ds' % primary_first,
'%ds' % primary_last)
LOG.debug(_('Writing partition table %s done.'), dest)

View File

@ -65,14 +65,14 @@ class VolumeDriver(object):
self._execute = execute
self._sync_exec = sync_exec
def _try_execute(self, command):
def _try_execute(self, *command):
# NOTE(vish): Volume commands can partially fail due to timing, but
# running them a second time on failure will usually
# recover nicely.
tries = 0
while True:
try:
self._execute(command)
self._execute(*command)
return True
except exception.ProcessExecutionError:
tries = tries + 1
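The rest of the retry loop is cut off by the hunk; a self-contained sketch of the pattern, with an assumed retry budget and back-off (the real values are elided here):

import time

from nova import exception

def try_execute(execute, *command):
    """Standalone sketch: retry a flaky command a few times."""
    tries = 0
    while True:
        try:
            execute(*command)
            return True
        except exception.ProcessExecutionError:
            tries = tries + 1
            if tries >= 3:              # assumed retry budget
                raise
            time.sleep(tries ** 2)      # assumed back-off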
@ -84,7 +84,7 @@ class VolumeDriver(object):
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met"""
out, err = self._execute("sudo vgs --noheadings -o name")
out, err = self._execute('sudo', 'vgs', '--noheadings', '-o', 'name')
volume_groups = out.split()
if not FLAGS.volume_group in volume_groups:
raise exception.Error(_("volume group %s doesn't exist")
@ -97,22 +97,22 @@ class VolumeDriver(object):
sizestr = '100M'
else:
sizestr = '%sG' % volume['size']
self._try_execute("sudo lvcreate -L %s -n %s %s" %
(sizestr,
self._try_execute('sudo', 'lvcreate', '-L', sizestr, '-n',
volume['name'],
FLAGS.volume_group))
FLAGS.volume_group)
def delete_volume(self, volume):
"""Deletes a logical volume."""
try:
self._try_execute("sudo lvdisplay %s/%s" %
self._try_execute('sudo', 'lvdisplay',
'%s/%s' %
(FLAGS.volume_group,
volume['name']))
except Exception as e:
# If the volume isn't present, then don't attempt to delete
return True
self._try_execute("sudo lvremove -f %s/%s" %
self._try_execute('sudo', 'lvremove', '-f', "%s/%s" %
(FLAGS.volume_group,
volume['name']))
@ -172,12 +172,13 @@ class AOEDriver(VolumeDriver):
blade_id) = self.db.volume_allocate_shelf_and_blade(context,
volume['id'])
self._try_execute(
"sudo vblade-persist setup %s %s %s /dev/%s/%s" %
(shelf_id,
'sudo', 'vblade-persist', 'setup',
shelf_id,
blade_id,
FLAGS.aoe_eth_dev,
FLAGS.volume_group,
volume['name']))
"/dev/%s/%s" %
(FLAGS.volume_group,
volume['name']))
# NOTE(vish): The standard _try_execute does not work here
# because these methods throw errors if other
# volumes on this host are in the process of
@ -186,9 +187,9 @@ class AOEDriver(VolumeDriver):
# just wait a bit for the current volume to
# be ready and ignore any errors.
time.sleep(2)
self._execute("sudo vblade-persist auto all",
self._execute('sudo', 'vblade-persist', 'auto', 'all',
check_exit_code=False)
self._execute("sudo vblade-persist start all",
self._execute('sudo', 'vblade-persist', 'start', 'all',
check_exit_code=False)
def remove_export(self, context, volume):
@ -196,10 +197,10 @@ class AOEDriver(VolumeDriver):
(shelf_id,
blade_id) = self.db.volume_get_shelf_and_blade(context,
volume['id'])
self._try_execute("sudo vblade-persist stop %s %s" %
(shelf_id, blade_id))
self._try_execute("sudo vblade-persist destroy %s %s" %
(shelf_id, blade_id))
self._try_execute('sudo', 'vblade-persist', 'stop',
shelf_id, blade_id)
self._try_execute('sudo', 'vblade-persist', 'destroy',
shelf_id, blade_id)
def discover_volume(self, context, _volume):
"""Discover volume on a remote host."""
@ -209,7 +210,7 @@ class AOEDriver(VolumeDriver):
self._execute("sudo aoe-discover")
out, err = self._execute("sudo aoe-stat", check_exit_code=False)
device_path = 'e%(shelf_id)d.%(blade_id)d' % locals()
if 0 <= out.find(device_path):
if out.find(device_path) >= 0:
return "/dev/etherd/%s" % device_path
else:
return
@ -286,13 +287,16 @@ class ISCSIDriver(VolumeDriver):
iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name'])
self._sync_exec("sudo ietadm --op new "
"--tid=%s --params Name=%s" %
(iscsi_target, iscsi_name),
self._sync_exec('sudo', 'ietadm', '--op', 'new',
"--tid=%s" % iscsi_target,
'--params',
"Name=%s" % iscsi_name,
check_exit_code=False)
self._sync_exec("sudo ietadm --op new --tid=%s "
"--lun=0 --params Path=%s,Type=fileio" %
(iscsi_target, volume_path),
self._sync_exec('sudo', 'ietadm', '--op', 'new',
"--tid=%s" % iscsi_target,
'--lun=0',
'--params',
"Path=%s,Type=fileio" % volume_path,
check_exit_code=False)
def _ensure_iscsi_targets(self, context, host):
@ -313,12 +317,13 @@ class ISCSIDriver(VolumeDriver):
volume['host'])
iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name'])
self._execute("sudo ietadm --op new "
"--tid=%s --params Name=%s" %
self._execute('sudo', 'ietadm', '--op', 'new',
'--tid=%s' % iscsi_target,
'--params', 'Name=%s' % iscsi_name)
self._execute("sudo ietadm --op new --tid=%s "
"--lun=0 --params Path=%s,Type=fileio" %
(iscsi_target, volume_path))
self._execute('sudo', 'ietadm', '--op', 'new',
'--tid=%s' % iscsi_target,
'--lun=0', '--params',
'Path=%s,Type=fileio' % volume_path)
def remove_export(self, context, volume):
"""Removes an export for a logical volume."""
@ -333,16 +338,18 @@ class ISCSIDriver(VolumeDriver):
try:
# ietadm show will exit with an error
# this export has already been removed
self._execute("sudo ietadm --op show --tid=%s " % iscsi_target)
self._execute('sudo', 'ietadm', '--op', 'show',
'--tid=%s' % iscsi_target)
except Exception as e:
LOG.info(_("Skipping remove_export. No iscsi_target " +
"is presently exported for volume: %d"), volume['id'])
return
self._execute("sudo ietadm --op delete --tid=%s "
"--lun=0" % iscsi_target)
self._execute("sudo ietadm --op delete --tid=%s" %
iscsi_target)
self._execute('sudo', 'ietadm', '--op', 'delete',
'--tid=%s' % iscsi_target,
'--lun=0')
self._execute('sudo', 'ietadm', '--op', 'delete',
'--tid=%s' % iscsi_target)
def _do_iscsi_discovery(self, volume):
#TODO(justinsb): Deprecate discovery and use stored info
@ -351,8 +358,8 @@ class ISCSIDriver(VolumeDriver):
volume_name = volume['name']
(out, _err) = self._execute("sudo iscsiadm -m discovery -t "
"sendtargets -p %s" % (volume['host']))
(out, _err) = self._execute('sudo', 'iscsiadm', '-m', 'discovery',
'-t', 'sendtargets', '-p', volume['host'])
for target in out.splitlines():
if FLAGS.iscsi_ip_prefix in target and volume_name in target:
return target
@ -526,7 +533,7 @@ class RBDDriver(VolumeDriver):
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met"""
(stdout, stderr) = self._execute("rados lspools")
(stdout, stderr) = self._execute('rados', 'lspools')
pools = stdout.split("\n")
if not FLAGS.rbd_pool in pools:
raise exception.Error(_("rbd has no pool %s") %
@ -538,16 +545,13 @@ class RBDDriver(VolumeDriver):
size = 100
else:
size = int(volume['size']) * 1024
self._try_execute("rbd --pool %s --size %d create %s" %
(FLAGS.rbd_pool,
size,
volume['name']))
self._try_execute('rbd', '--pool', FLAGS.rbd_pool,
'--size', size, 'create', volume['name'])
def delete_volume(self, volume):
"""Deletes a logical volume."""
self._try_execute("rbd --pool %s rm %s" %
(FLAGS.rbd_pool,
volume['name']))
self._try_execute('rbd', '--pool', FLAGS.rbd_pool,
'rm', volume['name'])
def local_path(self, volume):
"""Returns the path of the rbd volume."""
@ -582,7 +586,7 @@ class SheepdogDriver(VolumeDriver):
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met"""
try:
(out, err) = self._execute("collie cluster info")
(out, err) = self._execute('collie', 'cluster', 'info')
if not out.startswith('running'):
raise exception.Error(_("Sheepdog is not working: %s") % out)
except exception.ProcessExecutionError:
@ -594,12 +598,13 @@ class SheepdogDriver(VolumeDriver):
sizestr = '100M'
else:
sizestr = '%sG' % volume['size']
self._try_execute("qemu-img create sheepdog:%s %s" %
(volume['name'], sizestr))
self._try_execute('qemu-img', 'create',
"sheepdog:%s" % volume['name'],
sizestr)
def delete_volume(self, volume):
"""Deletes a logical volume"""
self._try_execute("collie vdi delete %s" % volume['name'])
self._try_execute('collie', 'vdi', 'delete', volume['name'])
def local_path(self, volume):
return "sheepdog:%s" % volume['name']

View File

@ -36,6 +36,7 @@ import webob.exc
from paste import deploy
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
@ -82,6 +83,35 @@ class Server(object):
log=WritableLogger(logger))
class Request(webob.Request):
def best_match_content_type(self):
"""
Determine the most acceptable content-type based on the
query extension then the Accept header
"""
parts = self.path.rsplit(".", 1)
if len(parts) > 1:
format = parts[1]
if format in ["json", "xml"]:
return "application/{0}".format(parts[1])
ctypes = ["application/json", "application/xml"]
bm = self.accept.best_match(ctypes)
return bm or "application/json"
def get_content_type(self):
try:
ct = self.headers["Content-Type"]
assert ct in ("application/xml", "application/json")
return ct
except Exception:
raise webob.exc.HTTPBadRequest("Invalid content type")
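A quick sketch of how the new negotiation behaves (paths hypothetical): the URL extension wins over the Accept header, and JSON is the fallback:

req = Request.blank('/servers/detail.xml')
req.best_match_content_type()    # 'application/xml'

req = Request.blank('/servers/detail',
                    headers={'Accept': 'application/xml'})
req.best_match_content_type()    # 'application/xml'

req = Request.blank('/servers/detail')
req.best_match_content_type()    # 'application/json' (default)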
class Application(object):
"""Base WSGI application wrapper. Subclasses need to implement __call__."""
@ -113,7 +143,7 @@ class Application(object):
def __call__(self, environ, start_response):
r"""Subclasses will probably want to implement __call__ like this:
@webob.dec.wsgify
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
# Any of the following objects work as responses:
@ -199,7 +229,7 @@ class Middleware(Application):
"""Do whatever you'd like to the response."""
return response
@webob.dec.wsgify
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
response = self.process_request(req)
if response:
@ -212,7 +242,7 @@ class Debug(Middleware):
"""Helper class that can be inserted into any WSGI application chain
to get information about the request and response."""
@webob.dec.wsgify
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
print ("*" * 40) + " REQUEST ENVIRON"
for key, value in req.environ.items():
@ -276,7 +306,7 @@ class Router(object):
self._router = routes.middleware.RoutesMiddleware(self._dispatch,
self.map)
@webob.dec.wsgify
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
"""
Route the incoming request to a controller based on self.map.
@ -285,7 +315,7 @@ class Router(object):
return self._router
@staticmethod
@webob.dec.wsgify
@webob.dec.wsgify(RequestClass=Request)
def _dispatch(req):
"""
Called by self._router after matching the incoming request to a route
@ -304,11 +334,11 @@ class Controller(object):
WSGI app that reads routing information supplied by RoutesMiddleware
and calls the requested action method upon itself. All action methods
must, in addition to their normal parameters, accept a 'req' argument
which is the incoming webob.Request. They raise a webob.exc exception,
which is the incoming wsgi.Request. They raise a webob.exc exception,
or return a dict which will be serialized by requested content type.
"""
@webob.dec.wsgify
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
"""
Call the method specified in req.environ by RoutesMiddleware.
@ -318,32 +348,45 @@ class Controller(object):
method = getattr(self, action)
del arg_dict['controller']
del arg_dict['action']
if 'format' in arg_dict:
del arg_dict['format']
arg_dict['req'] = req
result = method(**arg_dict)
if type(result) is dict:
return self._serialize(result, req)
content_type = req.best_match_content_type()
body = self._serialize(result, content_type)
response = webob.Response()
response.headers["Content-Type"] = content_type
response.body = body
return response
else:
return result
def _serialize(self, data, request):
def _serialize(self, data, content_type):
"""
Serialize the given dict to the response type requested in request.
Serialize the given dict to the provided content_type.
Uses self._serialization_metadata if it exists, which is a dict mapping
MIME types to information needed to serialize to that type.
"""
_metadata = getattr(type(self), "_serialization_metadata", {})
serializer = Serializer(request.environ, _metadata)
return serializer.to_content_type(data)
serializer = Serializer(_metadata)
try:
return serializer.serialize(data, content_type)
except exception.InvalidContentType:
raise webob.exc.HTTPNotAcceptable()
def _deserialize(self, data, request):
def _deserialize(self, data, content_type):
"""
Deserialize the request body to the response type requested in request.
Deserialize the request body to the specified content type.
Uses self._serialization_metadata if it exists, which is a dict mapping
MIME types to information needed to serialize to that type.
"""
_metadata = getattr(type(self), "_serialization_metadata", {})
serializer = Serializer(request.environ, _metadata)
return serializer.deserialize(data)
serializer = Serializer(_metadata)
return serializer.deserialize(data, content_type)
class Serializer(object):
@ -351,50 +394,52 @@ class Serializer(object):
Serializes and deserializes dictionaries to certain MIME types.
"""
def __init__(self, environ, metadata=None):
def __init__(self, metadata=None):
"""
Create a serializer based on the given WSGI environment.
'metadata' is an optional dict mapping MIME types to information
needed to serialize a dictionary to that type.
"""
self.metadata = metadata or {}
req = webob.Request.blank('', environ)
suffix = req.path_info.split('.')[-1].lower()
if suffix == 'json':
self.handler = self._to_json
elif suffix == 'xml':
self.handler = self._to_xml
elif 'application/json' in req.accept:
self.handler = self._to_json
elif 'application/xml' in req.accept:
self.handler = self._to_xml
else:
# This is the default
self.handler = self._to_json
def to_content_type(self, data):
def _get_serialize_handler(self, content_type):
handlers = {
"application/json": self._to_json,
"application/xml": self._to_xml,
}
try:
return handlers[content_type]
except Exception:
raise exception.InvalidContentType()
def serialize(self, data, content_type):
"""
Serialize a dictionary into a string.
The format of the string will be decided based on the Content Type
requested in self.environ: by Accept: header, or by URL suffix.
Serialize a dictionary into a string of the specified content type.
"""
return self.handler(data)
return self._get_serialize_handler(content_type)(data)
def deserialize(self, datastring):
def deserialize(self, datastring, content_type):
"""
Deserialize a string to a dictionary.
The string must be in the format of a supported MIME type.
"""
datastring = datastring.strip()
return self.get_deserialize_handler(content_type)(datastring)
def get_deserialize_handler(self, content_type):
handlers = {
"application/json": self._from_json,
"application/xml": self._from_xml,
}
try:
is_xml = (datastring[0] == '<')
if not is_xml:
return utils.loads(datastring)
return self._from_xml(datastring)
except:
return None
return handlers[content_type]
except Exception:
raise exception.InvalidContentType()
def _from_json(self, datastring):
return utils.loads(datastring)
def _from_xml(self, datastring):
xmldata = self.metadata.get('application/xml', {})
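Taken together, the reworked Serializer is driven entirely by an explicit content type rather than by sniffing the request; a hedged usage sketch:

serializer = Serializer()
body = serializer.serialize({'server': {'id': 1}}, 'application/json')
data = serializer.deserialize(body, 'application/json')

# unsupported types now raise instead of returning None
serializer.serialize({}, 'text/plain')    # exception.InvalidContentType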

View File

@ -30,13 +30,14 @@ import simplejson as json
def main(dom_id, command, only_this_vif=None):
xsls = execute("/usr/bin/xenstore-ls /local/domain/%s/vm-data/networking" \
% dom_id, True)
xsls = execute('/usr/bin/xenstore-ls',
               '/local/domain/%s/vm-data/networking' % dom_id,
               return_stdout=True)
macs = [line.split("=")[0].strip() for line in xsls.splitlines()]
for mac in macs:
xsr = "/usr/bin/xenstore-read /local/domain/%s/vm-data/networking/%s"
xsread = execute(xsr % (dom_id, mac), True)
xsread = execute('/usr/bin/xenstore-read',
                 '/local/domain/%s/vm-data/networking/%s' %
                 (dom_id, mac), return_stdout=True)
data = json.loads(xsread)
for ip in data['ips']:
if data["label"] == "public":
@ -51,9 +52,9 @@ def main(dom_id, command, only_this_vif=None):
apply_iptables_rules(command, params)
def execute(command, return_stdout=False):
def execute(*command, **kwargs):
    return_stdout = kwargs.get('return_stdout', False)
devnull = open(os.devnull, 'w')
proc = subprocess.Popen(command, shell=True, close_fds=True,
proc = subprocess.Popen(command, close_fds=True,
stdout=subprocess.PIPE, stderr=devnull)
devnull.close()
if return_stdout:
@ -67,45 +68,69 @@ def execute(command, return_stdout=False):
def apply_iptables_rules(command, params):
iptables = lambda rule: execute("/sbin/iptables %s" % rule)
iptables = lambda *rule: execute('/sbin/iptables', *rule)
iptables("-D FORWARD -m physdev --physdev-in %(VIF)s -s %(IP)s \
-j ACCEPT" % params)
iptables('-D', 'FORWARD', '-m', 'physdev',
'--physdev-in', '%(VIF)s' % params,
'-s', '%(IP)s' % params,
'-j', 'ACCEPT')
if command == 'online':
iptables("-A FORWARD -m physdev --physdev-in %(VIF)s -s %(IP)s \
-j ACCEPT" % params)
iptables('-A', 'FORWARD', '-m', 'physdev',
'--physdev-in', '%(VIF)s' % params,
'-s', '%(IP)s' % params,
'-j', 'ACCEPT')
def apply_arptables_rules(command, params):
arptables = lambda rule: execute("/sbin/arptables %s" % rule)
arptables = lambda *rule: execute('/sbin/arptables', *rule)
arptables("-D FORWARD --opcode Request --in-interface %(VIF)s \
--source-ip %(IP)s --source-mac %(MAC)s -j ACCEPT" % params)
arptables("-D FORWARD --opcode Reply --in-interface %(VIF)s \
--source-ip %(IP)s --source-mac %(MAC)s -j ACCEPT" % params)
arptables('-D', 'FORWARD', '--opcode', 'Request',
'--in-interface', '%(VIF)s' % params,
'--source-ip', '%(IP)s' % params,
'--source-mac', '%(MAC)s' % params,
'-j', 'ACCEPT')
arptables('-D', 'FORWARD', '--opcode', 'Reply',
'--in-interface', '%(VIF)s' % params,
'--source-ip', '%(IP)s' % params,
'--source-mac', '%(MAC)s' % params,
'-j', 'ACCEPT')
if command == 'online':
arptables("-A FORWARD --opcode Request --in-interface %(VIF)s \
--source-ip %(IP)s --source-mac %(MAC)s -j ACCEPT" % params)
arptables("-A FORWARD --opcode Reply --in-interface %(VIF)s \
--source-ip %(IP)s --source-mac %(MAC)s -j ACCEPT" % params)
arptables('-A', 'FORWARD', '--opcode', 'Request',
'--in-interface', '%(VIF)s' % params,
'--source-ip', '%(IP)s' % params,
'--source-mac', '%(MAC)s' % params,
'-j', 'ACCEPT')
arptables('-A', 'FORWARD', '--opcode', 'Reply',
'--in-interface', '%(VIF)s' % params,
'--source-ip', '%(IP)s' % params,
'--source-mac', '%(MAC)s' % params,
'-j', 'ACCEPT')
def apply_ebtables_rules(command, params):
ebtables = lambda rule: execute("/sbin/ebtables %s" % rule)
ebtables = lambda *rule: execute("/sbin/ebtables", *rule)
ebtables("-D FORWARD -p 0806 -o %(VIF)s --arp-ip-dst %(IP)s -j ACCEPT" %
params)
ebtables("-D FORWARD -p 0800 -o %(VIF)s --ip-dst %(IP)s -j ACCEPT" %
params)
ebtables('-D', 'FORWARD', '-p', '0806', '-o', params['VIF'],
'--arp-ip-dst', params['IP'],
'-j', 'ACCEPT')
ebtables('-D', 'FORWARD', '-p', '0800', '-o',
params['VIF'], '--ip-dst', params['IP'],
'-j', 'ACCEPT')
if command == 'online':
ebtables("-A FORWARD -p 0806 -o %(VIF)s --arp-ip-dst %(IP)s \
-j ACCEPT" % params)
ebtables("-A FORWARD -p 0800 -o %(VIF)s --ip-dst %(IP)s \
-j ACCEPT" % params)
ebtables('-A', 'FORWARD', '-p', '0806',
'-o', params['VIF'],
'--arp-ip-dst', params['IP'],
'-j', 'ACCEPT')
ebtables('-A', 'FORWARD', '-p', '0800',
'-o', params['VIF'],
'--ip-dst', params['IP'],
'-j', 'ACCEPT')
ebtables("-D FORWARD -s ! %(MAC)s -i %(VIF)s -j DROP" % params)
ebtables('-D', 'FORWARD', '-s', '!', params['MAC'],
'-i', params['VIF'], '-j', 'DROP')
if command == 'online':
ebtables("-I FORWARD 1 -s ! %(MAC)s -i %(VIF)s -j DROP" % params)
ebtables('-I', 'FORWARD', '1', '-s', '!', params['MAC'],
'-i', params['VIF'], '-j', 'DROP')
if __name__ == "__main__":

View File

@ -35,7 +35,7 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
import boto
import nova
from boto.ec2.connection import EC2Connection
from euca2ools import Euca2ool, InstanceValidationError, Util, ConnectionFailed
from euca2ools import Euca2ool, InstanceValidationError, Util
usage_string = """
Retrieves a url to an ajax console terminal
@ -147,7 +147,7 @@ def main():
try:
euca_conn = euca.make_connection()
except ConnectionFailed, e:
except Exception, e:
print e.message
sys.exit(1)
try: