merge trunk

Ken Pepple 2011-04-08 14:35:30 -07:00
commit e56e6694ec
57 changed files with 28509 additions and 18630 deletions

View File

@ -744,15 +744,6 @@ class ServiceCommands(object):
{"method": "update_available_resource"})
class LogCommands(object):
def request(self, request_id, logfile='/var/log/nova.log'):
"""Show all fields in the log for the given request. Assumes you
haven't changed the log format too much.
ARGS: request_id [logfile]"""
lines = utils.execute("cat %s | grep '\[%s '" % (logfile, request_id))
print re.sub('#012', "\n", "\n".join(lines))
class DbCommands(object):
"""Class for managing the database."""
@ -878,7 +869,7 @@ class InstanceTypeCommands(object):
elif name == "--all":
inst_types = instance_types.get_all_types(True)
else:
inst_types = instance_types.get_instance_type(name)
inst_types = instance_types.get_instance_type_by_name(name)
except exception.DBError, e:
_db_error(e)
if isinstance(inst_types.values()[0], dict):
@ -894,20 +885,17 @@ class ImageCommands(object):
def __init__(self, *args, **kwargs):
self.image_service = utils.import_object(FLAGS.image_service)
def _register(self, image_type, disk_format, container_format,
def _register(self, container_format, disk_format,
path, owner, name=None, is_public='T',
architecture='x86_64', kernel_id=None, ramdisk_id=None):
meta = {'is_public': True,
meta = {'is_public': (is_public == 'T'),
'name': name,
'disk_format': disk_format,
'container_format': container_format,
'disk_format': disk_format,
'properties': {'image_state': 'available',
'owner_id': owner,
'type': image_type,
'project_id': owner,
'architecture': architecture,
'image_location': 'local',
'is_public': (is_public == 'T')}}
print image_type, meta
'image_location': 'local'}}
if kernel_id:
meta['properties']['kernel_id'] = int(kernel_id)
if ramdisk_id:
@ -932,16 +920,18 @@ class ImageCommands(object):
ramdisk_id = self.ramdisk_register(ramdisk, owner, None,
is_public, architecture)
self.image_register(image, owner, name, is_public,
architecture, kernel_id, ramdisk_id)
architecture, 'ami', 'ami',
kernel_id, ramdisk_id)
def image_register(self, path, owner, name=None, is_public='T',
architecture='x86_64', kernel_id=None, ramdisk_id=None,
disk_format='ami', container_format='ami'):
architecture='x86_64', container_format='bare',
disk_format='raw', kernel_id=None, ramdisk_id=None):
"""Uploads an image into the image_service
arguments: path owner [name] [is_public='T'] [architecture='x86_64']
[container_format='bare'] [disk_format='raw']
[kernel_id=None] [ramdisk_id=None]
[disk_format='ami'] [container_format='ami']"""
return self._register('machine', disk_format, container_format, path,
"""
return self._register(container_format, disk_format, path,
owner, name, is_public, architecture,
kernel_id, ramdisk_id)
@ -950,7 +940,7 @@ class ImageCommands(object):
"""Uploads a kernel into the image_service
arguments: path owner [name] [is_public='T'] [architecture='x86_64']
"""
return self._register('kernel', 'aki', 'aki', path, owner, name,
return self._register('aki', 'aki', path, owner, name,
is_public, architecture)
def ramdisk_register(self, path, owner, name=None, is_public='T',
@ -958,7 +948,7 @@ class ImageCommands(object):
"""Uploads a ramdisk into the image_service
arguments: path owner [name] [is_public='T'] [architecture='x86_64']
"""
return self._register('ramdisk', 'ari', 'ari', path, owner, name,
return self._register('ari', 'ari', path, owner, name,
is_public, architecture)
def _lookup(self, old_image_id):
@ -975,16 +965,17 @@ class ImageCommands(object):
'ramdisk': 'ari'}
container_format = mapping[old['type']]
disk_format = container_format
if container_format == 'ami' and not old.get('kernelId'):
container_format = 'bare'
disk_format = 'raw'
new = {'disk_format': disk_format,
'container_format': container_format,
'is_public': True,
'is_public': old['isPublic'],
'name': old['imageId'],
'properties': {'image_state': old['imageState'],
'owner_id': old['imageOwnerId'],
'project_id': old['imageOwnerId'],
'architecture': old['architecture'],
'type': old['type'],
'image_location': old['imageLocation'],
'is_public': old['isPublic']}}
'image_location': old['imageLocation']}}
if old.get('kernelId'):
new['properties']['kernel_id'] = self._lookup(old['kernelId'])
if old.get('ramdiskId'):
@ -1049,7 +1040,6 @@ CATEGORIES = [
('network', NetworkCommands),
('vm', VmCommands),
('service', ServiceCommands),
('log', LogCommands),
('db', DbCommands),
('volume', VolumeCommands),
('instance_type', InstanceTypeCommands),
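For orientation, this is the shape of the Glance-style metadata dict that the reworked _register above now builds; the values are illustrative placeholders, and the old duplicated 'type'/'is_public' properties are gone in favor of a real top-level boolean.

meta = {'is_public': True,                  # is_public == 'T'
        'name': 'my-image',
        'container_format': 'ami',
        'disk_format': 'ami',
        'properties': {'image_state': 'available',
                       'project_id': 'someproject',
                       'architecture': 'x86_64',
                       'image_location': 'local'}}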

View File

@ -154,7 +154,7 @@ class CloudController(object):
floating_ip = db.instance_get_floating_address(ctxt,
instance_ref['id'])
ec2_id = ec2utils.id_to_ec2_id(instance_ref['id'])
image_ec2_id = self._image_ec2_id(instance_ref['image_id'], 'machine')
image_ec2_id = self._image_ec2_id(instance_ref['image_id'], 'ami')
data = {
'user-data': base64.b64decode(instance_ref['user_data']),
'meta-data': {
@ -184,7 +184,7 @@ class CloudController(object):
for image_type in ['kernel', 'ramdisk']:
if '%s_id' % image_type in instance_ref:
ec2_id = self._image_ec2_id(instance_ref['%s_id' % image_type],
image_type)
self._image_type(image_type))
data['meta-data']['%s-id' % image_type] = ec2_id
if False: # TODO(vish): store ancestor ids
@ -730,7 +730,10 @@ class CloudController(object):
instance['project_id'],
instance['host'])
i['productCodesSet'] = self._convert_to_set([], 'product_codes')
i['instanceType'] = instance['instance_type']
if instance['instance_type']:
i['instanceType'] = instance['instance_type'].get('name')
else:
i['instanceType'] = None
i['launchTime'] = instance['created_at']
i['amiLaunchIndex'] = instance['launch_index']
i['displayName'] = instance['display_name']
@ -815,7 +818,7 @@ class CloudController(object):
ramdisk = self._get_image(context, kwargs['ramdisk_id'])
kwargs['ramdisk_id'] = ramdisk['id']
instances = self.compute_api.create(context,
instance_type=instance_types.get_by_type(
instance_type=instance_types.get_instance_type_by_name(
kwargs.get('instance_type', None)),
image_id=self._get_image(context, kwargs['image_id'])['id'],
min_count=int(kwargs.get('min_count', max_count)),
@ -872,13 +875,27 @@ class CloudController(object):
self.compute_api.update(context, instance_id=instance_id, **kwargs)
return True
_type_prefix_map = {'machine': 'ami',
'kernel': 'aki',
'ramdisk': 'ari'}
@staticmethod
def _image_type(image_type):
"""Converts to a three letter image type.
def _image_ec2_id(self, image_id, image_type='machine'):
prefix = self._type_prefix_map[image_type]
template = prefix + '-%08x'
aki, kernel => aki
ari, ramdisk => ari
anything else => ami
"""
if image_type == 'kernel':
return 'aki'
if image_type == 'ramdisk':
return 'ari'
if image_type not in ['aki', 'ari']:
return 'ami'
return image_type
@staticmethod
def _image_ec2_id(image_id, image_type='ami'):
"""Returns image ec2_id using id and three letter type."""
template = image_type + '-%08x'
return ec2utils.id_to_ec2_id(int(image_id), template=template)
def _get_image(self, context, ec2_id):
@ -891,27 +908,34 @@ class CloudController(object):
def _format_image(self, image):
"""Convert from format defined by BaseImageService to S3 format."""
i = {}
image_type = image['properties'].get('type')
image_type = self._image_type(image.get('container_format'))
ec2_id = self._image_ec2_id(image.get('id'), image_type)
name = image.get('name')
i['imageId'] = ec2_id
kernel_id = image['properties'].get('kernel_id')
if kernel_id:
i['kernelId'] = self._image_ec2_id(kernel_id, 'kernel')
i['kernelId'] = self._image_ec2_id(kernel_id, 'aki')
ramdisk_id = image['properties'].get('ramdisk_id')
if ramdisk_id:
i['ramdiskId'] = self._image_ec2_id(ramdisk_id, 'ramdisk')
i['ramdiskId'] = self._image_ec2_id(ramdisk_id, 'ari')
i['imageOwnerId'] = image['properties'].get('owner_id')
if name:
i['imageLocation'] = "%s (%s)" % (image['properties'].
get('image_location'), name)
else:
i['imageLocation'] = image['properties'].get('image_location')
i['imageState'] = image['properties'].get('image_state')
# NOTE(vish): fallback status if image_state isn't set
state = image.get('status')
if state == 'active':
state = 'available'
i['imageState'] = image['properties'].get('image_state', state)
i['displayName'] = name
i['description'] = image.get('description')
i['imageType'] = image_type
i['isPublic'] = str(image['properties'].get('is_public', '')) == 'True'
display_mapping = {'aki': 'kernel',
'ari': 'ramdisk',
'ami': 'machine'}
i['imageType'] = display_mapping.get(image_type)
i['isPublic'] = image.get('is_public') == True
i['architecture'] = image['properties'].get('architecture')
return i
@ -943,8 +967,9 @@ class CloudController(object):
image_location = kwargs['name']
metadata = {'properties': {'image_location': image_location}}
image = self.image_service.create(context, metadata)
image_type = self._image_type(image.get('container_format'))
image_id = self._image_ec2_id(image['id'],
image['properties']['type'])
image_type)
msg = _("Registered image %(image_location)s with"
" id %(image_id)s") % locals()
LOG.audit(msg, context=context)
@ -959,7 +984,7 @@ class CloudController(object):
except exception.NotFound:
raise exception.NotFound(_('Image %s not found') % image_id)
result = {'imageId': image_id, 'launchPermission': []}
if image['properties']['is_public']:
if image['is_public']:
result['launchPermission'].append({'group': 'all'})
return result
@ -984,7 +1009,7 @@ class CloudController(object):
internal_id = image['id']
del(image['id'])
image['properties']['is_public'] = (operation_type == 'add')
image['is_public'] = (operation_type == 'add')
return self.image_service.update(context, internal_id, image)
def update_image(self, context, image_id, **kwargs):
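A standalone sketch of the conversion the new _image_type and _image_ec2_id helpers perform; the function names here are local to the example, and the real helper delegates the final formatting to ec2utils.id_to_ec2_id.

def image_type(container_format):
    """aki/kernel -> aki, ari/ramdisk -> ari, anything else -> ami."""
    if container_format == 'kernel':
        return 'aki'
    if container_format == 'ramdisk':
        return 'ari'
    if container_format not in ['aki', 'ari']:
        return 'ami'
    return container_format

def image_ec2_id(image_id, image_type='ami'):
    """Format an integer id with the three letter prefix, e.g. ami-00000001."""
    return (image_type + '-%08x') % int(image_id)

assert image_type('bare') == 'ami'
assert image_type('aki') == 'aki'
assert image_ec2_id(1) == 'ami-00000001'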

View File

@ -34,6 +34,7 @@ from nova.api.openstack import consoles
from nova.api.openstack import flavors
from nova.api.openstack import images
from nova.api.openstack import image_metadata
from nova.api.openstack import ips
from nova.api.openstack import limits
from nova.api.openstack import servers
from nova.api.openstack import server_metadata
@ -144,6 +145,11 @@ class APIRouterV10(APIRouter):
parent_resource=dict(member_name='server',
collection_name='servers'))
mapper.resource("ip", "ips", controller=ips.Controller(),
collection=dict(public='GET', private='GET'),
parent_resource=dict(member_name='server',
collection_name='servers'))
class APIRouterV11(APIRouter):
"""Define routes specific to OpenStack API V1.1."""

View File

@ -47,7 +47,7 @@ class Fault(webob.exc.HTTPException):
"""Generate a WSGI response based on the exception passed to ctor."""
# Replace the body with fault details.
code = self.wrapped_exc.status_int
fault_name = self._fault_names.get(code, "computeFault")
fault_name = self._fault_names.get(code, "cloudServersFault")
fault_data = {
fault_name: {
'code': code,

View File

@ -18,6 +18,7 @@
from webob import exc
from nova import flags
from nova import quota
from nova import utils
from nova import wsgi
from nova.api.openstack import common
@ -40,6 +41,15 @@ class Controller(common.OpenstackController):
metadata = image.get('properties', {})
return metadata
def _check_quota_limit(self, context, metadata):
if metadata is None:
return
num_metadata = len(metadata)
quota_metadata = quota.allowed_metadata_items(context, num_metadata)
if quota_metadata < num_metadata:
expl = _("Image metadata limit exceeded")
raise exc.HTTPBadRequest(explanation=expl)
def index(self, req, image_id):
"""Returns the list of metadata for a given instance"""
context = req.environ['nova.context']
@ -62,6 +72,7 @@ class Controller(common.OpenstackController):
if 'metadata' in body:
for key, value in body['metadata'].iteritems():
metadata[key] = value
self._check_quota_limit(context, metadata)
img['properties'] = metadata
self.image_service.update(context, image_id, img, None)
return dict(metadata=metadata)
@ -78,6 +89,7 @@ class Controller(common.OpenstackController):
img = self.image_service.show(context, image_id)
metadata = self._get_metadata(context, image_id, img)
metadata[id] = body[id]
self._check_quota_limit(context, metadata)
img['properties'] = metadata
self.image_service.update(context, image_id, img, None)
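A compact restatement of the guard added above, with the quota lookup stubbed out; note that create and update both call it only after merging the incoming body into the image's existing properties, so the limit applies to the combined set.

# allowed_items stands in for quota.allowed_metadata_items(context, len(metadata)).
def check_quota_limit(metadata, allowed_items):
    if metadata is None:
        return
    if allowed_items < len(metadata):
        raise ValueError("Image metadata limit exceeded")  # surfaces as HTTP 400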

72 nova/api/openstack/ips.py Normal file
View File

@ -0,0 +1,72 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from webob import exc
import nova
import nova.api.openstack.views.addresses
from nova.api.openstack import common
from nova.api.openstack import faults
class Controller(common.OpenstackController):
"""The servers addresses API controller for the Openstack API."""
_serialization_metadata = {
'application/xml': {
'list_collections': {
'public': {'item_name': 'ip', 'item_key': 'addr'},
'private': {'item_name': 'ip', 'item_key': 'addr'},
},
},
}
def __init__(self):
self.compute_api = nova.compute.API()
self.builder = nova.api.openstack.views.addresses.ViewBuilderV10()
def index(self, req, server_id):
try:
instance = self.compute_api.get(req.environ['nova.context'], server_id)
except nova.exception.NotFound:
return faults.Fault(exc.HTTPNotFound())
return {'addresses': self.builder.build(instance)}
def public(self, req, server_id):
try:
instance = self.compute_api.get(req.environ['nova.context'], server_id)
except nova.exception.NotFound:
return faults.Fault(exc.HTTPNotFound())
return {'public': self.builder.build_public_parts(instance)}
def private(self, req, server_id):
try:
instance = self.compute_api.get(req.environ['nova.context'], server_id)
except nova.exception.NotFound:
return faults.Fault(exc.HTTPNotFound())
return {'private': self.builder.build_private_parts(instance)}
def show(self, req, server_id, id):
return faults.Fault(exc.HTTPNotImplemented())
def create(self, req, server_id):
return faults.Fault(exc.HTTPNotImplemented())
def delete(self, req, server_id, id):
return faults.Fault(exc.HTTPNotImplemented())
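Together with the mapper.resource() call added to APIRouterV10 above, the controller serves these v1.0 paths; the payload shapes below mirror the new server tests later in this commit, and the addresses are illustrative.

# GET /v1.0/servers/<server_id>/ips
index_response = {'addresses': {'public': ['1.2.3.4'],
                                'private': ['192.168.0.3']}}
# GET /v1.0/servers/<server_id>/ips/public
public_response = {'public': ['1.2.3.4']}
# GET /v1.0/servers/<server_id>/ips/private
private_response = {'private': ['192.168.0.3']}
# show, create and delete on an individual address return 501 Not Implemented.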

View File

@ -55,6 +55,13 @@ class Controller(common.OpenstackController):
"imageRef"],
"link": ["rel", "type", "href"],
},
"dict_collections": {
"metadata": {"item_name": "meta", "item_key": "key"},
},
"list_collections": {
"public": {"item_name": "ip", "item_key": "addr"},
"private": {"item_name": "ip", "item_key": "addr"},
},
},
}
@ -63,15 +70,6 @@ class Controller(common.OpenstackController):
self._image_service = utils.import_object(FLAGS.image_service)
super(Controller, self).__init__()
def ips(self, req, id):
try:
instance = self.compute_api.get(req.environ['nova.context'], id)
except exception.NotFound:
return faults.Fault(exc.HTTPNotFound())
builder = self._get_addresses_view_builder(req)
return builder.build(instance)
def index(self, req):
""" Returns a list of server names and ids for a given user """
return self._items(req, is_detail=False)
@ -160,9 +158,11 @@ class Controller(common.OpenstackController):
name = name.strip()
try:
inst_type = \
instance_types.get_instance_type_by_flavor_id(flavor_id)
(inst,) = self.compute_api.create(
context,
instance_types.get_by_flavor_id(flavor_id),
inst_type,
image_id,
kernel_id=kernel_id,
ramdisk_id=ramdisk_id,
@ -175,7 +175,7 @@ class Controller(common.OpenstackController):
except quota.QuotaError as error:
self._handle_quota_error(error)
inst['instance_type'] = flavor_id
inst['instance_type'] = inst_type
inst['image_id'] = requested_image_id
builder = self._get_view_builder(req)
@ -565,7 +565,7 @@ class Controller(common.OpenstackController):
_("Cannot build from image %(image_id)s, status not active") %
locals())
if image_meta['properties']['disk_format'] != 'ami':
if image_meta.get('container_format') != 'ami':
return None, None
try:

View File

@ -28,10 +28,16 @@ class ViewBuilder(object):
class ViewBuilderV10(ViewBuilder):
def build(self, inst):
private_ips = utils.get_from_path(inst, 'fixed_ip/address')
public_ips = utils.get_from_path(inst, 'fixed_ip/floating_ips/address')
private_ips = self.build_private_parts(inst)
public_ips = self.build_public_parts(inst)
return dict(public=public_ips, private=private_ips)
def build_public_parts(self, inst):
return utils.get_from_path(inst, 'fixed_ip/floating_ips/address')
def build_private_parts(self, inst):
return utils.get_from_path(inst, 'fixed_ip/address')
class ViewBuilderV11(ViewBuilder):
def build(self, inst):

View File

@ -34,11 +34,11 @@ class ViewBuilder(object):
def _format_status(self, image):
"""Update the status field to standardize format."""
status_mapping = {
'pending': 'queued',
'decrypting': 'preparing',
'untarring': 'saving',
'available': 'active',
'killed': 'failed',
'pending': 'QUEUED',
'decrypting': 'PREPARING',
'untarring': 'SAVING',
'available': 'ACTIVE',
'killed': 'FAILED',
}
try:

View File

@ -82,7 +82,7 @@ class ViewBuilder(object):
# Return the metadata as a dictionary
metadata = {}
for item in inst.get('metadata', []):
metadata[item['key']] = item['value']
metadata[item['key']] = str(item['value'])
inst_dict['metadata'] = metadata
inst_dict['hostId'] = ''
@ -115,7 +115,7 @@ class ViewBuilderV10(ViewBuilder):
def _build_flavor(self, response, inst):
if 'instance_type' in dict(inst):
response['flavorId'] = inst['instance_type']
response['flavorId'] = inst['instance_type']['flavorid']
class ViewBuilderV11(ViewBuilder):
@ -134,7 +134,7 @@ class ViewBuilderV11(ViewBuilder):
def _build_flavor(self, response, inst):
if "instance_type" in dict(inst):
flavor_id = inst["instance_type"]
flavor_id = inst["instance_type"]['flavorid']
flavor_ref = self.flavor_builder.generate_href(flavor_id)
response["flavorRef"] = flavor_ref

View File

@ -114,8 +114,11 @@ class API(base.Base):
"""Create the number of instances requested if quota and
other arguments check out ok."""
type_data = instance_types.get_instance_type(instance_type)
num_instances = quota.allowed_instances(context, max_count, type_data)
if not instance_type:
instance_type = instance_types.get_default_instance_type()
num_instances = quota.allowed_instances(context, max_count,
instance_type)
if num_instances < min_count:
pid = context.project_id
LOG.warn(_("Quota exceeeded for %(pid)s,"
@ -201,10 +204,10 @@ class API(base.Base):
'user_id': context.user_id,
'project_id': context.project_id,
'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
'instance_type': instance_type,
'memory_mb': type_data['memory_mb'],
'vcpus': type_data['vcpus'],
'local_gb': type_data['local_gb'],
'instance_type_id': instance_type['id'],
'memory_mb': instance_type['memory_mb'],
'vcpus': instance_type['vcpus'],
'local_gb': instance_type['local_gb'],
'display_name': display_name,
'display_description': display_description,
'user_data': user_data or '',
@ -367,11 +370,15 @@ class API(base.Base):
instance_id)
raise
if (instance['state_description'] == 'terminating'):
if instance['state_description'] == 'terminating':
LOG.warning(_("Instance %s is already being terminated"),
instance_id)
return
if instance['state_description'] == 'migrating':
LOG.warning(_("Instance %s is being migrated"), instance_id)
return
self.update(context,
instance['id'],
state_description='terminating',
@ -521,8 +528,7 @@ class API(base.Base):
def resize(self, context, instance_id, flavor_id):
"""Resize a running instance."""
instance = self.db.instance_get(context, instance_id)
current_instance_type = self.db.instance_type_get_by_name(
context, instance['instance_type'])
current_instance_type = instance['instance_type']
new_instance_type = self.db.instance_type_get_by_flavor_id(
context, flavor_id)

View File

@ -59,8 +59,8 @@ def create(name, memory, vcpus, local_gb, flavorid, swap=0,
rxtx_quota=rxtx_quota,
rxtx_cap=rxtx_cap))
except exception.DBError, e:
LOG.exception(_('DB error: %s' % e))
raise exception.ApiError(_("Cannot create instance type: %s" % name))
LOG.exception(_('DB error: %s') % e)
raise exception.ApiError(_("Cannot create instance type: %s") % name)
def destroy(name):
@ -72,8 +72,8 @@ def destroy(name):
try:
db.instance_type_destroy(context.get_admin_context(), name)
except exception.NotFound:
LOG.exception(_('Instance type %s not found for deletion' % name))
raise exception.ApiError(_("Unknown instance type: %s" % name))
LOG.exception(_('Instance type %s not found for deletion') % name)
raise exception.ApiError(_("Unknown instance type: %s") % name)
def purge(name):
@ -85,8 +85,8 @@ def purge(name):
try:
db.instance_type_purge(context.get_admin_context(), name)
except exception.NotFound:
LOG.exception(_('Instance type %s not found for purge' % name))
raise exception.ApiError(_("Unknown instance type: %s" % name))
LOG.exception(_('Instance type %s not found for purge') % name)
raise exception.ApiError(_("Unknown instance type: %s") % name)
def get_all_types(inactive=0):
@ -101,41 +101,43 @@ def get_all_flavors():
return get_all_types(context.get_admin_context())
def get_instance_type(name):
def get_default_instance_type():
name = FLAGS.default_instance_type
try:
return get_instance_type_by_name(name)
except exception.DBError:
raise exception.ApiError(_("Unknown instance type: %s") % name)
def get_instance_type(id):
"""Retrieves single instance type by id"""
if id is None:
return get_default_instance_type()
try:
ctxt = context.get_admin_context()
return db.instance_type_get_by_id(ctxt, id)
except exception.DBError:
raise exception.ApiError(_("Unknown instance type: %s") % name)
def get_instance_type_by_name(name):
"""Retrieves single instance type by name"""
if name is None:
return FLAGS.default_instance_type
return get_default_instance_type()
try:
ctxt = context.get_admin_context()
inst_type = db.instance_type_get_by_name(ctxt, name)
return inst_type
return db.instance_type_get_by_name(ctxt, name)
except exception.DBError:
raise exception.ApiError(_("Unknown instance type: %s" % name))
raise exception.ApiError(_("Unknown instance type: %s") % name)
def get_by_type(instance_type):
"""retrieve instance type name"""
if instance_type is None:
return FLAGS.default_instance_type
try:
ctxt = context.get_admin_context()
inst_type = db.instance_type_get_by_name(ctxt, instance_type)
return inst_type['name']
except exception.DBError, e:
LOG.exception(_('DB error: %s' % e))
raise exception.ApiError(_("Unknown instance type: %s" %\
instance_type))
def get_by_flavor_id(flavor_id):
"""retrieve instance type's name by flavor_id"""
def get_instance_type_by_flavor_id(flavor_id):
"""retrieve instance type by flavor_id"""
if flavor_id is None:
return FLAGS.default_instance_type
return get_default_instance_type()
try:
ctxt = context.get_admin_context()
flavor = db.instance_type_get_by_flavor_id(ctxt, flavor_id)
return flavor['name']
return db.instance_type_get_by_flavor_id(ctxt, flavor_id)
except exception.DBError, e:
LOG.exception(_('DB error: %s' % e))
raise exception.ApiError(_("Unknown flavor: %s" % flavor_id))
LOG.exception(_('DB error: %s') % e)
raise exception.ApiError(_("Unknown flavor: %s") % flavor_id)
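A brief usage sketch of the renamed lookups, assuming a configured database seeded with the default instance types; each call now returns the full instance-type dict rather than a bare name.

from nova.compute import instance_types

default_type = instance_types.get_default_instance_type()
tiny = instance_types.get_instance_type_by_name('m1.tiny')
by_flavor = instance_types.get_instance_type_by_flavor_id(1)

# Callers index into the dict instead of passing a name string around.
print tiny['name'], tiny['memory_mb'], tiny['vcpus'], tiny['local_gb']
assert by_flavor['name'] == 'm1.tiny'   # mirrors test_get_by_flavor_id below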

View File

@ -1097,12 +1097,8 @@ class ComputeManager(manager.SchedulerDependentManager):
db_instance['id'],
vm_state)
if vm_state == power_state.SHUTOFF:
# TODO(soren): This is what the compute manager does when you
# terminate an instance. At some point I figure we'll have a
# "terminated" state and some sort of cleanup job that runs
# occasionally, cleaning them out.
self.db.instance_destroy(context, db_instance['id'])
# NOTE(justinsb): We no longer auto-remove SHUTOFF instances
# It's quite hard to get them back when we do.
# Are there VMs not in the DB?
for vm_not_found_in_db in vms_not_found_in_db:

View File

@ -1124,6 +1124,11 @@ def instance_type_get_all(context, inactive=False):
return IMPL.instance_type_get_all(context, inactive)
def instance_type_get_by_id(context, id):
"""Get instance type by id"""
return IMPL.instance_type_get_by_id(context, id)
def instance_type_get_by_name(context, name):
"""Get instance type by name"""
return IMPL.instance_type_get_by_name(context, name)

View File

@ -831,6 +831,7 @@ def instance_get(context, instance_id, session=None):
options(joinedload('volumes')).\
options(joinedload_all('fixed_ip.network')).\
options(joinedload('metadata')).\
options(joinedload('instance_type')).\
filter_by(id=instance_id).\
filter_by(deleted=can_read_deleted(context)).\
first()
@ -840,6 +841,7 @@ def instance_get(context, instance_id, session=None):
options(joinedload_all('security_groups.rules')).\
options(joinedload('volumes')).\
options(joinedload('metadata')).\
options(joinedload('instance_type')).\
filter_by(project_id=context.project_id).\
filter_by(id=instance_id).\
filter_by(deleted=False).\
@ -859,6 +861,7 @@ def instance_get_all(context):
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ip.network')).\
options(joinedload('instance_type')).\
filter_by(deleted=can_read_deleted(context)).\
all()
@ -870,6 +873,7 @@ def instance_get_all_by_user(context, user_id):
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ip.network')).\
options(joinedload('instance_type')).\
filter_by(deleted=can_read_deleted(context)).\
filter_by(user_id=user_id).\
all()
@ -882,6 +886,7 @@ def instance_get_all_by_host(context, host):
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ip.network')).\
options(joinedload('instance_type')).\
filter_by(host=host).\
filter_by(deleted=can_read_deleted(context)).\
all()
@ -896,6 +901,7 @@ def instance_get_all_by_project(context, project_id):
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ip.network')).\
options(joinedload('instance_type')).\
filter_by(project_id=project_id).\
filter_by(deleted=can_read_deleted(context)).\
all()
@ -910,6 +916,7 @@ def instance_get_all_by_reservation(context, reservation_id):
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ip.network')).\
options(joinedload('instance_type')).\
filter_by(reservation_id=reservation_id).\
filter_by(deleted=can_read_deleted(context)).\
all()
@ -918,6 +925,7 @@ def instance_get_all_by_reservation(context, reservation_id):
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ip.network')).\
options(joinedload('instance_type')).\
filter_by(project_id=context.project_id).\
filter_by(reservation_id=reservation_id).\
filter_by(deleted=False).\
@ -930,6 +938,7 @@ def instance_get_project_vpn(context, project_id):
return session.query(models.Instance).\
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
options(joinedload('instance_type')).\
filter_by(project_id=project_id).\
filter_by(image_id=FLAGS.vpn_image_id).\
filter_by(deleted=can_read_deleted(context)).\
@ -2370,6 +2379,19 @@ def instance_type_get_all(context, inactive=False):
raise exception.NotFound
@require_context
def instance_type_get_by_id(context, id):
"""Returns a dict describing specific instance_type"""
session = get_session()
inst_type = session.query(models.InstanceTypes).\
filter_by(id=id).\
first()
if not inst_type:
raise exception.NotFound(_("No instance type with id %s") % id)
else:
return dict(inst_type)
@require_context
def instance_type_get_by_name(context, name):
"""Returns a dict describing specific instance_type"""

View File

@ -0,0 +1,84 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import *
from sqlalchemy.sql import text
from migrate import *
#from nova import log as logging
meta = MetaData()
c_instance_type = Column('instance_type',
String(length=255, convert_unicode=False,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False),
nullable=True)
c_instance_type_id = Column('instance_type_id',
String(length=255, convert_unicode=False,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False),
nullable=True)
instance_types = Table('instance_types', meta,
Column('id', Integer(), primary_key=True, nullable=False),
Column('name',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
unique=True))
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True,
autoload_with=migrate_engine)
instances.create_column(c_instance_type_id)
recs = migrate_engine.execute(instance_types.select())
for row in recs:
type_id = row[0]
type_name = row[1]
migrate_engine.execute(instances.update()\
.where(instances.c.instance_type == type_name)\
.values(instance_type_id=type_id))
instances.c.instance_type.drop()
def downgrade(migrate_engine):
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True,
autoload_with=migrate_engine)
instances.create_column(c_instance_type)
recs = migrate_engine.execute(instance_types.select())
for row in recs:
type_id = row[0]
type_name = row[1]
migrate_engine.execute(instances.update()\
.where(instances.c.instance_type_id == type_id)\
.values(instance_type=type_name))
instances.c.instance_type_id.drop()
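A self-contained sketch of the data move upgrade() performs, run against an in-memory SQLite database with both tables trimmed to the relevant columns; it uses the same SQLAlchemy idioms as the script above.

from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String

engine = create_engine('sqlite://')
meta = MetaData(bind=engine)
instance_types = Table('instance_types', meta,
                       Column('id', Integer, primary_key=True),
                       Column('name', String(255)))
instances = Table('instances', meta,
                  Column('id', Integer, primary_key=True),
                  Column('instance_type', String(255)),
                  Column('instance_type_id', Integer))
meta.create_all()

engine.execute(instance_types.insert(), [{'id': 2, 'name': 'm1.tiny'}])
engine.execute(instances.insert(), [{'id': 1, 'instance_type': 'm1.tiny'}])

# The same UPDATE the upgrade() above issues, one instance type at a time.
for type_id, type_name in engine.execute(instance_types.select()):
    engine.execute(instances.update()
                   .where(instances.c.instance_type == type_name)
                   .values(instance_type_id=type_id))

assert list(engine.execute(instances.select()))[0]['instance_type_id'] == 2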

View File

@ -209,7 +209,7 @@ class Instance(BASE, NovaBase):
hostname = Column(String(255))
host = Column(String(255)) # , ForeignKey('hosts.id'))
instance_type = Column(String(255))
instance_type_id = Column(String(255))
user_data = Column(Text)
@ -268,6 +268,12 @@ class InstanceTypes(BASE, NovaBase):
rxtx_quota = Column(Integer, nullable=False, default=0)
rxtx_cap = Column(Integer, nullable=False, default=0)
instances = relationship(Instance,
backref=backref('instance_type', uselist=False),
foreign_keys=id,
primaryjoin='and_(Instance.instance_type_id == '
'InstanceTypes.id)')
class Volume(BASE, NovaBase):
"""Represents a block storage device that can be attached to a vm."""

View File

@ -44,10 +44,10 @@ class FakeImageService(service.BaseImageService):
'created_at': timestamp,
'updated_at': timestamp,
'status': 'active',
'type': 'machine',
'container_format': 'ami',
'disk_format': 'raw',
'properties': {'kernel_id': FLAGS.null_kernel,
'ramdisk_id': FLAGS.null_kernel,
'disk_format': 'ami'}
'ramdisk_id': FLAGS.null_kernel}
}
self.create(None, image)
super(FakeImageService, self).__init__()

View File

@ -151,6 +151,8 @@ class GlanceImageService(service.BaseImageService):
:raises NotFound if the image does not exist.
"""
# NOTE(vish): show is to check if image is available
self.show(context, image_id)
try:
image_meta = self.client.update_image(image_id, image_meta, data)
except glance_exception.NotFound:
@ -165,6 +167,8 @@ class GlanceImageService(service.BaseImageService):
:raises NotFound if the image does not exist.
"""
# NOTE(vish): show is to check if image is available
self.show(context, image_id)
try:
result = self.client.delete_image(image_id)
except glance_exception.NotFound:
@ -186,33 +190,6 @@ class GlanceImageService(service.BaseImageService):
image_meta = _convert_timestamps_to_datetimes(image_meta)
return image_meta
@staticmethod
def _is_image_available(context, image_meta):
"""
Images are always available if they are public or if the user is an
admin.
Otherwise, we filter by project_id (if present) and then fall-back to
images owned by user.
"""
# FIXME(sirp): We should be filtering by user_id on the Glance side
# for security; however, we can't do that until we get authn/authz
# sorted out. Until then, filtering in Nova.
if image_meta['is_public'] or context.is_admin:
return True
properties = image_meta['properties']
if context.project_id and ('project_id' in properties):
return str(properties['project_id']) == str(project_id)
try:
user_id = properties['user_id']
except KeyError:
return False
return str(user_id) == str(context.user_id)
# utility functions
def _convert_timestamps_to_datetimes(image_meta):

View File

@ -84,7 +84,10 @@ class LocalImageService(service.BaseImageService):
def show(self, context, image_id):
try:
with open(self._path_to(image_id)) as metadata_file:
return json.load(metadata_file)
image_meta = json.load(metadata_file)
if not self._is_image_available(context, image_meta):
raise exception.NotFound
return image_meta
except (IOError, ValueError):
raise exception.NotFound
@ -119,10 +122,15 @@ class LocalImageService(service.BaseImageService):
image_path = self._path_to(image_id, None)
if not os.path.exists(image_path):
os.mkdir(image_path)
return self.update(context, image_id, metadata, data)
return self._store(context, image_id, metadata, data)
def update(self, context, image_id, metadata, data=None):
"""Replace the contents of the given image with the new data."""
# NOTE(vish): show is to check if image is available
self.show(context, image_id)
return self._store(context, image_id, metadata, data)
def _store(self, context, image_id, metadata, data=None):
metadata['id'] = image_id
try:
if data:
@ -140,9 +148,11 @@ class LocalImageService(service.BaseImageService):
def delete(self, context, image_id):
"""Delete the given image.
Raises OSError if the image does not exist.
Raises NotFound if the image does not exist.
"""
# NOTE(vish): show is to check if image is available
self.show(context, image_id)
try:
shutil.rmtree(self._path_to(image_id, None))
except (IOError, ValueError):

View File

@ -46,6 +46,7 @@ flags.DEFINE_string('image_decryption_dir', '/tmp',
class S3ImageService(service.BaseImageService):
"""Wraps an existing image service to support s3 based register"""
def __init__(self, service=None, *args, **kwargs):
if service == None:
service = utils.import_object(FLAGS.image_service)
@ -58,52 +59,23 @@ class S3ImageService(service.BaseImageService):
return image
def delete(self, context, image_id):
# FIXME(vish): call to show is to check filter
self.show(context, image_id)
self.service.delete(context, image_id)
def update(self, context, image_id, metadata, data=None):
# FIXME(vish): call to show is to check filter
self.show(context, image_id)
image = self.service.update(context, image_id, metadata, data)
return image
def index(self, context):
images = self.service.index(context)
# FIXME(vish): index doesn't filter so we do it manually
return self._filter(context, images)
return self.service.index(context)
def detail(self, context):
images = self.service.detail(context)
# FIXME(vish): detail doesn't filter so we do it manually
return self._filter(context, images)
@classmethod
def _is_visible(cls, context, image):
return (context.is_admin
or context.project_id == image['properties']['owner_id']
or image['properties']['is_public'] == 'True')
@classmethod
def _filter(cls, context, images):
filtered = []
for image in images:
if not cls._is_visible(context, image):
continue
filtered.append(image)
return filtered
return self.service.detail(context)
def show(self, context, image_id):
image = self.service.show(context, image_id)
if not self._is_visible(context, image):
raise exception.NotFound
return image
return self.service.show(context, image_id)
def show_by_name(self, context, name):
image = self.service.show_by_name(context, name)
if not self._is_visible(context, image):
raise exception.NotFound
return image
return self.service.show_by_name(context, name)
@staticmethod
def _conn(context):
@ -167,7 +139,7 @@ class S3ImageService(service.BaseImageService):
arch = 'x86_64'
properties = metadata['properties']
properties['owner_id'] = context.project_id
properties['project_id'] = context.project_id
properties['architecture'] = arch
if kernel_id:
@ -176,8 +148,6 @@ class S3ImageService(service.BaseImageService):
if ramdisk_id:
properties['ramdisk_id'] = ec2utils.ec2_id_to_id(ramdisk_id)
properties['is_public'] = False
properties['type'] = image_type
metadata.update({'disk_format': image_format,
'container_format': image_format,
'status': 'queued',

View File

@ -136,6 +136,33 @@ class BaseImageService(object):
"""
raise NotImplementedError
@staticmethod
def _is_image_available(context, image_meta):
"""
Images are always available if they are public or if the user is an
admin.
Otherwise, we filter by project_id (if present) and then fall-back to
images owned by user.
"""
# FIXME(sirp): We should be filtering by user_id on the Glance side
# for security; however, we can't do that until we get authn/authz
# sorted out. Until then, filtering in Nova.
if image_meta['is_public'] or context.is_admin:
return True
properties = image_meta['properties']
if context.project_id and ('project_id' in properties):
return str(properties['project_id']) == str(context.project_id)
try:
user_id = properties['user_id']
except KeyError:
return False
return str(user_id) == str(context.user_id)
@classmethod
def _translate_to_base(cls, metadata):
"""Return a metadata dictionary that is BaseImageService compliant.

View File

@ -391,6 +391,12 @@ def unbind_floating_ip(floating_ip):
'dev', FLAGS.public_interface)
def ensure_metadata_ip():
"""Sets up local metadata ip"""
_execute('sudo', 'ip', 'addr', 'add', '169.254.169.254/32',
'scope', 'link', 'dev', 'lo', check_exit_code=False)
def ensure_vlan_forward(public_ip, port, private_ip):
"""Sets up forwarding rules for vlan"""
iptables_manager.ipv4['filter'].add_rule("FORWARD",
@ -442,6 +448,7 @@ def ensure_vlan(vlan_num):
return interface
@utils.synchronized('ensure_bridge', external=True)
def ensure_bridge(bridge, interface, net_attrs=None):
"""Create a bridge unless it already exists.
@ -495,6 +502,8 @@ def ensure_bridge(bridge, interface, net_attrs=None):
fields = line.split()
if fields and fields[0] == "0.0.0.0" and fields[-1] == interface:
gateway = fields[1]
_execute('sudo', 'route', 'del', 'default', 'gw', gateway,
'dev', interface, check_exit_code=False)
out, err = _execute('sudo', 'ip', 'addr', 'show', 'dev', interface,
'scope', 'global')
for line in out.split("\n"):
@ -504,7 +513,7 @@ def ensure_bridge(bridge, interface, net_attrs=None):
_execute(*_ip_bridge_cmd('del', params, fields[-1]))
_execute(*_ip_bridge_cmd('add', params, bridge))
if gateway:
_execute('sudo', 'route', 'add', '0.0.0.0', 'gw', gateway)
_execute('sudo', 'route', 'add', 'default', 'gw', gateway)
out, err = _execute('sudo', 'brctl', 'addif', bridge, interface,
check_exit_code=False)

View File

@ -126,6 +126,7 @@ class NetworkManager(manager.SchedulerDependentManager):
standalone service.
"""
self.driver.init_host()
self.driver.ensure_metadata_ip()
# Set up networking for the projects for which we're already
# the designated network host.
ctxt = context.get_admin_context()

View File

@ -53,13 +53,13 @@ class APITest(test.TestCase):
#api.application = succeed
api = self._wsgi_app(succeed)
resp = Request.blank('/').get_response(api)
self.assertFalse('computeFault' in resp.body, resp.body)
self.assertFalse('cloudServersFault' in resp.body, resp.body)
self.assertEqual(resp.status_int, 200, resp.body)
#api.application = raise_webob_exc
api = self._wsgi_app(raise_webob_exc)
resp = Request.blank('/').get_response(api)
self.assertFalse('computeFault' in resp.body, resp.body)
self.assertFalse('cloudServersFault' in resp.body, resp.body)
self.assertEqual(resp.status_int, 404, resp.body)
#api.application = raise_api_fault
@ -71,11 +71,11 @@ class APITest(test.TestCase):
#api.application = fail
api = self._wsgi_app(fail)
resp = Request.blank('/').get_response(api)
self.assertTrue('{"computeFault' in resp.body, resp.body)
self.assertTrue('{"cloudServersFault' in resp.body, resp.body)
self.assertEqual(resp.status_int, 500, resp.body)
#api.application = fail
api = self._wsgi_app(fail)
resp = Request.blank('/.xml').get_response(api)
self.assertTrue('<computeFault' in resp.body, resp.body)
self.assertTrue('<cloudServersFault' in resp.body, resp.body)
self.assertEqual(resp.status_int, 500, resp.body)

View File

@ -45,7 +45,6 @@ class ImageMetaDataTest(unittest.TestCase):
'is_public': True,
'deleted_at': None,
'properties': {
'type': 'ramdisk',
'key1': 'value1',
'key2': 'value2'
},
@ -62,11 +61,23 @@ class ImageMetaDataTest(unittest.TestCase):
'is_public': True,
'deleted_at': None,
'properties': {
'type': 'ramdisk',
'key1': 'value1',
'key2': 'value2'
},
'size': 5882349},
{'status': 'active',
'name': 'image3',
'deleted': False,
'container_format': None,
'created_at': '2011-03-22T17:40:15',
'disk_format': None,
'updated_at': '2011-03-22T17:40:15',
'id': '3',
'location': 'file:///var/lib/glance/images/2',
'is_public': True,
'deleted_at': None,
'properties': {},
'size': 5882349},
]
def setUp(self):
@ -77,6 +88,10 @@ class ImageMetaDataTest(unittest.TestCase):
fakes.FakeAuthManager.auth_data = {}
fakes.FakeAuthDatabase.data = {}
fakes.stub_out_auth(self.stubs)
# NOTE(dprince) max out properties/metadata in image 3 for testing
img3 = self.IMAGE_FIXTURES[2]
for num in range(FLAGS.quota_metadata_items):
img3['properties']['key%i' % num] = "blah"
fakes.stub_out_glance(self.stubs, self.IMAGE_FIXTURES)
def tearDown(self):
@ -164,3 +179,25 @@ class ImageMetaDataTest(unittest.TestCase):
req.method = 'DELETE'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(404, res.status_int)
def test_too_many_metadata_items_on_create(self):
data = {"metadata": {}}
for num in range(FLAGS.quota_metadata_items + 1):
data['metadata']['key%i' % num] = "blah"
json_string = str(data).replace("\'", "\"")
req = webob.Request.blank('/v1.1/images/2/meta')
req.environ['api.version'] = '1.1'
req.method = 'POST'
req.body = json_string
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)
def test_too_many_metadata_items_on_put(self):
req = webob.Request.blank('/v1.1/images/3/meta/blah')
req.environ['api.version'] = '1.1'
req.method = 'PUT'
req.body = '{"blah": "blah"}'
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)

View File

@ -32,6 +32,7 @@ from nova import test
import nova.api.openstack
from nova.api.openstack import servers
import nova.compute.api
from nova.compute import instance_types
import nova.db.api
from nova.db.sqlalchemy.models import Instance
from nova.db.sqlalchemy.models import InstanceMetadata
@ -71,13 +72,19 @@ def instance_address(context, instance_id):
return None
def stub_instance(id, user_id=1, private_address=None, public_addresses=None):
def stub_instance(id, user_id=1, private_address=None, public_addresses=None,
host=None):
metadata = []
metadata.append(InstanceMetadata(key='seq', value=id))
inst_type = instance_types.get_instance_type_by_flavor_id(1)
if public_addresses == None:
public_addresses = list()
if host != None:
host = str(host)
instance = {
"id": id,
"admin_pass": "",
@ -95,8 +102,8 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None):
"vcpus": 0,
"local_gb": 0,
"hostname": "",
"host": None,
"instance_type": "1",
"host": host,
"instance_type": dict(inst_type),
"user_data": "",
"reservation_id": "",
"mac_address": "",
@ -192,6 +199,26 @@ class ServersTest(test.TestCase):
print res_dict['server']
self.assertEqual(res_dict['server']['links'], expected_links)
def test_get_server_by_id_with_addresses_xml(self):
private = "192.168.0.3"
public = ["1.2.3.4"]
new_return_server = return_server_with_addresses(private, public)
self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
req = webob.Request.blank('/v1.0/servers/1')
req.headers['Accept'] = 'application/xml'
res = req.get_response(fakes.wsgi_app())
dom = minidom.parseString(res.body)
server = dom.childNodes[0]
self.assertEquals(server.nodeName, 'server')
self.assertEquals(server.getAttribute('id'), '1')
self.assertEquals(server.getAttribute('name'), 'server1')
(public,) = server.getElementsByTagName('public')
(ip,) = public.getElementsByTagName('ip')
self.assertEquals(ip.getAttribute('addr'), '1.2.3.4')
(private,) = server.getElementsByTagName('private')
(ip,) = private.getElementsByTagName('ip')
self.assertEquals(ip.getAttribute('addr'), '192.168.0.3')
def test_get_server_by_id_with_addresses(self):
private = "192.168.0.3"
public = ["1.2.3.4"]
@ -208,6 +235,84 @@ class ServersTest(test.TestCase):
self.assertEqual(len(addresses["private"]), 1)
self.assertEqual(addresses["private"][0], private)
def test_get_server_addresses_V10(self):
private = '192.168.0.3'
public = ['1.2.3.4']
new_return_server = return_server_with_addresses(private, public)
self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
req = webob.Request.blank('/v1.0/servers/1/ips')
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res_dict, {
'addresses': {'public': public, 'private': [private]}})
def test_get_server_addresses_xml_V10(self):
private_expected = "192.168.0.3"
public_expected = ["1.2.3.4"]
new_return_server = return_server_with_addresses(private_expected,
public_expected)
self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
req = webob.Request.blank('/v1.0/servers/1/ips')
req.headers['Accept'] = 'application/xml'
res = req.get_response(fakes.wsgi_app())
dom = minidom.parseString(res.body)
(addresses,) = dom.childNodes
self.assertEquals(addresses.nodeName, 'addresses')
(public,) = addresses.getElementsByTagName('public')
(ip,) = public.getElementsByTagName('ip')
self.assertEquals(ip.getAttribute('addr'), public_expected[0])
(private,) = addresses.getElementsByTagName('private')
(ip,) = private.getElementsByTagName('ip')
self.assertEquals(ip.getAttribute('addr'), private_expected)
def test_get_server_addresses_public_V10(self):
private = "192.168.0.3"
public = ["1.2.3.4"]
new_return_server = return_server_with_addresses(private, public)
self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
req = webob.Request.blank('/v1.0/servers/1/ips/public')
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res_dict, {'public': public})
def test_get_server_addresses_private_V10(self):
private = "192.168.0.3"
public = ["1.2.3.4"]
new_return_server = return_server_with_addresses(private, public)
self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
req = webob.Request.blank('/v1.0/servers/1/ips/private')
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res_dict, {'private': [private]})
def test_get_server_addresses_public_xml_V10(self):
private = "192.168.0.3"
public = ["1.2.3.4"]
new_return_server = return_server_with_addresses(private, public)
self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
req = webob.Request.blank('/v1.0/servers/1/ips/public')
req.headers['Accept'] = 'application/xml'
res = req.get_response(fakes.wsgi_app())
dom = minidom.parseString(res.body)
(public_node,) = dom.childNodes
self.assertEquals(public_node.nodeName, 'public')
(ip,) = public_node.getElementsByTagName('ip')
self.assertEquals(ip.getAttribute('addr'), public[0])
def test_get_server_addresses_private_xml_V10(self):
private = "192.168.0.3"
public = ["1.2.3.4"]
new_return_server = return_server_with_addresses(private, public)
self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
req = webob.Request.blank('/v1.0/servers/1/ips/private')
req.headers['Accept'] = 'application/xml'
res = req.get_response(fakes.wsgi_app())
dom = minidom.parseString(res.body)
(private_node,) = dom.childNodes
self.assertEquals(private_node.nodeName, 'private')
(ip,) = private_node.getElementsByTagName('ip')
self.assertEquals(ip.getAttribute('addr'), private)
def test_get_server_by_id_with_addresses_v11(self):
private = "192.168.0.3"
public = ["1.2.3.4"]
@ -618,6 +723,22 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 404)
def test_get_all_server_details_xml_v1_0(self):
req = webob.Request.blank('/v1.0/servers/detail')
req.headers['Accept'] = 'application/xml'
res = req.get_response(fakes.wsgi_app())
print res.body
dom = minidom.parseString(res.body)
for i, server in enumerate(dom.getElementsByTagName('server')):
self.assertEqual(server.getAttribute('id'), str(i))
self.assertEqual(server.getAttribute('hostId'), '')
self.assertEqual(server.getAttribute('name'), 'server%d' % i)
self.assertEqual(server.getAttribute('imageId'), '10')
self.assertEqual(server.getAttribute('status'), 'BUILD')
(meta,) = server.getElementsByTagName('meta')
self.assertEqual(meta.getAttribute('key'), 'seq')
self.assertEqual(meta.firstChild.data.strip(), str(i))
def test_get_all_server_details_v1_0(self):
req = webob.Request.blank('/v1.0/servers/detail')
res = req.get_response(fakes.wsgi_app())
@ -628,9 +749,9 @@ class ServersTest(test.TestCase):
self.assertEqual(s['hostId'], '')
self.assertEqual(s['name'], 'server%d' % i)
self.assertEqual(s['imageId'], '10')
self.assertEqual(s['flavorId'], '1')
self.assertEqual(s['flavorId'], 1)
self.assertEqual(s['status'], 'BUILD')
self.assertEqual(s['metadata']['seq'], i)
self.assertEqual(s['metadata']['seq'], str(i))
def test_get_all_server_details_v1_1(self):
req = webob.Request.blank('/v1.1/servers/detail')
@ -644,7 +765,7 @@ class ServersTest(test.TestCase):
self.assertEqual(s['imageRef'], 'http://localhost/v1.1/images/10')
self.assertEqual(s['flavorRef'], 'http://localhost/v1.1/flavors/1')
self.assertEqual(s['status'], 'BUILD')
self.assertEqual(s['metadata']['seq'], i)
self.assertEqual(s['metadata']['seq'], str(i))
def test_get_all_server_details_with_host(self):
'''
@ -654,12 +775,8 @@ class ServersTest(test.TestCase):
instances - 2 on one host and 3 on another.
'''
def stub_instance(id, user_id=1):
return Instance(id=id, state=0, image_id=10, user_id=user_id,
display_name='server%s' % id, host='host%s' % (id % 2))
def return_servers_with_host(context, user_id=1):
return [stub_instance(i) for i in xrange(5)]
return [stub_instance(i, 1, None, None, i % 2) for i in xrange(5)]
self.stubs.Set(nova.db.api, 'instance_get_all_by_user',
return_servers_with_host)
@ -677,7 +794,8 @@ class ServersTest(test.TestCase):
self.assertEqual(s['id'], i)
self.assertEqual(s['hostId'], host_ids[i % 2])
self.assertEqual(s['name'], 'server%d' % i)
self.assertEqual(s['imageId'], 10)
self.assertEqual(s['imageId'], '10')
self.assertEqual(s['flavorId'], 1)
def test_server_pause(self):
FLAGS.allow_admin_api = True
@ -1525,29 +1643,27 @@ class TestGetKernelRamdiskFromImage(test.TestCase):
def test_not_ami(self):
"""Anything other than ami should return no kernel and no ramdisk"""
image_meta = {'id': 1, 'status': 'active',
'properties': {'disk_format': 'vhd'}}
image_meta = {'id': 1, 'status': 'active', 'container_format': 'vhd'}
kernel_id, ramdisk_id = self._get_k_r(image_meta)
self.assertEqual(kernel_id, None)
self.assertEqual(ramdisk_id, None)
def test_ami_no_kernel(self):
"""If an ami is missing a kernel it should raise NotFound"""
image_meta = {'id': 1, 'status': 'active',
'properties': {'disk_format': 'ami', 'ramdisk_id': 1}}
image_meta = {'id': 1, 'status': 'active', 'container_format': 'ami',
'properties': {'ramdisk_id': 1}}
self.assertRaises(exception.NotFound, self._get_k_r, image_meta)
def test_ami_no_ramdisk(self):
"""If an ami is missing a ramdisk it should raise NotFound"""
image_meta = {'id': 1, 'status': 'active',
'properties': {'disk_format': 'ami', 'kernel_id': 1}}
image_meta = {'id': 1, 'status': 'active', 'container_format': 'ami',
'properties': {'kernel_id': 1}}
self.assertRaises(exception.NotFound, self._get_k_r, image_meta)
def test_ami_kernel_ramdisk_present(self):
"""Return IDs if both kernel and ramdisk are present"""
image_meta = {'id': 1, 'status': 'active',
'properties': {'disk_format': 'ami', 'kernel_id': 1,
'ramdisk_id': 2}}
image_meta = {'id': 1, 'status': 'active', 'container_format': 'ami',
'properties': {'kernel_id': 1, 'ramdisk_id': 2}}
kernel_id, ramdisk_id = self._get_k_r(image_meta)
self.assertEqual(kernel_id, 1)
self.assertEqual(ramdisk_id, 2)

View File

@ -28,29 +28,34 @@ def stub_out_db_instance_api(stubs, injected=True):
"""Stubs out the db API for creating Instances."""
INSTANCE_TYPES = {
'm1.tiny': dict(memory_mb=512,
'm1.tiny': dict(id=2,
memory_mb=512,
vcpus=1,
local_gb=0,
flavorid=1,
rxtx_cap=1),
'm1.small': dict(memory_mb=2048,
'm1.small': dict(id=5,
memory_mb=2048,
vcpus=1,
local_gb=20,
flavorid=2,
rxtx_cap=2),
'm1.medium':
dict(memory_mb=4096,
dict(id=1,
memory_mb=4096,
vcpus=2,
local_gb=40,
flavorid=3,
rxtx_cap=3),
'm1.large': dict(memory_mb=8192,
'm1.large': dict(id=3,
memory_mb=8192,
vcpus=4,
local_gb=80,
flavorid=4,
rxtx_cap=4),
'm1.xlarge':
dict(memory_mb=16384,
dict(id=4,
memory_mb=16384,
vcpus=8,
local_gb=160,
flavorid=5,
@ -107,6 +112,12 @@ def stub_out_db_instance_api(stubs, injected=True):
def fake_instance_type_get_by_name(context, name):
return INSTANCE_TYPES[name]
def fake_instance_type_get_by_id(context, id):
for name, inst_type in INSTANCE_TYPES.iteritems():
if str(inst_type['id']) == str(id):
return inst_type
return None
def fake_network_get_by_instance(context, instance_id):
# Even instance numbers are on vlan networks
if instance_id % 2 == 0:
@ -136,6 +147,7 @@ def stub_out_db_instance_api(stubs, injected=True):
fake_network_get_all_by_instance)
stubs.Set(db, 'instance_type_get_all', fake_instance_type_get_all)
stubs.Set(db, 'instance_type_get_by_name', fake_instance_type_get_by_name)
stubs.Set(db, 'instance_type_get_by_id', fake_instance_type_get_by_id)
stubs.Set(db, 'instance_get_fixed_address',
fake_instance_get_fixed_address)
stubs.Set(db, 'instance_get_fixed_address_v6',

View File

@ -209,17 +209,17 @@ class TestMutatorDateTimeTests(BaseGlanceTest):
self.assertDateTimesEmpty(image_meta)
def test_update_handles_datetimes(self):
self.client.images = {'image1': self._make_datetime_fixture()}
self.client.update_response = self._make_datetime_fixture()
dummy_id = 'dummy_id'
dummy_meta = {}
image_meta = self.service.update(self.context, 'dummy_id', dummy_meta)
image_meta = self.service.update(self.context, 'image1', dummy_meta)
self.assertDateTimesFilled(image_meta)
def test_update_handles_none_datetimes(self):
self.client.images = {'image1': self._make_datetime_fixture()}
self.client.update_response = self._make_none_datetime_fixture()
dummy_id = 'dummy_id'
dummy_meta = {}
image_meta = self.service.update(self.context, 'dummy_id', dummy_meta)
image_meta = self.service.update(self.context, 'image1', dummy_meta)
self.assertDateTimesEmpty(image_meta)
def _make_datetime_fixture(self):

View File

@ -247,6 +247,37 @@ class CloudTestCase(test.TestCase):
self.assertRaises(NotFound, describe_images,
self.context, ['ami-fake'])
def test_describe_image_attribute(self):
describe_image_attribute = self.cloud.describe_image_attribute
def fake_show(meh, context, id):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
'type': 'machine'}, 'is_public': True}
self.stubs.Set(local.LocalImageService, 'show', fake_show)
self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show)
result = describe_image_attribute(self.context, 'ami-00000001',
'launchPermission')
self.assertEqual([{'group': 'all'}], result['launchPermission'])
def test_modify_image_attribute(self):
modify_image_attribute = self.cloud.modify_image_attribute
def fake_show(meh, context, id):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
'type': 'machine'}, 'is_public': False}
def fake_update(meh, context, image_id, metadata, data=None):
return metadata
self.stubs.Set(local.LocalImageService, 'show', fake_show)
self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show)
self.stubs.Set(local.LocalImageService, 'update', fake_update)
result = modify_image_attribute(self.context, 'ami-00000001',
'launchPermission', 'add',
user_group=['all'])
self.assertEqual(True, result['is_public'])
def test_console_output(self):
instance_type = FLAGS.default_instance_type
max_count = 1

View File

@ -84,7 +84,8 @@ class ComputeTestCase(test.TestCase):
inst['launch_time'] = '10'
inst['user_id'] = self.user.id
inst['project_id'] = self.project.id
inst['instance_type'] = 'm1.tiny'
type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
inst['instance_type_id'] = type_id
inst['mac_address'] = utils.generate_mac()
inst['ami_launch_index'] = 0
inst.update(params)
@@ -132,7 +133,7 @@ class ComputeTestCase(test.TestCase):
cases = [dict(), dict(display_name=None)]
for instance in cases:
ref = self.compute_api.create(self.context,
FLAGS.default_instance_type, None, **instance)
instance_types.get_default_instance_type(), None, **instance)
try:
self.assertNotEqual(ref[0]['display_name'], None)
finally:
@@ -143,7 +144,7 @@ class ComputeTestCase(test.TestCase):
group = self._create_group()
ref = self.compute_api.create(
self.context,
instance_type=FLAGS.default_instance_type,
instance_type=instance_types.get_default_instance_type(),
image_id=None,
security_group=['testgroup'])
try:
@@ -161,7 +162,7 @@ class ComputeTestCase(test.TestCase):
ref = self.compute_api.create(
self.context,
instance_type=FLAGS.default_instance_type,
instance_type=instance_types.get_default_instance_type(),
image_id=None,
security_group=['testgroup'])
try:
@@ -177,7 +178,7 @@ class ComputeTestCase(test.TestCase):
ref = self.compute_api.create(
self.context,
instance_type=FLAGS.default_instance_type,
instance_type=instance_types.get_default_instance_type(),
image_id=None,
security_group=['testgroup'])
@@ -359,8 +360,9 @@ class ComputeTestCase(test.TestCase):
instance_id = self._create_instance()
self.compute.run_instance(self.context, instance_id)
inst_type = instance_types.get_instance_type_by_name('m1.xlarge')
db.instance_update(self.context, instance_id,
{'instance_type': 'm1.xlarge'})
{'instance_type_id': inst_type['id']})
self.assertRaises(exception.ApiError, self.compute_api.resize,
context, instance_id, 1)
@@ -380,8 +382,8 @@ class ComputeTestCase(test.TestCase):
self.compute.terminate_instance(context, instance_id)
def test_get_by_flavor_id(self):
type = instance_types.get_by_flavor_id(1)
self.assertEqual(type, 'm1.tiny')
type = instance_types.get_instance_type_by_flavor_id(1)
self.assertEqual(type['name'], 'm1.tiny')
def test_resize_same_source_fails(self):
"""Ensure instance fails to migrate when source and destination are
@@ -664,4 +666,5 @@ class ComputeTestCase(test.TestCase):
instances = db.instance_get_all(context.get_admin_context())
LOG.info(_("After force-killing instances: %s"), instances)
self.assertEqual(len(instances), 0)
self.assertEqual(len(instances), 1)
self.assertEqual(power_state.SHUTOFF, instances[0]['state'])

View File

@@ -62,7 +62,7 @@ class ConsoleTestCase(test.TestCase):
inst['launch_time'] = '10'
inst['user_id'] = self.user.id
inst['project_id'] = self.project.id
inst['instance_type'] = 'm1.tiny'
inst['instance_type_id'] = 1
inst['mac_address'] = utils.generate_mac()
inst['ami_launch_index'] = 0
return db.instance_create(self.context, inst)['id']

View File

@@ -40,7 +40,11 @@ class InstanceTypeTestCase(test.TestCase):
max_flavorid = session.query(models.InstanceTypes).\
order_by("flavorid desc").\
first()
max_id = session.query(models.InstanceTypes).\
order_by("id desc").\
first()
self.flavorid = max_flavorid["flavorid"] + 1
self.id = max_id["id"] + 1
self.name = str(int(time.time()))
def test_instance_type_create_then_delete(self):
@@ -53,7 +57,7 @@ class InstanceTypeTestCase(test.TestCase):
'instance type was not created')
instance_types.destroy(self.name)
self.assertEqual(1,
instance_types.get_instance_type(self.name)["deleted"])
instance_types.get_instance_type(self.id)["deleted"])
self.assertEqual(starting_inst_list, instance_types.get_all_types())
instance_types.purge(self.name)
self.assertEqual(len(starting_inst_list),

View File

@@ -67,7 +67,7 @@ class QuotaTestCase(test.TestCase):
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user.id
inst['project_id'] = self.project.id
inst['instance_type'] = 'm1.large'
inst['instance_type_id'] = '3' # m1.large
inst['vcpus'] = cores
inst['mac_address'] = utils.generate_mac()
return db.instance_create(self.context, inst)['id']
@@ -124,11 +124,12 @@ class QuotaTestCase(test.TestCase):
for i in range(FLAGS.quota_instances):
instance_id = self._create_instance()
instance_ids.append(instance_id)
inst_type = instance_types.get_instance_type_by_name('m1.small')
self.assertRaises(quota.QuotaError, compute.API().create,
self.context,
min_count=1,
max_count=1,
instance_type='m1.small',
instance_type=inst_type,
image_id=1)
for instance_id in instance_ids:
db.instance_destroy(self.context, instance_id)
@@ -137,11 +138,12 @@ class QuotaTestCase(test.TestCase):
instance_ids = []
instance_id = self._create_instance(cores=4)
instance_ids.append(instance_id)
inst_type = instance_types.get_instance_type_by_name('m1.small')
self.assertRaises(quota.QuotaError, compute.API().create,
self.context,
min_count=1,
max_count=1,
instance_type='m1.small',
instance_type=inst_type,
image_id=1)
for instance_id in instance_ids:
db.instance_destroy(self.context, instance_id)
@@ -192,11 +194,12 @@ class QuotaTestCase(test.TestCase):
metadata = {}
for i in range(FLAGS.quota_metadata_items + 1):
metadata['key%s' % i] = 'value%s' % i
inst_type = instance_types.get_instance_type_by_name('m1.small')
self.assertRaises(quota.QuotaError, compute.API().create,
self.context,
min_count=1,
max_count=1,
instance_type='m1.small',
instance_type=inst_type,
image_id='fake',
metadata=metadata)
@@ -207,13 +210,15 @@ class QuotaTestCase(test.TestCase):
def _create_with_injected_files(self, files):
api = compute.API(image_service=self.StubImageService())
inst_type = instance_types.get_instance_type_by_name('m1.small')
api.create(self.context, min_count=1, max_count=1,
instance_type='m1.small', image_id='fake',
instance_type=inst_type, image_id='fake',
injected_files=files)
def test_no_injected_files(self):
api = compute.API(image_service=self.StubImageService())
api.create(self.context, instance_type='m1.small', image_id='fake')
inst_type = instance_types.get_instance_type_by_name('m1.small')
api.create(self.context, instance_type=inst_type, image_id='fake')
def test_max_injected_files(self):
files = []

View File

@@ -263,7 +263,7 @@ class SimpleDriverTestCase(test.TestCase):
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user.id
inst['project_id'] = self.project.id
inst['instance_type'] = 'm1.tiny'
inst['instance_type_id'] = '1'
inst['mac_address'] = utils.generate_mac()
inst['vcpus'] = kwargs.get('vcpus', 1)
inst['ami_launch_index'] = 0

View File

@@ -140,7 +140,7 @@ class LibvirtConnTestCase(test.TestCase):
'vcpus': 2,
'project_id': 'fake',
'bridge': 'br101',
'instance_type': 'm1.small'}
'instance_type_id': '5'} # m1.small
def lazy_load_library_exists(self):
"""check if libvirt is available."""
@@ -479,7 +479,7 @@ class LibvirtConnTestCase(test.TestCase):
fake_timer = FakeTime()
self.create_fake_libvirt_mock(nwfilterLookupByName=fake_raise)
self.create_fake_libvirt_mock()
instance_ref = db.instance_create(self.context, self.test_instance)
# Start test
@@ -488,6 +488,7 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_conn.LibvirtConnection(False)
conn.firewall_driver.setattr('setup_basic_filtering', fake_none)
conn.firewall_driver.setattr('prepare_instance_filter', fake_none)
conn.firewall_driver.setattr('instance_filter_exists', fake_none)
conn.ensure_filtering_rules_for_instance(instance_ref,
time=fake_timer)
except exception.Error, e:

View File

@@ -106,7 +106,7 @@ class VolumeTestCase(test.TestCase):
inst['launch_time'] = '10'
inst['user_id'] = 'fake'
inst['project_id'] = 'fake'
inst['instance_type'] = 'm1.tiny'
inst['instance_type_id'] = '2' # m1.tiny
inst['mac_address'] = utils.generate_mac()
inst['ami_launch_index'] = 0
instance_id = db.instance_create(self.context, inst)['id']

View File

@@ -80,7 +80,7 @@ class XenAPIVolumeTestCase(test.TestCase):
'image_id': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'instance_type': 'm1.large',
'instance_type_id': '3', # m1.large
'mac_address': 'aa:bb:cc:dd:ee:ff',
'os_type': 'linux'}
@@ -289,11 +289,11 @@ class XenAPIVMTestCase(test.TestCase):
'enabled':'1'}],
'ip6s': [{'ip': 'fe80::a8bb:ccff:fedd:eeff',
'netmask': '120',
'enabled': '1',
'gateway': 'fe80::a00:1'}],
'enabled': '1'}],
'mac': 'aa:bb:cc:dd:ee:ff',
'dns': ['10.0.0.2'],
'gateway': '10.0.0.1'})
'gateway': '10.0.0.1',
'gateway6': 'fe80::a00:1'})
def check_vm_params_for_windows(self):
self.assertEquals(self.vm['platform']['nx'], 'true')
@@ -328,7 +328,7 @@ class XenAPIVMTestCase(test.TestCase):
self.assertEquals(self.vm['HVM_boot_policy'], '')
def _test_spawn(self, image_id, kernel_id, ramdisk_id,
instance_type="m1.large", os_type="linux",
instance_type_id="3", os_type="linux",
instance_id=1, check_injection=False):
stubs.stubout_loopingcall_start(self.stubs)
values = {'id': instance_id,
@@ -337,7 +337,7 @@ class XenAPIVMTestCase(test.TestCase):
'image_id': image_id,
'kernel_id': kernel_id,
'ramdisk_id': ramdisk_id,
'instance_type': instance_type,
'instance_type_id': instance_type_id,
'mac_address': 'aa:bb:cc:dd:ee:ff',
'os_type': os_type}
instance = db.instance_create(self.context, values)
@@ -349,7 +349,7 @@ class XenAPIVMTestCase(test.TestCase):
FLAGS.xenapi_image_service = 'glance'
self.assertRaises(Exception,
self._test_spawn,
1, 2, 3, "m1.xlarge")
1, 2, 3, "4") # m1.xlarge
def test_spawn_raw_objectstore(self):
FLAGS.xenapi_image_service = 'objectstore'
@@ -523,7 +523,7 @@ class XenAPIVMTestCase(test.TestCase):
'image_id': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'instance_type': 'm1.large',
'instance_type_id': '3', # m1.large
'mac_address': 'aa:bb:cc:dd:ee:ff',
'os_type': 'linux'}
instance = db.instance_create(self.context, values)
@@ -580,7 +580,7 @@ class XenAPIMigrateInstance(test.TestCase):
'kernel_id': None,
'ramdisk_id': None,
'local_gb': 5,
'instance_type': 'm1.large',
'instance_type_id': '3', # m1.large
'mac_address': 'aa:bb:cc:dd:ee:ff',
'os_type': 'linux'}

View File

@@ -117,6 +117,8 @@ flags.DEFINE_integer('live_migration_bandwidth', 0,
'Define live migration behavior')
flags.DEFINE_string('qemu_img', 'qemu-img',
'binary to use for qemu-img commands')
flags.DEFINE_bool('start_guests_on_host_boot', False,
'Whether to restart guests when the host reboots')
def get_connection(read_only):
@@ -169,34 +171,34 @@ def _get_network_info(instance):
instance['id'])
network_info = []
def ip_dict(ip):
return {
"ip": ip.address,
"netmask": network["netmask"],
"enabled": "1"}
def ip6_dict(ip6):
prefix = ip6.network.cidr_v6
mac = instance.mac_address
return {
"ip": utils.to_global_ipv6(prefix, mac),
"netmask": ip6.network.netmask_v6,
"gateway": ip6.network.gateway_v6,
"enabled": "1"}
for network in networks:
network_ips = [ip for ip in ip_addresses
if ip.network_id == network.id]
if ip['network_id'] == network['id']]
def ip_dict(ip):
return {
'ip': ip['address'],
'netmask': network['netmask'],
'enabled': '1'}
def ip6_dict():
prefix = network['cidr_v6']
mac = instance['mac_address']
return {
'ip': utils.to_global_ipv6(prefix, mac),
'netmask': network['netmask_v6'],
'enabled': '1'}
mapping = {
'label': network['label'],
'gateway': network['gateway'],
'mac': instance.mac_address,
'mac': instance['mac_address'],
'dns': [network['dns']],
'ips': [ip_dict(ip) for ip in network_ips]}
if FLAGS.use_ipv6:
mapping['ip6s'] = [ip6_dict(ip) for ip in network_ips]
mapping['ip6s'] = [ip6_dict()]
mapping['gateway6'] = network['gateway_v6']
network_info.append((network, mapping))
return network_info
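# Editor's note (not part of the change): after this refactor each entry in
# network_info is a (network, mapping) tuple, where mapping looks roughly like
#
#     {'label': ..., 'gateway': ..., 'mac': 'aa:bb:cc:dd:ee:ff', 'dns': [...],
#      'ips': [{'ip': ..., 'netmask': ..., 'enabled': '1'}],
#      'ip6s': [{'ip': ..., 'netmask': ..., 'enabled': '1'}],  # when use_ipv6
#      'gateway6': ...}
#
# which is the same shape the updated test fixtures earlier in this diff expect.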
@@ -231,12 +233,8 @@ class LibvirtConnection(driver.ComputeDriver):
{'name': instance['name'], 'state': state})
db.instance_set_state(ctxt, instance['id'], state)
if state == power_state.SHUTOFF:
# TODO(soren): This is what the compute manager does when you
# terminate # an instance. At some point I figure we'll have a
# "terminated" state and some sort of cleanup job that runs
# occasionally, cleaning them out.
db.instance_destroy(ctxt, instance['id'])
# NOTE(justinsb): We no longer delete SHUTOFF instances,
# the user may want to power them back on
if state != power_state.RUNNING:
continue
@@ -424,7 +422,6 @@ class LibvirtConnection(driver.ComputeDriver):
'container_format': base['container_format'],
'is_public': False,
'properties': {'architecture': base['architecture'],
'type': base['type'],
'name': '%s.%s' % (base['name'], image_id),
'kernel_id': instance['kernel_id'],
'image_location': 'snapshot',
@@ -475,7 +472,7 @@ class LibvirtConnection(driver.ComputeDriver):
xml = self.to_xml(instance)
self.firewall_driver.setup_basic_filtering(instance)
self.firewall_driver.prepare_instance_filter(instance)
self._conn.createXML(xml, 0)
self._create_new_domain(xml)
self.firewall_driver.apply_instance_filter(instance)
timer = utils.LoopingCall(f=None)
@@ -523,7 +520,7 @@ class LibvirtConnection(driver.ComputeDriver):
'kernel_id': FLAGS.rescue_kernel_id,
'ramdisk_id': FLAGS.rescue_ramdisk_id}
self._create_image(instance, xml, '.rescue', rescue_images)
self._conn.createXML(xml, 0)
self._create_new_domain(xml)
timer = utils.LoopingCall(f=None)
@@ -566,10 +563,15 @@ class LibvirtConnection(driver.ComputeDriver):
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
self._create_image(instance, xml, network_info)
self._conn.createXML(xml, 0)
domain = self._create_new_domain(xml)
LOG.debug(_("instance %s: is running"), instance['name'])
self.firewall_driver.apply_instance_filter(instance)
if FLAGS.start_guests_on_host_boot:
LOG.debug(_("instance %s: setting autostart ON") %
instance['name'])
domain.setAutostart(1)
timer = utils.LoopingCall(f=None)
def _wait_for_boot():
@@ -797,7 +799,10 @@ class LibvirtConnection(driver.ComputeDriver):
root_fname = '%08x' % int(disk_images['image_id'])
size = FLAGS.minimum_root_size
if inst['instance_type'] == 'm1.tiny' or suffix == '.rescue':
inst_type_id = inst['instance_type_id']
inst_type = instance_types.get_instance_type(inst_type_id)
if inst_type['name'] == 'm1.tiny' or suffix == '.rescue':
size = None
root_fname += "_sm"
@@ -809,14 +814,13 @@ class LibvirtConnection(driver.ComputeDriver):
user=user,
project=project,
size=size)
type_data = instance_types.get_instance_type(inst['instance_type'])
if type_data['local_gb']:
if inst_type['local_gb']:
self._cache_image(fn=self._create_local,
target=basepath('disk.local'),
fname="local_%s" % type_data['local_gb'],
fname="local_%s" % inst_type['local_gb'],
cow=FLAGS.use_cow_images,
local_gb=type_data['local_gb'])
local_gb=inst_type['local_gb'])
# For now, we assume that if we're not using a kernel, we're using a
# partitioned disk image where the target partition is the first
@@ -950,8 +954,8 @@ class LibvirtConnection(driver.ComputeDriver):
nics.append(self._get_nic_for_xml(network,
mapping))
# FIXME(vish): stick this in db
instance_type_name = instance['instance_type']
instance_type = instance_types.get_instance_type(instance_type_name)
inst_type_id = instance['instance_type_id']
inst_type = instance_types.get_instance_type(inst_type_id)
if FLAGS.use_cow_images:
driver_type = 'qcow2'
@@ -962,10 +966,10 @@ class LibvirtConnection(driver.ComputeDriver):
'name': instance['name'],
'basepath': os.path.join(FLAGS.instances_path,
instance['name']),
'memory_kb': instance_type['memory_mb'] * 1024,
'vcpus': instance_type['vcpus'],
'memory_kb': inst_type['memory_mb'] * 1024,
'vcpus': inst_type['vcpus'],
'rescue': rescue,
'local': instance_type['local_gb'],
'local': inst_type['local_gb'],
'driver_type': driver_type,
'nics': nics}
@@ -987,11 +991,22 @@ class LibvirtConnection(driver.ComputeDriver):
return xml
def get_info(self, instance_name):
# NOTE(justinsb): When libvirt isn't running / can't connect, we get:
# libvir: Remote error : unable to connect to
# '/var/run/libvirt/libvirt-sock', libvirtd may need to be started:
# No such file or directory
try:
virt_dom = self._conn.lookupByName(instance_name)
except:
raise exception.NotFound(_("Instance %s not found")
% instance_name)
except libvirt.libvirtError as e:
errcode = e.get_error_code()
if errcode == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.NotFound(_("Instance %s not found")
% instance_name)
LOG.warning(_("Error from libvirt during lookup. "
"Code=%(errcode)s Error=%(e)s") %
locals())
raise
(state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info()
return {'state': state,
'max_mem': max_mem,
@@ -999,6 +1014,24 @@ class LibvirtConnection(driver.ComputeDriver):
'num_cpu': num_cpu,
'cpu_time': cpu_time}
def _create_new_domain(self, xml, persistent=True, launch_flags=0):
# NOTE(justinsb): libvirt has two types of domain:
# * a transient domain disappears when the guest is shutdown
# or the host is rebooted.
# * a permanent domain is not automatically deleted
# NOTE(justinsb): Even for ephemeral instances, transient seems risky
if persistent:
# To create a persistent domain, first define it, then launch it.
domain = self._conn.defineXML(xml)
domain.createWithFlags(launch_flags)
else:
# createXML call creates a transient domain
domain = self._conn.createXML(xml, launch_flags)
return domain
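# Editor's illustration (not part of this change): how a caller would pick
# between the two modes, assuming a LibvirtConnection instance `conn` and a
# libvirt domain XML string `xml` (both hypothetical names):
#
#     dom = conn._create_new_domain(xml)                     # define, then start
#     dom = conn._create_new_domain(xml, persistent=False)   # transient only
#
# Only the persistent (defined) form survives a host reboot, which is what the
# setAutostart(1) call guarded by start_guests_on_host_boot relies on.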
def get_diagnostics(self, instance_name):
raise exception.ApiError(_("diagnostics are not supported "
"for libvirt"))
@@ -1402,18 +1435,13 @@ class LibvirtConnection(driver.ComputeDriver):
# wait for completion
timeout_count = range(FLAGS.live_migration_retry_count)
while timeout_count:
try:
filter_name = 'nova-instance-%s' % instance_ref.name
self._conn.nwfilterLookupByName(filter_name)
if self.firewall_driver.instance_filter_exists(instance_ref):
break
except libvirt.libvirtError:
timeout_count.pop()
if len(timeout_count) == 0:
ec2_id = instance_ref['hostname']
iname = instance_ref.name
msg = _('Timeout migrating for %(ec2_id)s(%(iname)s)')
raise exception.Error(msg % locals())
time.sleep(1)
timeout_count.pop()
if len(timeout_count) == 0:
msg = _('Timeout migrating for %s. nwfilter not found.')
raise exception.Error(msg % instance_ref.name)
time.sleep(1)
def live_migration(self, ctxt, instance_ref, dest,
post_method, recover_method):
@@ -1542,6 +1570,10 @@ class FirewallDriver(object):
"""
raise NotImplementedError()
def instance_filter_exists(self, instance):
"""Check nova-instance-instance-xxx exists"""
raise NotImplementedError()
class NWFilterFirewall(FirewallDriver):
"""
@@ -1849,6 +1881,21 @@ class NWFilterFirewall(FirewallDriver):
return 'nova-instance-%s' % (instance['name'])
return 'nova-instance-%s-%s' % (instance['name'], nic_id)
def instance_filter_exists(self, instance):
"""Check nova-instance-instance-xxx exists"""
network_info = _get_network_info(instance)
for (network, mapping) in network_info:
nic_id = mapping['mac'].replace(':', '')
instance_filter_name = self._instance_filter_name(instance, nic_id)
try:
self._conn.nwfilterLookupByName(instance_filter_name)
except libvirt.libvirtError:
name = instance.name
LOG.debug(_('The nwfilter(%(instance_filter_name)s) for '
'%(name)s is not found.') % locals())
return False
return True
class IptablesFirewallDriver(FirewallDriver):
def __init__(self, execute=None, **kwargs):
@@ -2038,6 +2085,10 @@ class IptablesFirewallDriver(FirewallDriver):
return ipv4_rules, ipv6_rules
def instance_filter_exists(self, instance):
"""Check nova-instance-instance-xxx exists"""
return self.nwfilter.instance_filter_exists(instance)
def refresh_security_group_members(self, security_group):
pass

View File

@@ -101,8 +101,8 @@ class VMHelper(HelperBase):
3. Using hardware virtualization
"""
instance_type = instance_types.\
get_instance_type(instance.instance_type)
inst_type_id = instance.instance_type_id
instance_type = instance_types.get_instance_type(inst_type_id)
mem = str(long(instance_type['memory_mb']) * 1024 * 1024)
vcpus = str(instance_type['vcpus'])
rec = {
@@ -169,8 +169,8 @@ class VMHelper(HelperBase):
@classmethod
def ensure_free_mem(cls, session, instance):
instance_type = instance_types.get_instance_type(
instance.instance_type)
inst_type_id = instance.instance_type_id
instance_type = instance_types.get_instance_type(inst_type_id)
mem = long(instance_type['memory_mb']) * 1024 * 1024
#get free memory from host
host = session.get_xenapi_host()
@@ -1130,7 +1130,7 @@ def _prepare_injectables(inst, networks_info):
'dns': dns,
'address_v6': ip_v6 and ip_v6['ip'] or '',
'netmask_v6': ip_v6 and ip_v6['netmask'] or '',
'gateway_v6': ip_v6 and ip_v6['gateway'] or '',
'gateway_v6': ip_v6 and info['gateway6'] or '',
'use_ipv6': FLAGS.use_ipv6}
interfaces_info.append(interface_info)

View File

@@ -176,7 +176,7 @@ class VMOps(object):
vdi_ref, network_info)
self.create_vifs(vm_ref, network_info)
self.inject_network_info(instance, vm_ref, network_info)
self.inject_network_info(instance, network_info, vm_ref)
return vm_ref
def _spawn(self, instance, vm_ref):
@@ -802,8 +802,10 @@ class VMOps(object):
instance['id'])
networks = db.network_get_all_by_instance(admin_context,
instance['id'])
flavor = db.instance_type_get_by_name(admin_context,
instance['instance_type'])
inst_type = db.instance_type_get_by_id(admin_context,
instance['instance_type_id'])
network_info = []
for network in networks:
network_IPs = [ip for ip in IPs if ip.network_id == network.id]
@@ -814,12 +816,11 @@ class VMOps(object):
"netmask": network["netmask"],
"enabled": "1"}
def ip6_dict(ip6):
def ip6_dict():
return {
"ip": utils.to_global_ipv6(network['cidr_v6'],
instance['mac_address']),
"netmask": network['netmask_v6'],
"gateway": network['gateway_v6'],
"enabled": "1"}
info = {
@@ -827,23 +828,41 @@ class VMOps(object):
'gateway': network['gateway'],
'broadcast': network['broadcast'],
'mac': instance.mac_address,
'rxtx_cap': flavor['rxtx_cap'],
'rxtx_cap': inst_type['rxtx_cap'],
'dns': [network['dns']],
'ips': [ip_dict(ip) for ip in network_IPs]}
if network['cidr_v6']:
info['ip6s'] = [ip6_dict(ip) for ip in network_IPs]
info['ip6s'] = [ip6_dict()]
if network['gateway_v6']:
info['gateway6'] = network['gateway_v6']
network_info.append((network, info))
return network_info
def inject_network_info(self, instance, vm_ref, network_info):
#TODO(tr3buchet) remove this shim with nova-multi-nic
def inject_network_info(self, instance, network_info=None, vm_ref=None):
"""
shim in place which makes inject_network_info work without being
passed network_info.
shim goes away after nova-multi-nic
"""
if not network_info:
network_info = self._get_network_info(instance)
self._inject_network_info(instance, network_info, vm_ref)
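# Editor's sketch (not part of the change): the two call styles the shim
# accepts, assuming a VMOps instance `vmops` (hypothetical name):
#
#     vmops.inject_network_info(instance)                         # info looked up
#     vmops.inject_network_info(instance, network_info, vm_ref)   # info supplied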
def _inject_network_info(self, instance, network_info, vm_ref=None):
"""
Generate the network info and make calls to place it into the
xenstore and the xenstore param list.
vm_ref can be passed in because it will sometimes be different than
what VMHelper.lookup(session, instance.name) will find (ex: rescue)
"""
logging.debug(_("injecting network info to xs for vm: |%s|"), vm_ref)
# this function raises if vm_ref is not a vm_opaque_ref
self._session.get_xenapi().VM.get_record(vm_ref)
if vm_ref:
# this function raises if vm_ref is not a vm_opaque_ref
self._session.get_xenapi().VM.get_record(vm_ref)
else:
vm_ref = VMHelper.lookup(self._session, instance.name)
for (network, info) in network_info:
location = 'vm-data/networking/%s' % info['mac'].replace(':', '')
@@ -875,8 +894,10 @@ class VMOps(object):
VMHelper.create_vif(self._session, vm_ref, network_ref,
mac_address, device, rxtx_cap)
def reset_network(self, instance, vm_ref):
def reset_network(self, instance, vm_ref=None):
"""Creates uuid arg to pass to make_agent_call and calls it."""
if not vm_ref:
vm_ref = VMHelper.lookup(self._session, instance.name)
args = {'id': str(uuid.uuid4())}
# TODO(tr3buchet): fix function call after refactor
#resp = self._make_agent_call('resetnetwork', instance, '', args)

View File

@@ -63,6 +63,7 @@ import xmlrpclib
from eventlet import event
from eventlet import tpool
from eventlet import timeout
from nova import context
from nova import db
@@ -140,6 +141,9 @@ flags.DEFINE_bool('xenapi_remap_vbd_dev', False,
flags.DEFINE_string('xenapi_remap_vbd_dev_prefix', 'sd',
'Specify prefix to remap VBD dev to '
'(ex. /dev/xvdb -> /dev/sdb)')
flags.DEFINE_integer('xenapi_login_timeout',
10,
'Timeout in seconds for XenAPI login.')
def get_connection(_):
@@ -318,7 +322,10 @@ class XenAPISession(object):
def __init__(self, url, user, pw):
self.XenAPI = self.get_imported_xenapi()
self._session = self._create_session(url)
self._session.login_with_password(user, pw)
exception = self.XenAPI.Failure(_("Unable to log in to XenAPI "
"(is the Dom0 disk full?)"))
with timeout.Timeout(FLAGS.xenapi_login_timeout, exception):
self._session.login_with_password(user, pw)
self.loop = None
def get_imported_xenapi(self):

View File

@@ -502,6 +502,14 @@ class Serializer(object):
result.setAttribute('xmlns', xmlns)
if type(data) is list:
collections = metadata.get('list_collections', {})
if nodename in collections:
metadata = collections[nodename]
for item in data:
node = doc.createElement(metadata['item_name'])
node.setAttribute(metadata['item_key'], str(item))
result.appendChild(node)
return result
singular = metadata.get('plurals', {}).get(nodename, None)
if singular is None:
if nodename.endswith('s'):
@@ -512,6 +520,16 @@ class Serializer(object):
node = self._to_xml_node(doc, metadata, singular, item)
result.appendChild(node)
elif type(data) is dict:
collections = metadata.get('dict_collections', {})
if nodename in collections:
metadata = collections[nodename]
for k, v in data.items():
node = doc.createElement(metadata['item_name'])
node.setAttribute(metadata['item_key'], str(k))
text = doc.createTextNode(str(v))
node.appendChild(text)
result.appendChild(node)
return result
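# Editor's sketch (not part of the change): the metadata shape this branch
# expects. For example, with data = {'instances': 10} under nodename
# 'quota_set' and
#
#     metadata = {'dict_collections': {'quota_set': {'item_name': 'quota',
#                                                    'item_key': 'key'}}}
#
# the result would be <quota_set><quota key="instances">10</quota></quota_set>.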
attrs = metadata.get('attributes', {}).get(nodename, {})
for k, v in data.items():
if k in attrs:

View File

@@ -56,16 +56,17 @@ def read_record(self, arg_dict):
and boolean True, attempting to read a non-existent path will return
the string 'None' instead of raising an exception.
"""
cmd = "xenstore-read /local/domain/%(dom_id)s/%(path)s" % arg_dict
cmd = ["xenstore-read", "/local/domain/%(dom_id)s/%(path)s" % arg_dict]
try:
return _run_command(cmd).rstrip("\n")
ret, result = _run_command(cmd)
return result.rstrip("\n")
except pluginlib.PluginError, e:
if arg_dict.get("ignore_missing_path", False):
cmd = "xenstore-exists /local/domain/%(dom_id)s/%(path)s; echo $?"
cmd = cmd % arg_dict
ret = _run_command(cmd).strip()
cmd = ["xenstore-exists",
"/local/domain/%(dom_id)s/%(path)s" % arg_dict]
ret, result = _run_command(cmd)
# If the path exists, the cmd should return "0"
if ret != "0":
if ret != 0:
# No such path, so ignore the error and return the
# string 'None', since None can't be marshalled
# over RPC.
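# Editor's illustration (not part of the diff): with ignore_missing_path set,
# a missing key comes back as the string 'None' rather than a PluginError.
# The dom_id and path below are made-up values:
#
#     read_record(None, {"dom_id": "5", "path": "vm-data/missing",
#                        "ignore_missing_path": True})   # -> 'None'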
@@ -83,8 +84,9 @@ def write_record(self, arg_dict):
you must specify a 'value' key, whose value must be a string. Typically,
you can json-ify more complex values and store the json output.
"""
cmd = "xenstore-write /local/domain/%(dom_id)s/%(path)s '%(value)s'"
cmd = cmd % arg_dict
cmd = ["xenstore-write",
"/local/domain/%(dom_id)s/%(path)s" % arg_dict,
arg_dict["value"]]
_run_command(cmd)
return arg_dict["value"]
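# Editor's illustration (not part of the diff): storing a structured value by
# json-ifying it first, as the docstring suggests (dom_id and path are made up):
#
#     import json
#     write_record(None, {"dom_id": "5", "path": "vm-data/hostname",
#                         "value": json.dumps({"hostname": "test-vm"})})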
@@ -96,10 +98,10 @@ def list_records(self, arg_dict):
path as the key and the stored value as the value. If the path
doesn't exist, an empty dict is returned.
"""
cmd = "xenstore-ls /local/domain/%(dom_id)s/%(path)s" % arg_dict
cmd = cmd.rstrip("/")
dirpath = "/local/domain/%(dom_id)s/%(path)s" % arg_dict
cmd = ["xenstore-ls", dirpath.rstrip("/")]
try:
recs = _run_command(cmd)
ret, recs = _run_command(cmd)
except pluginlib.PluginError, e:
if "No such file or directory" in "%s" % e:
# Path doesn't exist.
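# Editor's illustration (not part of the diff): listing a subtree returns a
# flat dict keyed by path, or {} when nothing is stored there
# (dom_id and path are made up):
#
#     list_records(None, {"dom_id": "5", "path": "vm-data/networking"})
#     # -> {'aabbccddeeff': '{...}'} or {} if the path does not exist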
@@ -128,8 +130,9 @@ def delete_record(self, arg_dict):
"""Just like it sounds: it removes the record for the specified
VM and the specified path from xenstore.
"""
cmd = "xenstore-rm /local/domain/%(dom_id)s/%(path)s" % arg_dict
return _run_command(cmd)
cmd = ["xenstore-rm", "/local/domain/%(dom_id)s/%(path)s" % arg_dict]
ret, result = _run_command(cmd)
return result
def _paths_from_ls(recs):
@@ -171,9 +174,9 @@ def _run_command(cmd):
Otherwise, the output from stdout is returned.
"""
pipe = subprocess.PIPE
proc = subprocess.Popen([cmd], shell=True, stdin=pipe, stdout=pipe,
stderr=pipe, close_fds=True)
proc.wait()
proc = subprocess.Popen(cmd, stdin=pipe, stdout=pipe, stderr=pipe,
close_fds=True)
ret = proc.wait()
err = proc.stderr.read()
if err:
raise pluginlib.PluginError(err)

3888
po/ast.po

File diff suppressed because it is too large Load Diff

3915
po/cs.po

File diff suppressed because it is too large Load Diff

3892
po/da.po

File diff suppressed because it is too large Load Diff

3978
po/de.po

File diff suppressed because it is too large Load Diff

4732
po/es.po

File diff suppressed because it is too large Load Diff

4077
po/it.po

File diff suppressed because it is too large Load Diff

4662
po/ja.po

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

4055
po/ru.po

File diff suppressed because it is too large Load Diff

3935
po/uk.po

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

24
tools/eventlet-patch Normal file
View File

@@ -0,0 +1,24 @@
# HG changeset patch
# User Soren Hansen <soren@linux2go.dk>
# Date 1297678255 -3600
# Node ID 4c846d555010bb5a91ab4da78dfe596451313742
# Parent 5b7e9946c79f005c028eb63207cf5eb7bb21d1c3
Don't attempt to wrap GreenPipes in GreenPipe
If the os module is monkeypatched, Python's standard subprocess module
will return greenio.GreenPipe instances for Popen objects' stdin, stdout,
and stderr attributes. However, eventlet.green.subprocess tries to wrap
these attributes in another greenio.GreenPipe, which GreenPipe refuses.
diff -r 5b7e9946c79f -r 4c846d555010 eventlet/green/subprocess.py
--- a/eventlet/green/subprocess.py Sat Feb 05 13:05:05 2011 -0800
+++ b/eventlet/green/subprocess.py Mon Feb 14 11:10:55 2011 +0100
@@ -27,7 +27,7 @@
# eventlet.processes.Process.run() method.
for attr in "stdin", "stdout", "stderr":
pipe = getattr(self, attr)
- if pipe is not None:
+ if pipe is not None and not type(pipe) == greenio.GreenPipe:
wrapped_pipe = greenio.GreenPipe(pipe, pipe.mode, bufsize)
setattr(self, attr, wrapped_pipe)
__init__.__doc__ = subprocess_orig.Popen.__init__.__doc__

View File

@@ -103,6 +103,12 @@ def install_dependencies(venv=VENV):
pthfile = os.path.join(venv, "lib", "python2.6", "site-packages", "nova.pth")
f = open(pthfile, 'w')
f.write("%s\n" % ROOT)
# Patch eventlet (see FAQ # 1485)
patchsrc = os.path.join(ROOT, 'tools', 'eventlet-patch')
patchfile = os.path.join(venv, "lib", "python2.6", "site-packages", "eventlet",
"green", "subprocess.py")
patch_cmd = "patch %s %s" % (patchfile, patchsrc)
os.system(patch_cmd)
def print_help():