Merge with trunk

Johannes Erdfelt 2011-06-20 20:55:16 +00:00
commit cb059a76b9
57 changed files with 1634 additions and 545 deletions

View File

@ -97,7 +97,6 @@ flags.DECLARE('vlan_start', 'nova.network.manager')
flags.DECLARE('vpn_start', 'nova.network.manager')
flags.DECLARE('fixed_range_v6', 'nova.network.manager')
flags.DECLARE('gateway_v6', 'nova.network.manager')
flags.DECLARE('images_path', 'nova.image.local')
flags.DECLARE('libvirt_type', 'nova.virt.libvirt.connection')
flags.DEFINE_flag(flags.HelpFlag())
flags.DEFINE_flag(flags.HelpshortFlag())
@ -1056,16 +1055,6 @@ class ImageCommands(object):
machine_images = {}
other_images = {}
directory = os.path.abspath(directory)
# NOTE(vish): If we're importing from the images path dir, attempt
# to move the files out of the way before importing
# so we aren't writing to the same directory. This
# may fail if the dir was a mountpoint.
if (FLAGS.image_service == 'nova.image.local.LocalImageService'
and directory == os.path.abspath(FLAGS.images_path)):
new_dir = "%s_bak" % directory
os.rename(directory, new_dir)
os.mkdir(directory)
directory = new_dir
for fn in glob.glob("%s/*/info.json" % directory):
try:
image_path = os.path.join(fn.rpartition('/')[0], 'image')

View File

@ -21,22 +21,15 @@ APIRequest class
"""
import datetime
import re
# TODO(termie): replace minidom with etree
from xml.dom import minidom
from nova import log as logging
from nova.api.ec2 import ec2utils
LOG = logging.getLogger("nova.api.request")
_c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')
def _camelcase_to_underscore(str):
return _c2u.sub(r'_\1', str).lower().strip('_')
def _underscore_to_camelcase(str):
return ''.join([x[:1].upper() + x[1:] for x in str.split('_')])
@ -51,59 +44,6 @@ def _database_to_isoformat(datetimeobj):
return datetimeobj.strftime("%Y-%m-%dT%H:%M:%SZ")
def _try_convert(value):
"""Return a non-string from a string or unicode, if possible.
============= =====================================================
When value is returns
============= =====================================================
zero-length ''
'None' None
'True' True
'False' False
'0', '-0' 0
0xN, -0xN int from hex (positive) (N is any number)
0bN, -0bN int from binary (positive) (N is any number)
* try conversion to int, float, complex, fallback value
"""
if len(value) == 0:
return ''
if value == 'None':
return None
if value == 'True':
return True
if value == 'False':
return False
valueneg = value[1:] if value[0] == '-' else value
if valueneg == '0':
return 0
if valueneg == '':
return value
if valueneg[0] == '0':
if valueneg[1] in 'xX':
return int(value, 16)
elif valueneg[1] in 'bB':
return int(value, 2)
else:
try:
return int(value, 8)
except ValueError:
pass
try:
return int(value)
except ValueError:
pass
try:
return float(value)
except ValueError:
pass
try:
return complex(value)
except ValueError:
return value
class APIRequest(object):
def __init__(self, controller, action, version, args):
self.controller = controller
@ -114,7 +54,7 @@ class APIRequest(object):
def invoke(self, context):
try:
method = getattr(self.controller,
_camelcase_to_underscore(self.action))
ec2utils.camelcase_to_underscore(self.action))
except AttributeError:
controller = self.controller
action = self.action
@ -125,19 +65,7 @@ class APIRequest(object):
# and reraise as 400 error.
raise Exception(_error)
args = {}
for key, value in self.args.items():
parts = key.split(".")
key = _camelcase_to_underscore(parts[0])
if isinstance(value, str) or isinstance(value, unicode):
# NOTE(vish): Automatically convert strings back
# into their respective values
value = _try_convert(value)
if len(parts) > 1:
d = args.get(key, {})
d[parts[1]] = value
value = d
args[key] = value
args = ec2utils.dict_from_dotted_str(self.args.items())
for key in args.keys():
# NOTE(vish): Turn numeric dict keys into lists

View File

@ -909,6 +909,25 @@ class CloudController(object):
if kwargs.get('ramdisk_id'):
ramdisk = self._get_image(context, kwargs['ramdisk_id'])
kwargs['ramdisk_id'] = ramdisk['id']
for bdm in kwargs.get('block_device_mapping', []):
# NOTE(yamahata)
# BlockDeviceMapping.<N>.DeviceName
# BlockDeviceMapping.<N>.Ebs.SnapshotId
# BlockDeviceMapping.<N>.Ebs.VolumeSize
# BlockDeviceMapping.<N>.Ebs.DeleteOnTermination
# BlockDeviceMapping.<N>.VirtualName
# => remove .Ebs and allow volume id in SnapshotId
ebs = bdm.pop('ebs', None)
if ebs:
ec2_id = ebs.pop('snapshot_id')
id = ec2utils.ec2_id_to_id(ec2_id)
if ec2_id.startswith('snap-'):
bdm['snapshot_id'] = id
elif ec2_id.startswith('vol-'):
bdm['volume_id'] = id
ebs.setdefault('delete_on_termination', True)
bdm.update(ebs)
image = self._get_image(context, kwargs['image_id'])
if image:
@ -933,37 +952,54 @@ class CloudController(object):
user_data=kwargs.get('user_data'),
security_group=kwargs.get('security_group'),
availability_zone=kwargs.get('placement', {}).get(
'AvailabilityZone'))
'AvailabilityZone'),
block_device_mapping=kwargs.get('block_device_mapping', {}))
return self._format_run_instances(context,
instances[0]['reservation_id'])
def _do_instance(self, action, context, ec2_id):
instance_id = ec2utils.ec2_id_to_id(ec2_id)
action(context, instance_id=instance_id)
def _do_instances(self, action, context, instance_id):
for ec2_id in instance_id:
self._do_instance(action, context, ec2_id)
def terminate_instances(self, context, instance_id, **kwargs):
"""Terminate each instance in instance_id, which is a list of ec2 ids.
instance_id is a kwarg so its name cannot be modified."""
LOG.debug(_("Going to start terminating instances"))
for ec2_id in instance_id:
instance_id = ec2utils.ec2_id_to_id(ec2_id)
self.compute_api.delete(context, instance_id=instance_id)
self._do_instances(self.compute_api.delete, context, instance_id)
return True
def reboot_instances(self, context, instance_id, **kwargs):
"""instance_id is a list of instance ids"""
LOG.audit(_("Reboot instance %r"), instance_id, context=context)
for ec2_id in instance_id:
instance_id = ec2utils.ec2_id_to_id(ec2_id)
self.compute_api.reboot(context, instance_id=instance_id)
self._do_instances(self.compute_api.reboot, context, instance_id)
return True
def stop_instances(self, context, instance_id, **kwargs):
"""Stop each instances in instance_id.
Here instance_id is a list of instance ids"""
LOG.debug(_("Going to stop instances"))
self._do_instances(self.compute_api.stop, context, instance_id)
return True
def start_instances(self, context, instance_id, **kwargs):
"""Start each instances in instance_id.
Here instance_id is a list of instance ids"""
LOG.debug(_("Going to start instances"))
self._do_instances(self.compute_api.start, context, instance_id)
return True
def rescue_instance(self, context, instance_id, **kwargs):
"""This is an extension to the normal ec2_api"""
instance_id = ec2utils.ec2_id_to_id(instance_id)
self.compute_api.rescue(context, instance_id=instance_id)
self._do_instance(self.compute_api.rescue, context, instance_id)
return True
def unrescue_instance(self, context, instance_id, **kwargs):
"""This is an extension to the normal ec2_api"""
instance_id = ec2utils.ec2_id_to_id(instance_id)
self.compute_api.unrescue(context, instance_id=instance_id)
self._do_instance(self.compute_api.unrescue, context, instance_id)
return True
def update_instance(self, context, instance_id, **kwargs):
@ -974,7 +1010,8 @@ class CloudController(object):
changes[field] = kwargs[field]
if changes:
instance_id = ec2utils.ec2_id_to_id(instance_id)
self.compute_api.update(context, instance_id=instance_id, **kwargs)
self.compute_api.update(context, instance_id=instance_id,
**changes)
return True
@staticmethod

View File

@ -16,6 +16,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import re
from nova import exception
@ -30,3 +32,95 @@ def ec2_id_to_id(ec2_id):
def id_to_ec2_id(instance_id, template='i-%08x'):
"""Convert an instance ID (int) to an ec2 ID (i-[base 16 number])"""
return template % instance_id
_c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')
def camelcase_to_underscore(str):
return _c2u.sub(r'_\1', str).lower().strip('_')
def _try_convert(value):
"""Return a non-string from a string or unicode, if possible.
============= =====================================================
When value is returns
============= =====================================================
zero-length ''
'None' None
'True' True case insensitive
'False' False case insensitive
'0', '-0' 0
0xN, -0xN int from hex (positive) (N is any number)
0bN, -0bN int from binary (positive) (N is any number)
* try conversion to int, float, complex, fallback value
"""
if len(value) == 0:
return ''
if value == 'None':
return None
lowered_value = value.lower()
if lowered_value == 'true':
return True
if lowered_value == 'false':
return False
valueneg = value[1:] if value[0] == '-' else value
if valueneg == '0':
return 0
if valueneg == '':
return value
if valueneg[0] == '0':
if valueneg[1] in 'xX':
return int(value, 16)
elif valueneg[1] in 'bB':
return int(value, 2)
else:
try:
return int(value, 8)
except ValueError:
pass
try:
return int(value)
except ValueError:
pass
try:
return float(value)
except ValueError:
pass
try:
return complex(value)
except ValueError:
return value
def dict_from_dotted_str(items):
"""parse multi dot-separated argument into dict.
EBS boot uses multi dot-separeted arguments like
BlockDeviceMapping.1.DeviceName=snap-id
Convert the above into
{'block_device_mapping': {'1': {'device_name': snap-id}}}
"""
args = {}
for key, value in items:
parts = key.split(".")
key = camelcase_to_underscore(parts[0])
if isinstance(value, str) or isinstance(value, unicode):
# NOTE(vish): Automatically convert strings back
# into their respective values
value = _try_convert(value)
if len(parts) > 1:
d = args.get(key, {})
args[key] = d
for k in parts[1:-1]:
k = camelcase_to_underscore(k)
v = d.get(k, {})
d[k] = v
d = v
d[camelcase_to_underscore(parts[-1])] = value
else:
args[key] = value
return args
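
For reference, a short usage sketch of the helpers above (the parameter values are illustrative, not part of the commit): dict_from_dotted_str folds the dotted EC2 query parameters into a nested dict, which run_instances in the cloud controller then flattens into block device mapping entries.

from nova.api.ec2 import ec2utils

# Dotted parameters as they arrive from an EC2 request (illustrative values).
params = [
    ('BlockDeviceMapping.1.DeviceName', '/dev/sdb1'),
    ('BlockDeviceMapping.1.Ebs.SnapshotId', 'snap-00000005'),
    ('BlockDeviceMapping.1.Ebs.DeleteOnTermination', 'False'),
]

args = ec2utils.dict_from_dotted_str(params)
# args == {'block_device_mapping':
#             {'1': {'device_name': '/dev/sdb1',
#                    'ebs': {'snapshot_id': 'snap-00000005',
#                            'delete_on_termination': False}}}}

The cloud controller then pops each 'ebs' sub-dict, converts the snap-/vol- id to its integer id, and is left with a flat entry such as {'device_name': '/dev/sdb1', 'snapshot_id': 5, 'delete_on_termination': False}.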

View File

@ -113,8 +113,7 @@ class APIRouter(base_wsgi.Router):
collection={'detail': 'GET',
'info': 'GET',
'select': 'POST',
'boot': 'POST'
})
'boot': 'POST'})
mapper.resource("console", "consoles",
controller=consoles.create_resource(),

View File

@ -301,7 +301,7 @@ class Volumes(extensions.ExtensionDescriptor):
return "Volumes"
def get_alias(self):
return "VOLUMES"
return "os-volumes"
def get_description(self):
return "Volumes support"
@ -317,12 +317,12 @@ class Volumes(extensions.ExtensionDescriptor):
# NOTE(justinsb): No way to provide singular name ('volume')
# Does this matter?
res = extensions.ResourceExtension('volumes',
res = extensions.ResourceExtension('os-volumes',
VolumeController(),
collection_actions={'detail': 'GET'})
resources.append(res)
res = extensions.ResourceExtension('volume_attachments',
res = extensions.ResourceExtension('os-volume_attachments',
VolumeAttachmentController(),
parent=dict(
member_name='server',

View File

@ -94,7 +94,7 @@ class CreateInstanceHelper(object):
except Exception, e:
msg = _("Cannot find requested image %(image_href)s: %(e)s" %
locals())
raise faults.Fault(exc.HTTPBadRequest(msg))
raise faults.Fault(exc.HTTPBadRequest(explanation=msg))
personality = body['server'].get('personality')
@ -106,7 +106,7 @@ class CreateInstanceHelper(object):
if not 'name' in body['server']:
msg = _("Server name is not defined")
raise exc.HTTPBadRequest(msg)
raise exc.HTTPBadRequest(explanation=msg)
zone_blob = body['server'].get('blob')
name = body['server']['name']
@ -121,8 +121,7 @@ class CreateInstanceHelper(object):
extra_values = {
'instance_type': inst_type,
'image_ref': image_href,
'password': password
}
'password': password}
return (extra_values,
create_method(context,
@ -138,14 +137,12 @@ class CreateInstanceHelper(object):
injected_files=injected_files,
admin_password=password,
zone_blob=zone_blob,
reservation_id=reservation_id
)
)
reservation_id=reservation_id))
except quota.QuotaError as error:
self._handle_quota_error(error)
except exception.ImageNotFound as error:
msg = _("Can not find requested image")
raise faults.Fault(exc.HTTPBadRequest(msg))
raise faults.Fault(exc.HTTPBadRequest(explanation=msg))
# Let the caller deal with unhandled exceptions.
@ -180,11 +177,11 @@ class CreateInstanceHelper(object):
def _validate_server_name(self, value):
if not isinstance(value, basestring):
msg = _("Server name is not a string or unicode")
raise exc.HTTPBadRequest(msg)
raise exc.HTTPBadRequest(explanation=msg)
if value.strip() == '':
msg = _("Server name is an empty string")
raise exc.HTTPBadRequest(msg)
raise exc.HTTPBadRequest(explanation=msg)
def _get_kernel_ramdisk_from_image(self, req, image_id):
"""Fetch an image from the ImageService, then if present, return the
@ -265,7 +262,7 @@ class CreateInstanceHelper(object):
return utils.generate_password(16)
if not isinstance(password, basestring) or password == '':
msg = _("Invalid adminPass")
raise exc.HTTPBadRequest(msg)
raise exc.HTTPBadRequest(explanation=msg)
return password

View File

@ -374,6 +374,8 @@ class ExtensionManager(object):
LOG.debug(_('Ext updated: %s'), extension.get_updated())
except AttributeError as ex:
LOG.exception(_("Exception loading extension: %s"), unicode(ex))
return False
return True
def _load_all_extensions(self):
"""Load extensions from the configured path.
@ -412,15 +414,16 @@ class ExtensionManager(object):
'file': ext_path})
continue
new_ext = new_ext_class()
self._check_extension(new_ext)
self._add_extension(new_ext)
self.add_extension(new_ext)
def add_extension(self, ext):
# Do nothing if the extension doesn't check out
if not self._check_extension(ext):
return
def _add_extension(self, ext):
alias = ext.get_alias()
LOG.audit(_('Loaded extension: %s'), alias)
self._check_extension(ext)
if alias in self.extensions:
raise exception.Error("Found duplicate extension: %s" % alias)
self.extensions[alias] = ext

View File

@ -32,25 +32,24 @@ class Controller(object):
self.compute_api = nova.compute.API()
self.builder = nova.api.openstack.views.addresses.ViewBuilderV10()
def index(self, req, server_id):
def _get_instance(self, req, server_id):
try:
instance = self.compute_api.get(req.environ['nova.context'], id)
instance = self.compute_api.get(
req.environ['nova.context'], server_id)
except nova.exception.NotFound:
return faults.Fault(exc.HTTPNotFound())
return instance
def index(self, req, server_id):
instance = self._get_instance(req, server_id)
return {'addresses': self.builder.build(instance)}
def public(self, req, server_id):
try:
instance = self.compute_api.get(req.environ['nova.context'], id)
except nova.exception.NotFound:
return faults.Fault(exc.HTTPNotFound())
instance = self._get_instance(req, server_id)
return {'public': self.builder.build_public_parts(instance)}
def private(self, req, server_id):
try:
instance = self.compute_api.get(req.environ['nova.context'], id)
except nova.exception.NotFound:
return faults.Fault(exc.HTTPNotFound())
instance = self._get_instance(req, server_id)
return {'private': self.builder.build_private_parts(instance)}
def show(self, req, server_id, id):

View File

@ -7,9 +7,6 @@ image ids.
GlanceImageService(ImageService):
image ids are URIs.
LocalImageService(ImageService):
image ids are random strings.
OpenstackAPITranslationStore:
translates RS server/images/flavor/etc ids into formats required
by a given ImageService strategy.

View File

@ -18,9 +18,10 @@
from webob import exc
from nova import compute
from nova import quota
from nova.api.openstack import faults
from nova.api.openstack import wsgi
from nova import exception
from nova import quota
class Controller(object):
@ -45,7 +46,11 @@ class Controller(object):
def index(self, req, server_id):
""" Returns the list of metadata for a given instance """
context = req.environ['nova.context']
return self._get_metadata(context, server_id)
try:
return self._get_metadata(context, server_id)
except exception.InstanceNotFound:
msg = _('Server %(server_id)s does not exist') % locals()
raise exc.HTTPNotFound(explanation=msg)
def create(self, req, server_id, body):
self._check_body(body)
@ -55,8 +60,13 @@ class Controller(object):
self.compute_api.update_or_create_instance_metadata(context,
server_id,
metadata)
except exception.InstanceNotFound:
msg = _('Server %(server_id)s does not exist') % locals()
raise exc.HTTPNotFound(explanation=msg)
except quota.QuotaError as error:
self._handle_quota_error(error)
return body
def update(self, req, server_id, id, body):
@ -72,6 +82,10 @@ class Controller(object):
self.compute_api.update_or_create_instance_metadata(context,
server_id,
body)
except exception.InstanceNotFound:
msg = _('Server %(server_id)s does not exist') % locals()
raise exc.HTTPNotFound(explanation=msg)
except quota.QuotaError as error:
self._handle_quota_error(error)
@ -80,16 +94,26 @@ class Controller(object):
def show(self, req, server_id, id):
""" Return a single metadata item """
context = req.environ['nova.context']
data = self._get_metadata(context, server_id)
if id in data['metadata']:
try:
data = self._get_metadata(context, server_id)
except exception.InstanceNotFound:
msg = _('Server %(server_id)s does not exist') % locals()
raise exc.HTTPNotFound(explanation=msg)
try:
return {id: data['metadata'][id]}
else:
return faults.Fault(exc.HTTPNotFound())
except KeyError:
msg = _("metadata item %s was not found" % (id))
raise exc.HTTPNotFound(explanation=msg)
def delete(self, req, server_id, id):
""" Deletes an existing metadata """
context = req.environ['nova.context']
self.compute_api.delete_instance_metadata(context, server_id, id)
try:
self.compute_api.delete_instance_metadata(context, server_id, id)
except exception.InstanceNotFound:
msg = _('Server %(server_id)s does not exist') % locals()
raise exc.HTTPNotFound(explanation=msg)
def _handle_quota_error(self, error):
"""Reraise quota errors as api-specific http exceptions."""

View File

@ -51,7 +51,7 @@ class Controller(object):
try:
servers = self._items(req, is_detail=False)
except exception.Invalid as err:
return exc.HTTPBadRequest(str(err))
return exc.HTTPBadRequest(explanation=str(err))
return servers
def detail(self, req):
@ -59,7 +59,7 @@ class Controller(object):
try:
servers = self._items(req, is_detail=True)
except exception.Invalid as err:
return exc.HTTPBadRequest(str(err))
return exc.HTTPBadRequest(explanation=str(err))
return servers
def _get_view_builder(self, req):
@ -488,11 +488,11 @@ class ControllerV11(Controller):
if (not 'changePassword' in input_dict
or not 'adminPass' in input_dict['changePassword']):
msg = _("No adminPass was specified")
return exc.HTTPBadRequest(msg)
return exc.HTTPBadRequest(explanation=msg)
password = input_dict['changePassword']['adminPass']
if not isinstance(password, basestring) or password == '':
msg = _("Invalid adminPass")
return exc.HTTPBadRequest(msg)
return exc.HTTPBadRequest(explanation=msg)
self.compute_api.set_admin_password(context, id, password)
return exc.HTTPAccepted()

View File

@ -75,7 +75,7 @@ class ViewBuilder(object):
}
inst_dict = {
'id': int(inst['id']),
'id': inst['id'],
'name': inst['display_name'],
'addresses': self.addresses_builder.build(inst),
'status': power_mapping[inst.get('state')]}
@ -99,6 +99,7 @@ class ViewBuilder(object):
self._build_image(inst_dict, inst)
self._build_flavor(inst_dict, inst)
inst_dict['uuid'] = inst['uuid']
return dict(server=inst_dict)
def _build_image(self, response, inst):

View File

@ -363,11 +363,11 @@ class Resource(wsgi.Application):
action, action_args, accept = self.deserializer.deserialize(
request)
except exception.InvalidContentType:
return webob.exc.HTTPBadRequest(_("Unsupported Content-Type"))
msg = _("Unsupported Content-Type")
return webob.exc.HTTPBadRequest(explanation=msg)
except exception.MalformedRequestBody:
explanation = _("Malformed request body")
return faults.Fault(webob.exc.HTTPBadRequest(
explanation=explanation))
msg = _("Malformed request body")
return faults.Fault(webob.exc.HTTPBadRequest(explanation=msg))
action_result = self.dispatch(request, action, action_args)

View File

@ -34,6 +34,7 @@ from nova import utils
from nova import volume
from nova.compute import instance_types
from nova.compute import power_state
from nova.compute.utils import terminate_volumes
from nova.scheduler import api as scheduler_api
from nova.db import base
@ -52,6 +53,18 @@ def generate_default_hostname(instance_id):
return str(instance_id)
def _is_able_to_shutdown(instance, instance_id):
states = {'terminating': "Instance %s is already being terminated",
'migrating': "Instance %s is being migrated",
'stopping': "Instance %s is being stopped"}
msg = states.get(instance['state_description'])
if msg:
LOG.warning(_(msg), instance_id)
return False
return True
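
A doctest-style illustration of the guard above (the instance dicts are hypothetical; only 'state_description' is consulted):

busy_instance = {'state_description': 'migrating'}
idle_instance = {'state_description': 'running'}

_is_able_to_shutdown(busy_instance, 42)  # warns "Instance 42 is being migrated", returns False
_is_able_to_shutdown(idle_instance, 42)  # returns True, so delete/stop may proceed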
class API(base.Base):
"""API for interacting with the compute manager."""
@ -239,7 +252,7 @@ class API(base.Base):
return (num_instances, base_options, security_groups)
def create_db_entry_for_new_instance(self, context, base_options,
security_groups, num=1):
security_groups, block_device_mapping, num=1):
"""Create an entry in the DB for this new instance,
including any related table updates (such as security
groups, MAC address, etc). This will called by create()
@ -259,6 +272,23 @@ class API(base.Base):
instance_id,
security_group_id)
# NOTE(yamahata)
# tell vm driver to attach volume at boot time by updating
# BlockDeviceMapping
for bdm in block_device_mapping:
LOG.debug(_('bdm %s'), bdm)
assert 'device_name' in bdm
values = {
'instance_id': instance_id,
'device_name': bdm['device_name'],
'delete_on_termination': bdm.get('delete_on_termination'),
'virtual_name': bdm.get('virtual_name'),
'snapshot_id': bdm.get('snapshot_id'),
'volume_id': bdm.get('volume_id'),
'volume_size': bdm.get('volume_size'),
'no_device': bdm.get('no_device')}
self.db.block_device_mapping_create(elevated, values)
# Set sane defaults if not specified
updates = dict(hostname=self.hostname_factory(instance_id))
if (not hasattr(instance, 'display_name') or
@ -343,7 +373,7 @@ class API(base.Base):
key_name=None, key_data=None, security_group='default',
availability_zone=None, user_data=None, metadata={},
injected_files=None, admin_password=None, zone_blob=None,
reservation_id=None):
reservation_id=None, block_device_mapping=None):
"""
Provision the instances by sending off a series of single
instance requests to the Schedulers. This is fine for trivial
@ -364,11 +394,13 @@ class API(base.Base):
injected_files, admin_password, zone_blob,
reservation_id)
block_device_mapping = block_device_mapping or []
instances = []
LOG.debug(_("Going to run %s instances..."), num_instances)
for num in range(num_instances):
instance = self.create_db_entry_for_new_instance(context,
base_options, security_groups, num=num)
base_options, security_groups,
block_device_mapping, num=num)
instances.append(instance)
instance_id = instance['id']
@ -478,24 +510,22 @@ class API(base.Base):
rv = self.db.instance_update(context, instance_id, kwargs)
return dict(rv.iteritems())
def _get_instance(self, context, instance_id, action_str):
try:
return self.get(context, instance_id)
except exception.NotFound:
LOG.warning(_("Instance %(instance_id)s was not found during "
"%(action_str)s") %
{'instance_id': instance_id, 'action_str': action_str})
raise
@scheduler_api.reroute_compute("delete")
def delete(self, context, instance_id):
"""Terminate an instance."""
LOG.debug(_("Going to try to terminate %s"), instance_id)
try:
instance = self.get(context, instance_id)
except exception.NotFound:
LOG.warning(_("Instance %s was not found during terminate"),
instance_id)
raise
instance = self._get_instance(context, instance_id, 'terminating')
if instance['state_description'] == 'terminating':
LOG.warning(_("Instance %s is already being terminated"),
instance_id)
return
if instance['state_description'] == 'migrating':
LOG.warning(_("Instance %s is being migrated"), instance_id)
if not _is_able_to_shutdown(instance, instance_id):
return
self.update(context,
@ -509,12 +539,59 @@ class API(base.Base):
self._cast_compute_message('terminate_instance', context,
instance_id, host)
else:
terminate_volumes(self.db, context, instance_id)
self.db.instance_destroy(context, instance_id)
@scheduler_api.reroute_compute("stop")
def stop(self, context, instance_id):
"""Stop an instance."""
LOG.debug(_("Going to try to stop %s"), instance_id)
instance = self._get_instance(context, instance_id, 'stopping')
if not _is_able_to_shutdown(instance, instance_id):
return
self.update(context,
instance['id'],
state_description='stopping',
state=power_state.NOSTATE,
terminated_at=utils.utcnow())
host = instance['host']
if host:
self._cast_compute_message('stop_instance', context,
instance_id, host)
def start(self, context, instance_id):
"""Start an instance."""
LOG.debug(_("Going to try to start %s"), instance_id)
instance = self._get_instance(context, instance_id, 'starting')
if instance['state_description'] != 'stopped':
_state_description = instance['state_description']
LOG.warning(_("Instance %(instance_id)s is not "
"stopped(%(_state_description)s)") % locals())
return
# TODO(yamahata): injected_files isn't supported right now.
# It is used only for osapi. not for ec2 api.
# availability_zone isn't used by run_instance.
rpc.cast(context,
FLAGS.scheduler_topic,
{"method": "start_instance",
"args": {"topic": FLAGS.compute_topic,
"instance_id": instance_id}})
def get(self, context, instance_id):
"""Get a single instance with the given instance_id."""
rv = self.db.instance_get(context, instance_id)
return dict(rv.iteritems())
# NOTE(sirp): id used to be exclusively integer IDs; now we're
# accepting both UUIDs and integer IDs. The handling of this
# is done in db/sqlalchemy/api/instance_get
if utils.is_uuid_like(instance_id):
uuid = instance_id
instance = self.db.instance_get_by_uuid(context, uuid)
else:
instance = self.db.instance_get(context, instance_id)
return dict(instance.iteritems())
@scheduler_api.reroute_compute("get")
def routing_get(self, context, instance_id):
@ -530,6 +607,7 @@ class API(base.Base):
"""Get all instances with this reservation_id, across
all available Zones (if any).
"""
context = context.elevated()
instances = self.db.instance_get_all_by_reservation(
context, reservation_id)

View File

@ -53,6 +53,7 @@ from nova import rpc
from nova import utils
from nova import volume
from nova.compute import power_state
from nova.compute.utils import terminate_volumes
from nova.virt import driver
@ -214,8 +215,63 @@ class ComputeManager(manager.SchedulerDependentManager):
"""
return self.driver.refresh_security_group_members(security_group_id)
@exception.wrap_exception
def run_instance(self, context, instance_id, **kwargs):
def _setup_block_device_mapping(self, context, instance_id):
"""setup volumes for block device mapping"""
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
'block_device_mapping')
volume_api = volume.API()
block_device_mapping = []
for bdm in self.db.block_device_mapping_get_all_by_instance(
context, instance_id):
LOG.debug(_("setting up bdm %s"), bdm)
if ((bdm['snapshot_id'] is not None) and
(bdm['volume_id'] is None)):
# TODO(yamahata): default name and description
vol = volume_api.create(context, bdm['volume_size'],
bdm['snapshot_id'], '', '')
# TODO(yamahata): creating volume simultaneously
# reduces creation time?
volume_api.wait_creation(context, vol['id'])
self.db.block_device_mapping_update(
context, bdm['id'], {'volume_id': vol['id']})
bdm['volume_id'] = vol['id']
if not ((bdm['snapshot_id'] is None) or
(bdm['volume_id'] is not None)):
LOG.error(_('corrupted state of block device mapping '
'id: %(id)s '
'snapshot: %(snapshot_id)s volume: %(volume_id)s') %
{'id': bdm['id'],
'snapshot_id': bdm['snapshot_id'],
'volume_id': bdm['volume_id']})
raise exception.ApiError(_('broken block device mapping %d') %
bdm['id'])
if bdm['volume_id'] is not None:
volume_api.check_attach(context,
volume_id=bdm['volume_id'])
dev_path = self._attach_volume_boot(context, instance_id,
bdm['volume_id'],
bdm['device_name'])
block_device_mapping.append({'device_path': dev_path,
'mount_device':
bdm['device_name']})
elif bdm['virtual_name'] is not None:
# TODO(yamahata): ephemeral/swap device support
LOG.debug(_('block_device_mapping: '
'ephemeral device is not supported yet'))
else:
# TODO(yamahata): NoDevice support
assert bdm['no_device']
LOG.debug(_('block_device_mapping: '
'no device is not supported yet'))
return block_device_mapping
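
For reference, a hedged sketch of the list this method returns and that _run_instance passes to driver.spawn below; the device path is an illustrative value from _attach_volume_boot, the mount device comes from the stored bdm row:

block_device_mapping = [
    {'device_path': '/dev/nbd0',     # illustrative path returned by _attach_volume_boot
     'mount_device': '/dev/sdb1'},   # bdm['device_name'] recorded at run_instances time
]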
def _run_instance(self, context, instance_id, **kwargs):
"""Launch a new instance with specified options."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
@ -249,11 +305,15 @@ class ComputeManager(manager.SchedulerDependentManager):
self.network_manager.setup_compute_network(context,
instance_id)
block_device_mapping = self._setup_block_device_mapping(context,
instance_id)
# TODO(vish) check to make sure the availability zone matches
self._update_state(context, instance_id, power_state.BUILDING)
try:
self.driver.spawn(instance_ref)
self.driver.spawn(instance_ref,
block_device_mapping=block_device_mapping)
except Exception as ex: # pylint: disable=W0702
msg = _("Instance '%(instance_id)s' failed to spawn. Is "
"virtualization enabled in the BIOS? Details: "
@ -276,13 +336,25 @@ class ComputeManager(manager.SchedulerDependentManager):
self._update_launched_at(context, instance_id)
self._update_state(context, instance_id)
@exception.wrap_exception
def run_instance(self, context, instance_id, **kwargs):
self._run_instance(context, instance_id, **kwargs)
@exception.wrap_exception
@checks_instance_lock
def terminate_instance(self, context, instance_id):
"""Terminate an instance on this host."""
def start_instance(self, context, instance_id):
"""Starting an instance on this host."""
# TODO(yamahata): injected_files isn't supported.
# Anyway OSAPI doesn't support stop/start yet
self._run_instance(context, instance_id)
def _shutdown_instance(self, context, instance_id, action_str):
"""Shutdown an instance on this host."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
LOG.audit(_("Terminating instance %s"), instance_id, context=context)
LOG.audit(_("%(action_str)s instance %(instance_id)s") %
{'action_str': action_str, 'instance_id': instance_id},
context=context)
fixed_ip = instance_ref.get('fixed_ip')
if not FLAGS.stub_network and fixed_ip:
@ -318,16 +390,34 @@ class ComputeManager(manager.SchedulerDependentManager):
volumes = instance_ref.get('volumes') or []
for volume in volumes:
self.detach_volume(context, instance_id, volume['id'])
if instance_ref['state'] == power_state.SHUTOFF:
self._detach_volume(context, instance_id, volume['id'], False)
if (instance_ref['state'] == power_state.SHUTOFF and
instance_ref['state_description'] != 'stopped'):
self.db.instance_destroy(context, instance_id)
raise exception.Error(_('trying to destroy already destroyed'
' instance: %s') % instance_id)
self.driver.destroy(instance_ref)
if action_str == 'Terminating':
terminate_volumes(self.db, context, instance_id)
@exception.wrap_exception
@checks_instance_lock
def terminate_instance(self, context, instance_id):
"""Terminate an instance on this host."""
self._shutdown_instance(context, instance_id, 'Terminating')
# TODO(ja): should we keep it in a terminated state for a bit?
self.db.instance_destroy(context, instance_id)
@exception.wrap_exception
@checks_instance_lock
def stop_instance(self, context, instance_id):
"""Stopping an instance on this host."""
self._shutdown_instance(context, instance_id, 'Stopping')
# instance state will be updated to stopped by _poll_instance_states()
@exception.wrap_exception
@checks_instance_lock
def rebuild_instance(self, context, instance_id, **kwargs):
@ -711,7 +801,6 @@ class ComputeManager(manager.SchedulerDependentManager):
def get_diagnostics(self, context, instance_id):
"""Retrieve diagnostics for an instance on this host."""
instance_ref = self.db.instance_get(context, instance_id)
if instance_ref["state"] == power_state.RUNNING:
LOG.audit(_("instance %s: retrieving diagnostics"), instance_id,
context=context)
@ -818,6 +907,22 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_ref = self.db.instance_get(context, instance_id)
return self.driver.get_vnc_console(instance_ref)
def _attach_volume_boot(self, context, instance_id, volume_id, mountpoint):
"""Attach a volume to an instance at boot time. So actual attach
is done by instance creation"""
# TODO(yamahata):
# should move check_attach to volume manager?
volume.API().check_attach(context, volume_id)
context = context.elevated()
LOG.audit(_("instance %(instance_id)s: booting with "
"volume %(volume_id)s at %(mountpoint)s") %
locals(), context=context)
dev_path = self.volume_manager.setup_compute_volume(context, volume_id)
self.db.volume_attached(context, volume_id, instance_id, mountpoint)
return dev_path
@checks_instance_lock
def attach_volume(self, context, instance_id, volume_id, mountpoint):
"""Attach a volume to an instance."""
@ -835,6 +940,16 @@ class ComputeManager(manager.SchedulerDependentManager):
volume_id,
instance_id,
mountpoint)
values = {
'instance_id': instance_id,
'device_name': mountpoint,
'delete_on_termination': False,
'virtual_name': None,
'snapshot_id': None,
'volume_id': volume_id,
'volume_size': None,
'no_device': None}
self.db.block_device_mapping_create(context, values)
except Exception as exc: # pylint: disable=W0702
# NOTE(vish): The inline callback eats the exception info so we
# log the traceback here and reraise the same
@ -849,7 +964,7 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception
@checks_instance_lock
def detach_volume(self, context, instance_id, volume_id):
def _detach_volume(self, context, instance_id, volume_id, destroy_bdm):
"""Detach a volume from an instance."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
@ -865,8 +980,15 @@ class ComputeManager(manager.SchedulerDependentManager):
volume_ref['mountpoint'])
self.volume_manager.remove_compute_volume(context, volume_id)
self.db.volume_detached(context, volume_id)
if destroy_bdm:
self.db.block_device_mapping_destroy_by_instance_and_volume(
context, instance_id, volume_id)
return True
def detach_volume(self, context, instance_id, volume_id):
"""Detach a volume from an instance."""
return self._detach_volume(context, instance_id, volume_id, True)
def remove_volume(self, context, volume_id):
"""Remove volume on compute host.
@ -1192,11 +1314,14 @@ class ComputeManager(manager.SchedulerDependentManager):
"State=%(db_state)s, so setting state to "
"shutoff.") % locals())
vm_state = power_state.SHUTOFF
if db_instance['state_description'] == 'stopping':
self.db.instance_stop(context, db_instance['id'])
continue
else:
vm_state = vm_instance.state
vms_not_found_in_db.remove(name)
if db_instance['state_description'] == 'migrating':
if (db_instance['state_description'] in ['migrating', 'stopping']):
# A situation where the db record exists, but no instance is
# running, sometimes occurs during live-migration at the src
# compute; this case should be ignored.

nova/compute/utils.py Normal file
View File

@ -0,0 +1,29 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 VA Linux Systems Japan K.K
# Copyright (c) 2011 Isaku Yamahata
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import volume
def terminate_volumes(db, context, instance_id):
"""delete volumes of delete_on_termination=True in block device mapping"""
volume_api = volume.API()
for bdm in db.block_device_mapping_get_all_by_instance(context,
instance_id):
#LOG.debug(_("terminating bdm %s") % bdm)
if bdm['volume_id'] and bdm['delete_on_termination']:
volume_api.delete(context, bdm['volume_id'])
db.block_device_mapping_destroy(context, bdm['id'])

View File

@ -414,6 +414,16 @@ def instance_destroy(context, instance_id):
return IMPL.instance_destroy(context, instance_id)
def instance_stop(context, instance_id):
"""Stop the instance or raise if it does not exist."""
return IMPL.instance_stop(context, instance_id)
def instance_get_by_uuid(context, uuid):
"""Get an instance or raise if it does not exist."""
return IMPL.instance_get_by_uuid(context, uuid)
def instance_get(context, instance_id):
"""Get an instance or raise if it does not exist."""
return IMPL.instance_get(context, instance_id)
@ -920,6 +930,36 @@ def snapshot_update(context, snapshot_id, values):
####################
def block_device_mapping_create(context, values):
"""Create an entry of block device mapping"""
return IMPL.block_device_mapping_create(context, values)
def block_device_mapping_update(context, bdm_id, values):
"""Create an entry of block device mapping"""
return IMPL.block_device_mapping_update(context, bdm_id, values)
def block_device_mapping_get_all_by_instance(context, instance_id):
"""Get all block device mapping belonging to a instance"""
return IMPL.block_device_mapping_get_all_by_instance(context, instance_id)
def block_device_mapping_destroy(context, bdm_id):
"""Destroy the block device mapping."""
return IMPL.block_device_mapping_destroy(context, bdm_id)
def block_device_mapping_destroy_by_instance_and_volume(context, instance_id,
volume_id):
"""Destroy the block device mapping or raise if it does not exist."""
return IMPL.block_device_mapping_destroy_by_instance_and_volume(
context, instance_id, volume_id)
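
A hedged round-trip sketch of the new block device mapping calls (the ids and values are illustrative; get_admin_context comes from nova.context and is not part of this diff):

from nova import context
from nova import db

ctxt = context.get_admin_context()

# Ask for /dev/sdb1 to be created from snapshot 5 when instance 1 boots.
db.block_device_mapping_create(ctxt, {'instance_id': 1,
                                      'device_name': '/dev/sdb1',
                                      'snapshot_id': 5,
                                      'delete_on_termination': True})

# Later, once a volume has been created from the snapshot, record its id.
for bdm in db.block_device_mapping_get_all_by_instance(ctxt, 1):
    if bdm['snapshot_id'] == 5 and bdm['volume_id'] is None:
        db.block_device_mapping_update(ctxt, bdm['id'], {'volume_id': 7})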
####################
def security_group_get_all(context):
"""Get all security groups."""
return IMPL.security_group_get_all(context)

View File

@ -18,7 +18,7 @@
"""
Implementation of SQLAlchemy backend.
"""
import traceback
import warnings
from nova import db
@ -797,6 +797,8 @@ def instance_create(context, values):
values['metadata'] = _metadata_refs(values.get('metadata'))
instance_ref = models.Instance()
instance_ref['uuid'] = str(utils.gen_uuid())
instance_ref.update(values)
session = get_session()
@ -840,39 +842,67 @@ def instance_destroy(context, instance_id):
@require_context
def instance_get(context, instance_id, session=None):
if not session:
session = get_session()
result = None
def instance_stop(context, instance_id):
session = get_session()
with session.begin():
from nova.compute import power_state
session.query(models.Instance).\
filter_by(id=instance_id).\
update({'host': None,
'state': power_state.SHUTOFF,
'state_description': 'stopped',
'updated_at': literal_column('updated_at')})
session.query(models.SecurityGroupInstanceAssociation).\
filter_by(instance_id=instance_id).\
update({'updated_at': literal_column('updated_at')})
session.query(models.InstanceMetadata).\
filter_by(instance_id=instance_id).\
update({'updated_at': literal_column('updated_at')})
if is_admin_context(context):
result = session.query(models.Instance).\
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload_all('security_groups.rules')).\
options(joinedload('volumes')).\
options(joinedload_all('fixed_ip.network')).\
options(joinedload('metadata')).\
options(joinedload('instance_type')).\
filter_by(id=instance_id).\
filter_by(deleted=can_read_deleted(context)).\
first()
elif is_user_context(context):
result = session.query(models.Instance).\
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload_all('security_groups.rules')).\
options(joinedload('volumes')).\
options(joinedload('metadata')).\
options(joinedload('instance_type')).\
filter_by(project_id=context.project_id).\
filter_by(id=instance_id).\
filter_by(deleted=False).\
first()
@require_context
def instance_get_by_uuid(context, uuid, session=None):
partial = _build_instance_get(context, session=session)
result = partial.filter_by(uuid=uuid)
result = result.first()
if not result:
# FIXME(sirp): it would be nice if InstanceNotFound would accept a
# uuid parameter as well
raise exception.InstanceNotFound(instance_id=uuid)
return result
@require_context
def instance_get(context, instance_id, session=None):
partial = _build_instance_get(context, session=session)
result = partial.filter_by(id=instance_id)
result = result.first()
if not result:
raise exception.InstanceNotFound(instance_id=instance_id)
return result
@require_context
def _build_instance_get(context, session=None):
if not session:
session = get_session()
partial = session.query(models.Instance).\
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload_all('security_groups.rules')).\
options(joinedload('volumes')).\
options(joinedload_all('fixed_ip.network')).\
options(joinedload('metadata')).\
options(joinedload('instance_type'))
if is_admin_context(context):
partial = partial.filter_by(deleted=can_read_deleted(context))
elif is_user_context(context):
partial = partial.filter_by(project_id=context.project_id).\
filter_by(deleted=False)
return partial
@require_admin_context
def instance_get_all(context):
session = get_session()
@ -907,6 +937,7 @@ def instance_get_all_by_host(context, host):
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ip.network')).\
options(joinedload('metadata')).\
options(joinedload('instance_type')).\
filter_by(host=host).\
filter_by(deleted=can_read_deleted(context)).\
@ -922,6 +953,7 @@ def instance_get_all_by_project(context, project_id):
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ip.network')).\
options(joinedload('metadata')).\
options(joinedload('instance_type')).\
filter_by(project_id=project_id).\
filter_by(deleted=can_read_deleted(context)).\
@ -937,6 +969,7 @@ def instance_get_all_by_reservation(context, reservation_id):
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ip.network')).\
options(joinedload('metadata')).\
options(joinedload('instance_type')).\
filter_by(reservation_id=reservation_id).\
filter_by(deleted=can_read_deleted(context)).\
@ -946,6 +979,7 @@ def instance_get_all_by_reservation(context, reservation_id):
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ip.network')).\
options(joinedload('metadata')).\
options(joinedload('instance_type')).\
filter_by(project_id=context.project_id).\
filter_by(reservation_id=reservation_id).\
@ -959,6 +993,8 @@ def instance_get_project_vpn(context, project_id):
return session.query(models.Instance).\
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ip.network')).\
options(joinedload('metadata')).\
options(joinedload('instance_type')).\
filter_by(project_id=project_id).\
filter_by(image_ref=str(FLAGS.vpn_image_id)).\
@ -1876,6 +1912,66 @@ def snapshot_update(context, snapshot_id, values):
###################
@require_context
def block_device_mapping_create(context, values):
bdm_ref = models.BlockDeviceMapping()
bdm_ref.update(values)
session = get_session()
with session.begin():
bdm_ref.save(session=session)
@require_context
def block_device_mapping_update(context, bdm_id, values):
session = get_session()
with session.begin():
session.query(models.BlockDeviceMapping).\
filter_by(id=bdm_id).\
filter_by(deleted=False).\
update(values)
@require_context
def block_device_mapping_get_all_by_instance(context, instance_id):
session = get_session()
result = session.query(models.BlockDeviceMapping).\
filter_by(instance_id=instance_id).\
filter_by(deleted=False).\
all()
if not result:
return []
return result
@require_context
def block_device_mapping_destroy(context, bdm_id):
session = get_session()
with session.begin():
session.query(models.BlockDeviceMapping).\
filter_by(id=bdm_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def block_device_mapping_destroy_by_instance_and_volume(context, instance_id,
volume_id):
session = get_session()
with session.begin():
session.query(models.BlockDeviceMapping).\
filter_by(instance_id=instance_id).\
filter_by(volume_id=volume_id).\
filter_by(deleted=False).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
###################
@require_context
def security_group_get_all(context):
session = get_session()
@ -2609,7 +2705,17 @@ def zone_get_all(context):
####################
def require_instance_exists(func):
def new_func(context, instance_id, *args, **kwargs):
db.api.instance_get(context, instance_id)
return func(context, instance_id, *args, **kwargs)
new_func.__name__ = func.__name__
return new_func
@require_context
@require_instance_exists
def instance_metadata_get(context, instance_id):
session = get_session()
@ -2625,6 +2731,7 @@ def instance_metadata_get(context, instance_id):
@require_context
@require_instance_exists
def instance_metadata_delete(context, instance_id, key):
session = get_session()
session.query(models.InstanceMetadata).\
@ -2637,6 +2744,7 @@ def instance_metadata_delete(context, instance_id, key):
@require_context
@require_instance_exists
def instance_metadata_delete_all(context, instance_id):
session = get_session()
session.query(models.InstanceMetadata).\
@ -2648,6 +2756,7 @@ def instance_metadata_delete_all(context, instance_id):
@require_context
@require_instance_exists
def instance_metadata_get_item(context, instance_id, key):
session = get_session()
@ -2664,6 +2773,7 @@ def instance_metadata_get_item(context, instance_id, key):
@require_context
@require_instance_exists
def instance_metadata_update_or_create(context, instance_id, metadata):
session = get_session()

View File

@ -0,0 +1,87 @@
# Copyright 2011 OpenStack LLC.
# Copyright 2011 Isaku Yamahata
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, Table, Column
from sqlalchemy import DateTime, Boolean, Integer, String
from sqlalchemy import ForeignKey
from nova import log as logging
meta = MetaData()
# Just for the ForeignKey and column creation to succeed, these are not the
# actual definitions of instances or services.
instances = Table('instances', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
volumes = Table('volumes', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
snapshots = Table('snapshots', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
block_device_mapping = Table('block_device_mapping', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, autoincrement=True),
Column('instance_id',
Integer(),
ForeignKey('instances.id'),
nullable=False),
Column('device_name',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
nullable=False),
Column('delete_on_termination',
Boolean(create_constraint=True, name=None),
default=False),
Column('virtual_name',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
nullable=True),
Column('snapshot_id',
Integer(),
ForeignKey('snapshots.id'),
nullable=True),
Column('volume_id', Integer(), ForeignKey('volumes.id'),
nullable=True),
Column('volume_size', Integer(), nullable=True),
Column('no_device',
Boolean(create_constraint=True, name=None),
nullable=True),
)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta.bind = migrate_engine
try:
block_device_mapping.create()
except Exception:
logging.info(repr(block_device_mapping))
logging.exception('Exception while creating table')
meta.drop_all(tables=[block_device_mapping])
raise
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
block_device_mapping.drop()

View File

@ -0,0 +1,43 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column, Integer, MetaData, String, Table
from nova import utils
meta = MetaData()
instances = Table("instances", meta,
Column("id", Integer(), primary_key=True, nullable=False))
uuid_column = Column("uuid", String(36))
def upgrade(migrate_engine):
meta.bind = migrate_engine
instances.create_column(uuid_column)
rows = migrate_engine.execute(instances.select())
for row in rows:
instance_uuid = str(utils.gen_uuid())
migrate_engine.execute(instances.update()\
.where(instances.c.id == row[0])\
.values(uuid=instance_uuid))
def downgrade(migrate_engine):
meta.bind = migrate_engine
instances.drop_column(uuid_column)

View File

@ -234,6 +234,7 @@ class Instance(BASE, NovaBase):
os_type = Column(String(255))
architecture = Column(String(255))
vm_mode = Column(String(255))
uuid = Column(String(36))
# TODO(vish): see Ewan's email about state improvements, probably
# should be in a driver base class or some such
@ -358,6 +359,45 @@ class Snapshot(BASE, NovaBase):
display_description = Column(String(255))
class BlockDeviceMapping(BASE, NovaBase):
"""Represents block device mapping that is defined by EC2"""
__tablename__ = "block_device_mapping"
id = Column(Integer, primary_key=True, autoincrement=True)
instance_id = Column(Integer, ForeignKey('instances.id'), nullable=False)
instance = relationship(Instance,
backref=backref('block_device_mapping'),
foreign_keys=instance_id,
primaryjoin='and_(BlockDeviceMapping.instance_id=='
'Instance.id,'
'BlockDeviceMapping.deleted=='
'False)')
device_name = Column(String(255), nullable=False)
# default=False for compatibility of the existing code.
# With EC2 API,
# default True for ami specified device.
# default False for created with other timing.
delete_on_termination = Column(Boolean, default=False)
# for ephemeral device
virtual_name = Column(String(255), nullable=True)
# for snapshot or volume
snapshot_id = Column(Integer, ForeignKey('snapshots.id'), nullable=True)
# outer join
snapshot = relationship(Snapshot,
foreign_keys=snapshot_id)
volume_id = Column(Integer, ForeignKey('volumes.id'), nullable=True)
volume = relationship(Volume,
foreign_keys=volume_id)
volume_size = Column(Integer, nullable=True)
# for no device to suppress devices.
no_device = Column(Boolean, nullable=True)
class ExportDevice(BASE, NovaBase):
"""Represates a shelf and blade that a volume can be exported on."""
__tablename__ = 'export_devices'

View File

@ -272,7 +272,7 @@ DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID')
DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key')
# NOTE(sirp): my_ip interpolation doesn't work within nested structures
DEFINE_list('glance_api_servers',
['127.0.0.1:9292'],
['%s:9292' % _get_my_ip()],
'list of glance api servers available to nova (host:port)')
DEFINE_integer('s3_port', 3333, 's3 port')
DEFINE_string('s3_host', '$my_ip', 's3 host (for infrastructure)')
@ -364,7 +364,7 @@ DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager',
'Manager for scheduler')
# The service to use for image search and retrieval
DEFINE_string('image_service', 'nova.image.local.LocalImageService',
DEFINE_string('image_service', 'nova.image.glance.GlanceImageService',
'The service to use for retrieving and searching for images.')
DEFINE_string('host', socket.gethostname(),

View File

@ -120,6 +120,14 @@ class _FakeImageService(service.BaseImageService):
image_id, self.images)
raise exception.ImageNotFound(image_id=image_id)
def show_by_name(self, context, name):
"""Returns a dict containing image data for the given name."""
images = copy.deepcopy(self.images.values())
for image in images:
if name == image.get('name'):
return image
raise exception.ImageNotFound(image_id=name)
def create(self, context, metadata, data=None):
"""Store the image data and return the new image id.

View File

@ -1,167 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os.path
import random
import shutil
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
from nova.image import service
FLAGS = flags.FLAGS
flags.DEFINE_string('images_path', '$state_path/images',
'path to decrypted images')
LOG = logging.getLogger('nova.image.local')
class LocalImageService(service.BaseImageService):
"""Image service storing images to local disk.
It assumes that image_ids are integers.
"""
def __init__(self):
self._path = FLAGS.images_path
def _path_to(self, image_id, fname='info.json'):
if fname:
return os.path.join(self._path, '%08x' % int(image_id), fname)
return os.path.join(self._path, '%08x' % int(image_id))
def _ids(self):
"""The list of all image ids."""
images = []
for image_dir in os.listdir(self._path):
try:
unhexed_image_id = int(image_dir, 16)
except ValueError:
LOG.error(_('%s is not in correct directory naming format')
% image_dir)
else:
images.append(unhexed_image_id)
return images
def index(self, context, filters=None, marker=None, limit=None):
# TODO(blamar): Make use of filters, marker, and limit
filtered = []
image_metas = self.detail(context)
for image_meta in image_metas:
meta = utils.subset_dict(image_meta, ('id', 'name'))
filtered.append(meta)
return filtered
def detail(self, context, filters=None, marker=None, limit=None):
# TODO(blamar): Make use of filters, marker, and limit
images = []
for image_id in self._ids():
try:
image = self.show(context, image_id)
images.append(image)
except exception.NotFound:
continue
return images
def show(self, context, image_id):
try:
with open(self._path_to(image_id)) as metadata_file:
image_meta = json.load(metadata_file)
if not self._is_image_available(context, image_meta):
raise exception.ImageNotFound(image_id=image_id)
return image_meta
except (IOError, ValueError):
raise exception.ImageNotFound(image_id=image_id)
def show_by_name(self, context, name):
"""Returns a dict containing image data for the given name."""
# NOTE(vish): Not very efficient, but the local image service
# is for testing so it should be fine.
images = self.detail(context)
image = None
for candidate in images:
if name == candidate.get('name'):
image = candidate
break
if image is None:
raise exception.ImageNotFound(image_id=name)
return image
def get(self, context, image_id, data):
"""Get image and metadata."""
try:
with open(self._path_to(image_id)) as metadata_file:
metadata = json.load(metadata_file)
with open(self._path_to(image_id, 'image')) as image_file:
shutil.copyfileobj(image_file, data)
except (IOError, ValueError):
raise exception.ImageNotFound(image_id=image_id)
return metadata
def create(self, context, metadata, data=None):
"""Store the image data and return the new image."""
image_id = random.randint(0, 2 ** 31 - 1)
image_path = self._path_to(image_id, None)
if not os.path.exists(image_path):
os.mkdir(image_path)
return self._store(context, image_id, metadata, data)
def update(self, context, image_id, metadata, data=None):
"""Replace the contents of the given image with the new data."""
# NOTE(vish): show is to check if image is available
self.show(context, image_id)
return self._store(context, image_id, metadata, data)
def _store(self, context, image_id, metadata, data=None):
metadata['id'] = image_id
try:
if data:
location = self._path_to(image_id, 'image')
with open(location, 'w') as image_file:
shutil.copyfileobj(data, image_file)
# NOTE(vish): update metadata similarly to glance
metadata['status'] = 'active'
metadata['location'] = location
with open(self._path_to(image_id), 'w') as metadata_file:
json.dump(metadata, metadata_file)
except (IOError, ValueError):
raise exception.ImageNotFound(image_id=image_id)
return metadata
def delete(self, context, image_id):
"""Delete the given image.
:raises: ImageNotFound if the image does not exist.
"""
# NOTE(vish): show is to check if image is available
self.show(context, image_id)
try:
shutil.rmtree(self._path_to(image_id, None))
except (IOError, ValueError):
raise exception.ImageNotFound(image_id=image_id)
def delete_all(self):
"""Clears out all images in local directory."""
for image_id in self._ids():
shutil.rmtree(self._path_to(image_id, None))

View File

@ -24,6 +24,7 @@ from nova import exception
from nova import flags
from nova import log as logging
from nova import rpc
from nova import utils
from eventlet import greenpool
@ -106,7 +107,8 @@ def _wrap_method(function, self):
def _process(func, zone):
"""Worker stub for green thread pool. Give the worker
an authenticated nova client and zone info."""
nova = novaclient.OpenStack(zone.username, zone.password, zone.api_url)
nova = novaclient.OpenStack(zone.username, zone.password, None,
zone.api_url)
nova.authenticate()
return func(nova, zone)
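Editor's note (hedged, not part of the diff): the extra None threaded into these novaclient.OpenStack(...) calls is assumed to be the project/tenant argument introduced by newer python-novaclient (this commit bumps the pin to 2.5.3 near the end of the diff); it is passed as None because zone credentials are not project-scoped. A minimal sketch of the updated call, with zone standing in for a row from the zones table:
import novaclient
def authenticated_zone_client(zone):
    # username/password/api_url come from the zone record; the third positional
    # argument (assumed to be the project id) is left as None for zone-to-zone calls
    client = novaclient.OpenStack(zone.username, zone.password, None,
                                  zone.api_url)
    client.authenticate()
    return client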
@ -122,7 +124,7 @@ def call_zone_method(context, method_name, errors_to_ignore=None,
results = []
for zone in db.zone_get_all(context):
try:
nova = novaclient.OpenStack(zone.username, zone.password,
nova = novaclient.OpenStack(zone.username, zone.password, None,
zone.api_url)
nova.authenticate()
except novaclient.exceptions.BadRequest, e:
@ -200,38 +202,78 @@ class RedirectResult(exception.Error):
class reroute_compute(object):
"""Decorator used to indicate that the method should
delegate the call to the child zones if the db query
can't find anything."""
"""
reroute_compute is responsible for trying to lookup a resource in the
current zone and if it's not found there, delegating the call to the
child zones.
Since reroute_compute will be making 'cross-zone' calls, the ID for the
object must come in as a UUID-- if we receive an integer ID, we bail.
The steps involved are:
1. Validate that item_id is UUID like
2. Lookup item by UUID in the zone local database
3. If the item was found, then extract the integer ID, and pass that to
the wrapped method. (This ensures that zone-local code can
continue to use integer IDs).
4. If the item was not found, we delegate the call to a child zone
using the UUID.
"""
def __init__(self, method_name):
self.method_name = method_name
def _route_to_child_zones(self, context, collection, item_uuid):
if not FLAGS.enable_zone_routing:
raise exception.InstanceNotFound(instance_id=item_uuid)
zones = db.zone_get_all(context)
if not zones:
raise exception.InstanceNotFound(instance_id=item_uuid)
# Ask the children to provide an answer ...
LOG.debug(_("Asking child zones ..."))
result = self._call_child_zones(zones,
wrap_novaclient_function(_issue_novaclient_command,
collection, self.method_name, item_uuid))
# Scrub the results and raise another exception
# so the API layers can bail out gracefully ...
raise RedirectResult(self.unmarshall_result(result))
def __call__(self, f):
def wrapped_f(*args, **kwargs):
collection, context, item_id = \
collection, context, item_id_or_uuid = \
self.get_collection_context_and_id(args, kwargs)
try:
# Call the original function ...
attempt_reroute = False
if utils.is_uuid_like(item_id_or_uuid):
item_uuid = item_id_or_uuid
try:
instance = db.instance_get_by_uuid(context, item_uuid)
except exception.InstanceNotFound, e:
# NOTE(sirp): since a UUID was passed in, we can attempt
# to reroute to a child zone
attempt_reroute = True
LOG.debug(_("Instance %(item_uuid)s not found "
"locally: '%(e)s'" % locals()))
else:
# NOTE(sirp): since we're not re-routing in this case, and
# we were passed a UUID, we need to replace that UUID
# with an integer ID in the argument list so that the
# zone-local code can continue to use integer IDs.
item_id = instance['id']
args = list(args) # needs to be mutable to replace
self.replace_uuid_with_id(args, kwargs, item_id)
if attempt_reroute:
return self._route_to_child_zones(context, collection,
item_uuid)
else:
return f(*args, **kwargs)
except exception.InstanceNotFound, e:
LOG.debug(_("Instance %(item_id)s not found "
"locally: '%(e)s'" % locals()))
if not FLAGS.enable_zone_routing:
raise
zones = db.zone_get_all(context)
if not zones:
raise
# Ask the children to provide an answer ...
LOG.debug(_("Asking child zones ..."))
result = self._call_child_zones(zones,
wrap_novaclient_function(_issue_novaclient_command,
collection, self.method_name, item_id))
# Scrub the results and raise another exception
# so the API layers can bail out gracefully ...
raise RedirectResult(self.unmarshall_result(result))
return wrapped_f
def _call_child_zones(self, zones, function):
@ -250,6 +292,18 @@ class reroute_compute(object):
instance_id = args[2]
return ("servers", context, instance_id)
@staticmethod
def replace_uuid_with_id(args, kwargs, replacement_id):
"""
Extracts the UUID parameter from the arg or kwarg list and replaces
it with an integer ID.
"""
if 'instance_id' in kwargs:
kwargs['instance_id'] = replacement_id
elif len(args) > 1:
args.pop(2)
args.insert(2, replacement_id)
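A hedged usage example of this helper (illustrative values; the positional convention comes from get_collection_context_and_id above, where the instance identifier sits at index 2 of the wrapped method's args):
# illustrative only: args mimics (api_self, context, instance_id) as seen by the
# wrapped compute API method; kwargs would be used if instance_id were named
args = [None, None, 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa']
reroute_compute.replace_uuid_with_id(args, {}, 42)
assert args == [None, None, 42]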
def unmarshall_result(self, zone_responses):
"""Result is a list of responses from each child zone.
Each decorator derivation is responsible for turning this

View File

@ -39,7 +39,7 @@ flags.DEFINE_integer("max_networks", 1000,
class SimpleScheduler(chance.ChanceScheduler):
"""Implements Naive Scheduler that tries to find least loaded host."""
def schedule_run_instance(self, context, instance_id, *_args, **_kwargs):
def _schedule_instance(self, context, instance_id, *_args, **_kwargs):
"""Picks a host that is up and has the fewest running instances."""
instance_ref = db.instance_get(context, instance_id)
if (instance_ref['availability_zone']
@ -75,6 +75,12 @@ class SimpleScheduler(chance.ChanceScheduler):
" for this request. Is the appropriate"
" service running?"))
def schedule_run_instance(self, context, instance_id, *_args, **_kwargs):
return self._schedule_instance(context, instance_id, *_args, **_kwargs)
def schedule_start_instance(self, context, instance_id, *_args, **_kwargs):
return self._schedule_instance(context, instance_id, *_args, **_kwargs)
def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
"""Picks a host that is up and has the fewest volumes."""
volume_ref = db.volume_get(context, volume_id)

View File

@ -88,7 +88,7 @@ class ZoneAwareScheduler(driver.Scheduler):
instance_properties = request_spec['instance_properties']
name = instance_properties['display_name']
image_id = instance_properties['image_id']
image_ref = instance_properties['image_ref']
meta = instance_properties['metadata']
flavor_id = instance_type['flavorid']
reservation_id = instance_properties['reservation_id']
@ -105,13 +105,14 @@ class ZoneAwareScheduler(driver.Scheduler):
% locals())
nova = None
try:
nova = novaclient.OpenStack(zone.username, zone.password, url)
nova = novaclient.OpenStack(zone.username, zone.password, None,
url)
nova.authenticate()
except novaclient.exceptions.BadRequest, e:
raise exception.NotAuthorized(_("Bad credentials attempting "
"to talk to zone at %(url)s.") % locals())
nova.servers.create(name, image_id, flavor_id, ipgroup, meta, files,
nova.servers.create(name, image_ref, flavor_id, ipgroup, meta, files,
child_blob, reservation_id=reservation_id)
def _provision_resource_from_blob(self, context, item, instance_id,
@ -184,7 +185,11 @@ class ZoneAwareScheduler(driver.Scheduler):
if not build_plan:
raise driver.NoValidHost(_('No hosts were available'))
for item in build_plan:
for num in xrange(request_spec['num_instances']):
if not build_plan:
break
item = build_plan.pop(0)
self._provision_resource(context, item, instance_id, request_spec,
kwargs)

View File

@ -89,7 +89,8 @@ class ZoneState(object):
def _call_novaclient(zone):
"""Call novaclient. Broken out for testing purposes."""
client = novaclient.OpenStack(zone.username, zone.password, zone.api_url)
client = novaclient.OpenStack(zone.username, zone.password, None,
zone.api_url)
return client.zones.info()._info

View File

@ -39,7 +39,6 @@ from nova.api.openstack import limits
from nova.auth.manager import User, Project
import nova.image.fake
from nova.image import glance
from nova.image import local
from nova.image import service
from nova.tests import fake_flags
from nova.wsgi import Router

View File

@ -128,6 +128,11 @@ class ResourceExtensionTest(unittest.TestCase):
self.assertEqual(response_body, response.body)
class InvalidExtension(object):
def get_alias(self):
return "THIRD"
class ExtensionManagerTest(unittest.TestCase):
response_body = "Try to say this Mr. Knox, sir..."
@ -144,6 +149,14 @@ class ExtensionManagerTest(unittest.TestCase):
self.assertEqual(200, response.status_int)
self.assertEqual(response_body, response.body)
def test_invalid_extensions(self):
app = openstack.APIRouterV11()
ext_midware = extensions.ExtensionMiddleware(app)
ext_mgr = ext_midware.ext_mgr
ext_mgr.add_extension(InvalidExtension())
self.assertTrue('FOXNSOX' in ext_mgr.extensions)
self.assertTrue('THIRD' not in ext_mgr.extensions)
class ActionExtensionTest(unittest.TestCase):

View File

@ -135,36 +135,6 @@ class _BaseImageServiceTests(test.TestCase):
return fixture
class LocalImageServiceTest(_BaseImageServiceTests):
"""Tests the local image service"""
def setUp(self):
super(LocalImageServiceTest, self).setUp()
self.tempdir = tempfile.mkdtemp()
self.flags(images_path=self.tempdir)
self.stubs = stubout.StubOutForTesting()
service_class = 'nova.image.local.LocalImageService'
self.service = utils.import_object(service_class)
self.context = context.RequestContext(None, None)
def tearDown(self):
shutil.rmtree(self.tempdir)
self.stubs.UnsetAll()
super(LocalImageServiceTest, self).tearDown()
def test_get_all_ids_with_incorrect_directory_formats(self):
# create some old-style image directories (starting with 'ami-')
for x in [1, 2, 3]:
tempfile.mkstemp(prefix='ami-', dir=self.tempdir)
# create some valid image directories names
for x in ["1485baed", "1a60f0ee", "3123a73d"]:
os.makedirs(os.path.join(self.tempdir, x))
found_image_ids = self.service._ids()
self.assertEqual(True, isinstance(found_image_ids, list))
self.assertEqual(3, len(found_image_ids), len(found_image_ids))
class GlanceImageServiceTest(_BaseImageServiceTests):
"""Tests the Glance image service, in particular that metadata translation

View File

@ -21,6 +21,7 @@ import unittest
import webob
from nova import exception
from nova import flags
from nova.api import openstack
from nova.tests.api.openstack import fakes
@ -67,6 +68,14 @@ def stub_max_server_metadata():
return metadata
def return_server(context, server_id):
return {'id': server_id}
def return_server_nonexistant(context, server_id):
raise exception.InstanceNotFound()
class ServerMetaDataTest(unittest.TestCase):
def setUp(self):
@ -76,6 +85,7 @@ class ServerMetaDataTest(unittest.TestCase):
fakes.FakeAuthDatabase.data = {}
fakes.stub_out_auth(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
self.stubs.Set(nova.db.api, 'instance_get', return_server)
def tearDown(self):
self.stubs.UnsetAll()
@ -92,6 +102,13 @@ class ServerMetaDataTest(unittest.TestCase):
self.assertEqual('application/json', res.headers['Content-Type'])
self.assertEqual('value1', res_dict['metadata']['key1'])
def test_index_nonexistant_server(self):
self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant)
req = webob.Request.blank('/v1.1/servers/1/meta')
req.environ['api.version'] = '1.1'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(404, res.status_int)
def test_index_no_data(self):
self.stubs.Set(nova.db.api, 'instance_metadata_get',
return_empty_server_metadata)
@ -114,13 +131,19 @@ class ServerMetaDataTest(unittest.TestCase):
self.assertEqual('application/json', res.headers['Content-Type'])
self.assertEqual('value5', res_dict['key5'])
def test_show_nonexistant_server(self):
self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant)
req = webob.Request.blank('/v1.1/servers/1/meta/key5')
req.environ['api.version'] = '1.1'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(404, res.status_int)
def test_show_meta_not_found(self):
self.stubs.Set(nova.db.api, 'instance_metadata_get',
return_empty_server_metadata)
req = webob.Request.blank('/v1.1/servers/1/meta/key6')
req.environ['api.version'] = '1.1'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(404, res.status_int)
def test_delete(self):
@ -132,6 +155,14 @@ class ServerMetaDataTest(unittest.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(200, res.status_int)
def test_delete_nonexistant_server(self):
self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant)
req = webob.Request.blank('/v1.1/servers/1/meta/key5')
req.environ['api.version'] = '1.1'
req.method = 'DELETE'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(404, res.status_int)
def test_create(self):
self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
return_create_instance_metadata)
@ -141,8 +172,8 @@ class ServerMetaDataTest(unittest.TestCase):
req.body = '{"metadata": {"key1": "value1"}}'
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(200, res.status_int)
res_dict = json.loads(res.body)
self.assertEqual('application/json', res.headers['Content-Type'])
self.assertEqual('value1', res_dict['metadata']['key1'])
@ -156,6 +187,16 @@ class ServerMetaDataTest(unittest.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)
def test_create_nonexistant_server(self):
self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant)
req = webob.Request.blank('/v1.1/servers/100/meta')
req.environ['api.version'] = '1.1'
req.method = 'POST'
req.body = '{"metadata": {"key1": "value1"}}'
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(404, res.status_int)
def test_update_item(self):
self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
return_create_instance_metadata)
@ -170,6 +211,16 @@ class ServerMetaDataTest(unittest.TestCase):
res_dict = json.loads(res.body)
self.assertEqual('value1', res_dict['key1'])
def test_update_item_nonexistant_server(self):
self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant)
req = webob.Request.blank('/v1.1/servers/asdf/100/key1')
req.environ['api.version'] = '1.1'
req.method = 'PUT'
req.body = '{"key1": "value1"}'
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(404, res.status_int)
def test_update_item_empty_body(self):
self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
return_create_instance_metadata)

View File

@ -49,10 +49,22 @@ FLAGS = flags.FLAGS
FLAGS.verbose = True
def return_server(context, id):
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
def fake_gen_uuid():
return FAKE_UUID
def return_server_by_id(context, id):
return stub_instance(id)
def return_server_by_uuid(context, uuid):
id = 1
return stub_instance(id, uuid=uuid)
def return_server_with_addresses(private, public):
def _return_server(context, id):
return stub_instance(id, private_address=private,
@ -111,7 +123,8 @@ def instance_address(context, instance_id):
def stub_instance(id, user_id=1, private_address=None, public_addresses=None,
host=None, power_state=0, reservation_id=""):
host=None, power_state=0, reservation_id="",
uuid=FAKE_UUID):
metadata = []
metadata.append(InstanceMetadata(key='seq', value=id))
@ -129,7 +142,7 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None,
server_name = "reservation_%s" % (reservation_id, )
instance = {
"id": id,
"id": int(id),
"admin_pass": "",
"user_id": user_id,
"project_id": "",
@ -157,7 +170,8 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None,
"display_name": server_name,
"display_description": "",
"locked": False,
"metadata": metadata}
"metadata": metadata,
"uuid": uuid}
instance["fixed_ip"] = {
"address": private_address,
@ -196,8 +210,11 @@ class ServersTest(test.TestCase):
fakes.stub_out_auth(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
fakes.stub_out_image_service(self.stubs)
self.stubs.Set(utils, 'gen_uuid', fake_gen_uuid)
self.stubs.Set(nova.db.api, 'instance_get_all', return_servers)
self.stubs.Set(nova.db.api, 'instance_get', return_server)
self.stubs.Set(nova.db.api, 'instance_get', return_server_by_id)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_by_uuid)
self.stubs.Set(nova.db.api, 'instance_get_all_by_user',
return_servers)
self.stubs.Set(nova.db.api, 'instance_add_security_group',
@ -229,6 +246,36 @@ class ServersTest(test.TestCase):
self.assertEqual(res_dict['server']['id'], 1)
self.assertEqual(res_dict['server']['name'], 'server1')
def test_get_server_by_uuid(self):
"""
The steps involved with resolving a UUID are pretty complicated;
here's what's happening in this scenario:
1. Show is calling `routing_get`
2. `routing_get` is wrapped by `reroute_compute` which does the work
of resolving requests to child zones.
3. `reroute_compute` looks up the UUID by hitting the stub
(return_server_by_uuid)
4. Since the stub reports that the record exists, `reroute_compute`
considers the request to be 'zone local', so it replaces the UUID
in the argument list with an integer ID and then calls the inner
function ('get').
5. The call to `get` hits the other stub `return_server_by_id` which
has the UUID set to FAKE_UUID
So, counterintuitively, we call `get` twice on the `show` command.
"""
req = webob.Request.blank('/v1.0/servers/%s' % FAKE_UUID)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res_dict['server']['id'], 1)
self.assertEqual(res_dict['server']['uuid'], FAKE_UUID)
self.assertEqual(res_dict['server']['name'], 'server1')
def test_get_server_by_id_v1_1(self):
req = webob.Request.blank('/v1.1/servers/1')
res = req.get_response(fakes.wsgi_app())
@ -540,7 +587,8 @@ class ServersTest(test.TestCase):
def _setup_for_create_instance(self):
"""Shared implementation for tests below that create instance"""
def instance_create(context, inst):
return {'id': '1', 'display_name': 'server_test'}
return {'id': 1, 'display_name': 'server_test',
'uuid': FAKE_UUID}
def server_update(context, id, params):
return instance_create(context, id)
@ -594,11 +642,22 @@ class ServersTest(test.TestCase):
self.assertEqual(1, server['id'])
self.assertEqual(2, server['flavorId'])
self.assertEqual(3, server['imageId'])
self.assertEqual(FAKE_UUID, server['uuid'])
self.assertEqual(res.status_int, 200)
def test_create_instance(self):
self._test_create_instance_helper()
def test_create_instance_has_uuid(self):
"""Tests at the db-layer instead of API layer since that's where the
UUID is generated
"""
ctxt = context.RequestContext(1, 1)
values = {}
instance = nova.db.api.instance_create(ctxt, values)
expected = FAKE_UUID
self.assertEqual(instance['uuid'], expected)
def test_create_instance_via_zones(self):
"""Server generated ReservationID"""
self._setup_for_create_instance()
@ -1850,7 +1909,8 @@ class TestServerInstanceCreation(test.TestCase):
self.injected_files = kwargs['injected_files']
else:
self.injected_files = None
return [{'id': '1234', 'display_name': 'fakeinstance'}]
return [{'id': '1234', 'display_name': 'fakeinstance',
'uuid': FAKE_UUID}]
def set_admin_password(self, *args, **kwargs):
pass

View File

@ -32,7 +32,7 @@ flags.DECLARE('fake_network', 'nova.network.manager')
FLAGS['network_size'].SetDefault(8)
FLAGS['num_networks'].SetDefault(2)
FLAGS['fake_network'].SetDefault(True)
FLAGS['image_service'].SetDefault('nova.image.local.LocalImageService')
FLAGS['image_service'].SetDefault('nova.image.fake.FakeImageService')
flags.DECLARE('num_shelves', 'nova.volume.driver')
flags.DECLARE('blades_per_shelf', 'nova.volume.driver')
flags.DECLARE('iscsi_num_targets', 'nova.volume.driver')

View File

@ -221,30 +221,30 @@ class TestOpenStackClient(object):
return self.api_delete('/flavors/%s' % flavor_id)
def get_volume(self, volume_id):
return self.api_get('/volumes/%s' % volume_id)['volume']
return self.api_get('/os-volumes/%s' % volume_id)['volume']
def get_volumes(self, detail=True):
rel_url = '/volumes/detail' if detail else '/volumes'
rel_url = '/os-volumes/detail' if detail else '/os-volumes'
return self.api_get(rel_url)['volumes']
def post_volume(self, volume):
return self.api_post('/volumes', volume)['volume']
return self.api_post('/os-volumes', volume)['volume']
def delete_volume(self, volume_id):
return self.api_delete('/volumes/%s' % volume_id)
return self.api_delete('/os-volumes/%s' % volume_id)
def get_server_volume(self, server_id, attachment_id):
return self.api_get('/servers/%s/volume_attachments/%s' %
return self.api_get('/servers/%s/os-volume_attachments/%s' %
(server_id, attachment_id))['volumeAttachment']
def get_server_volumes(self, server_id):
return self.api_get('/servers/%s/volume_attachments' %
return self.api_get('/servers/%s/os-volume_attachments' %
(server_id))['volumeAttachments']
def post_server_volume(self, server_id, volume_attachment):
return self.api_post('/servers/%s/volume_attachments' %
return self.api_post('/servers/%s/os-volume_attachments' %
(server_id), volume_attachment)['volumeAttachment']
def delete_server_volume(self, server_id, attachment_id):
return self.api_delete('/servers/%s/volume_attachments/%s' %
return self.api_delete('/servers/%s/os-volume_attachments/%s' %
(server_id, attachment_id))

View File

@ -48,6 +48,10 @@ flags.DECLARE('stub_network', 'nova.compute.manager')
flags.DECLARE('instances_path', 'nova.compute.manager')
FAKE_UUID_NOT_FOUND = 'ffffffff-ffff-ffff-ffff-ffffffffffff'
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
class TestDriver(driver.Scheduler):
"""Scheduler Driver for Tests"""
def schedule(context, topic, *args, **kwargs):
@ -926,12 +930,23 @@ def zone_get_all(context):
]
def fake_instance_get_by_uuid(context, uuid):
if uuid == FAKE_UUID_NOT_FOUND:
raise exception.InstanceNotFound(instance_id=uuid)
else:
return {'id': 1}
class FakeRerouteCompute(api.reroute_compute):
def __init__(self, method_name, id_to_return=1):
super(FakeRerouteCompute, self).__init__(method_name)
self.id_to_return = id_to_return
def _call_child_zones(self, zones, function):
return []
def get_collection_context_and_id(self, args, kwargs):
return ("servers", None, 1)
return ("servers", None, self.id_to_return)
def unmarshall_result(self, zone_responses):
return dict(magic="found me")
@ -960,6 +975,8 @@ class ZoneRedirectTest(test.TestCase):
self.stubs = stubout.StubOutForTesting()
self.stubs.Set(db, 'zone_get_all', zone_get_all)
self.stubs.Set(db, 'instance_get_by_uuid',
fake_instance_get_by_uuid)
self.enable_zone_routing = FLAGS.enable_zone_routing
FLAGS.enable_zone_routing = True
@ -976,8 +993,19 @@ class ZoneRedirectTest(test.TestCase):
except api.RedirectResult, e:
self.fail(_("Successful database hit should succeed"))
def test_trap_not_found_locally(self):
def test_trap_not_found_locally_id_passed(self):
"""When an integer ID is not found locally, we cannot reroute to
another zone, so we just raise an InstanceNotFound exception
"""
decorator = FakeRerouteCompute("foo")
self.assertRaises(exception.InstanceNotFound,
decorator(go_boom), None, None, 1)
def test_trap_not_found_locally_uuid_passed(self):
"""When a UUID is found, if the item isn't found locally, we should
try to reroute to a child zone to see if they have it
"""
decorator = FakeRerouteCompute("foo", id_to_return=FAKE_UUID_NOT_FOUND)
try:
result = decorator(go_boom)(None, None, 1)
self.assertFail(_("Should have rerouted."))

View File

@ -89,7 +89,7 @@ class FakeHttplibConnection(object):
class XmlConversionTestCase(test.TestCase):
"""Unit test api xml conversion"""
def test_number_conversion(self):
conv = apirequest._try_convert
conv = ec2utils._try_convert
self.assertEqual(conv('None'), None)
self.assertEqual(conv('True'), True)
self.assertEqual(conv('False'), False)

View File

@ -35,7 +35,7 @@ from nova import utils
from nova.auth import manager
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
from nova.image import local
from nova.image import fake
FLAGS = flags.FLAGS
@ -56,6 +56,7 @@ class CloudTestCase(test.TestCase):
self.compute = self.start_service('compute')
self.scheduter = self.start_service('scheduler')
self.network = self.start_service('network')
self.volume = self.start_service('volume')
self.image_service = utils.import_object(FLAGS.image_service)
self.manager = manager.AuthManager()
@ -69,8 +70,8 @@ class CloudTestCase(test.TestCase):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
'type': 'machine', 'image_state': 'available'}}
self.stubs.Set(local.LocalImageService, 'show', fake_show)
self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show)
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show)
# NOTE(vish): set up a manual wait so rpc.cast has a chance to finish
rpc_cast = rpc.cast
@ -303,7 +304,7 @@ class CloudTestCase(test.TestCase):
def fake_show_none(meh, context, id):
raise exception.ImageNotFound(image_id='bad_image_id')
self.stubs.Set(local.LocalImageService, 'detail', fake_detail)
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
# list all
result1 = describe_images(self.context)
result1 = result1['imagesSet'][0]
@ -317,8 +318,8 @@ class CloudTestCase(test.TestCase):
self.assertEqual(2, len(result3['imagesSet']))
# provide an non-existing image_id
self.stubs.UnsetAll()
self.stubs.Set(local.LocalImageService, 'show', fake_show_none)
self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show_none)
self.stubs.Set(fake._FakeImageService, 'show', fake_show_none)
self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show_none)
self.assertRaises(exception.ImageNotFound, describe_images,
self.context, ['ami-fake'])
@ -329,8 +330,8 @@ class CloudTestCase(test.TestCase):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
'type': 'machine'}, 'is_public': True}
self.stubs.Set(local.LocalImageService, 'show', fake_show)
self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show)
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show)
result = describe_image_attribute(self.context, 'ami-00000001',
'launchPermission')
self.assertEqual([{'group': 'all'}], result['launchPermission'])
@ -345,9 +346,9 @@ class CloudTestCase(test.TestCase):
def fake_update(meh, context, image_id, metadata, data=None):
return metadata
self.stubs.Set(local.LocalImageService, 'show', fake_show)
self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show)
self.stubs.Set(local.LocalImageService, 'update', fake_update)
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show)
self.stubs.Set(fake._FakeImageService, 'update', fake_update)
result = modify_image_attribute(self.context, 'ami-00000001',
'launchPermission', 'add',
user_group=['all'])
@ -359,7 +360,7 @@ class CloudTestCase(test.TestCase):
def fake_delete(self, context, id):
return None
self.stubs.Set(local.LocalImageService, 'delete', fake_delete)
self.stubs.Set(fake._FakeImageService, 'delete', fake_delete)
# valid image
result = deregister_image(self.context, 'ami-00000001')
self.assertEqual(result['imageId'], 'ami-00000001')
@ -369,18 +370,25 @@ class CloudTestCase(test.TestCase):
def fake_detail_empty(self, context):
return []
self.stubs.Set(local.LocalImageService, 'detail', fake_detail_empty)
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail_empty)
self.assertRaises(exception.ImageNotFound, deregister_image,
self.context, 'ami-bad001')
def test_console_output(self):
instance_type = FLAGS.default_instance_type
max_count = 1
kwargs = {'image_id': 'ami-1',
'instance_type': instance_type,
'max_count': max_count}
def _run_instance(self, **kwargs):
rv = self.cloud.run_instances(self.context, **kwargs)
instance_id = rv['instancesSet'][0]['instanceId']
return instance_id
def _run_instance_wait(self, **kwargs):
ec2_instance_id = self._run_instance(**kwargs)
self._wait_for_running(ec2_instance_id)
return ec2_instance_id
def test_console_output(self):
instance_id = self._run_instance(
image_id='ami-1',
instance_type=FLAGS.default_instance_type,
max_count=1)
output = self.cloud.get_console_output(context=self.context,
instance_id=[instance_id])
self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE?OUTPUT')
@ -389,9 +397,7 @@ class CloudTestCase(test.TestCase):
rv = self.cloud.terminate_instances(self.context, [instance_id])
def test_ajax_console(self):
kwargs = {'image_id': 'ami-1'}
rv = self.cloud.run_instances(self.context, **kwargs)
instance_id = rv['instancesSet'][0]['instanceId']
instance_id = self._run_instance(image_id='ami-1')
output = self.cloud.get_ajax_console(context=self.context,
instance_id=[instance_id])
self.assertEquals(output['url'],
@ -457,6 +463,12 @@ class CloudTestCase(test.TestCase):
self.cloud.delete_key_pair(self.context, 'test')
def test_run_instances(self):
# stub out the rpc call
def stub_cast(*args, **kwargs):
pass
self.stubs.Set(rpc, 'cast', stub_cast)
kwargs = {'image_id': FLAGS.default_image,
'instance_type': FLAGS.default_instance_type,
'max_count': 1}
@ -466,7 +478,7 @@ class CloudTestCase(test.TestCase):
self.assertEqual(instance['imageId'], 'ami-00000001')
self.assertEqual(instance['displayName'], 'Server 1')
self.assertEqual(instance['instanceId'], 'i-00000001')
self.assertEqual(instance['instanceState']['name'], 'networking')
self.assertEqual(instance['instanceState']['name'], 'scheduling')
self.assertEqual(instance['instanceType'], 'm1.small')
def test_run_instances_image_state_none(self):
@ -480,7 +492,7 @@ class CloudTestCase(test.TestCase):
'type': 'machine'}}
self.stubs.UnsetAll()
self.stubs.Set(local.LocalImageService, 'show', fake_show_no_state)
self.stubs.Set(fake._FakeImageService, 'show', fake_show_no_state)
self.assertRaises(exception.ApiError, run_instances,
self.context, **kwargs)
@ -495,7 +507,7 @@ class CloudTestCase(test.TestCase):
'type': 'machine', 'image_state': 'decrypting'}}
self.stubs.UnsetAll()
self.stubs.Set(local.LocalImageService, 'show', fake_show_decrypt)
self.stubs.Set(fake._FakeImageService, 'show', fake_show_decrypt)
self.assertRaises(exception.ApiError, run_instances,
self.context, **kwargs)
@ -509,7 +521,7 @@ class CloudTestCase(test.TestCase):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
'type': 'machine'}, 'status': 'active'}
self.stubs.Set(local.LocalImageService, 'show', fake_show_stat_active)
self.stubs.Set(fake._FakeImageService, 'show', fake_show_stat_active)
result = run_instances(self.context, **kwargs)
self.assertEqual(len(result['instancesSet']), 1)
@ -538,7 +550,9 @@ class CloudTestCase(test.TestCase):
def test_update_of_instance_wont_update_private_fields(self):
inst = db.instance_create(self.context, {})
self.cloud.update_instance(self.context, inst['id'],
ec2_id = ec2utils.id_to_ec2_id(inst['id'])
self.cloud.update_instance(self.context, ec2_id,
display_name='c00l 1m4g3',
mac_address='DE:AD:BE:EF')
inst = db.instance_get(self.context, inst['id'])
self.assertEqual(None, inst['mac_address'])
@ -561,3 +575,299 @@ class CloudTestCase(test.TestCase):
vol = db.volume_get(self.context, vol['id'])
self.assertEqual(None, vol['mountpoint'])
db.volume_destroy(self.context, vol['id'])
def _restart_compute_service(self, periodic_interval=None):
"""restart compute service. NOTE: fake driver forgets all instances."""
self.compute.kill()
if periodic_interval:
self.compute = self.start_service(
'compute', periodic_interval=periodic_interval)
else:
self.compute = self.start_service('compute')
def _wait_for_state(self, ctxt, instance_id, predicate):
"""Wait for an stopping instance to be a given state"""
id = ec2utils.ec2_id_to_id(instance_id)
while True:
info = self.cloud.compute_api.get(context=ctxt, instance_id=id)
LOG.debug(info)
if predicate(info):
break
greenthread.sleep(1)
def _wait_for_running(self, instance_id):
def is_running(info):
return info['state_description'] == 'running'
self._wait_for_state(self.context, instance_id, is_running)
def _wait_for_stopped(self, instance_id):
def is_stopped(info):
return info['state_description'] == 'stopped'
self._wait_for_state(self.context, instance_id, is_stopped)
def _wait_for_terminate(self, instance_id):
def is_deleted(info):
return info['deleted']
elevated = self.context.elevated(read_deleted=True)
self._wait_for_state(elevated, instance_id, is_deleted)
def test_stop_start_instance(self):
"""Makes sure stop/start instance works"""
# enforce periodic tasks run in short time to avoid wait for 60s.
self._restart_compute_service(periodic_interval=0.3)
kwargs = {'image_id': 'ami-1',
'instance_type': FLAGS.default_instance_type,
'max_count': 1, }
instance_id = self._run_instance_wait(**kwargs)
# a running instance can't be started. It is just ignored.
result = self.cloud.start_instances(self.context, [instance_id])
greenthread.sleep(0.3)
self.assertTrue(result)
result = self.cloud.stop_instances(self.context, [instance_id])
greenthread.sleep(0.3)
self.assertTrue(result)
self._wait_for_stopped(instance_id)
result = self.cloud.start_instances(self.context, [instance_id])
greenthread.sleep(0.3)
self.assertTrue(result)
self._wait_for_running(instance_id)
result = self.cloud.stop_instances(self.context, [instance_id])
greenthread.sleep(0.3)
self.assertTrue(result)
self._wait_for_stopped(instance_id)
result = self.cloud.terminate_instances(self.context, [instance_id])
greenthread.sleep(0.3)
self.assertTrue(result)
self._restart_compute_service()
def _volume_create(self):
kwargs = {'status': 'available',
'host': self.volume.host,
'size': 1,
'attach_status': 'detached', }
return db.volume_create(self.context, kwargs)
def _assert_volume_attached(self, vol, instance_id, mountpoint):
self.assertEqual(vol['instance_id'], instance_id)
self.assertEqual(vol['mountpoint'], mountpoint)
self.assertEqual(vol['status'], "in-use")
self.assertEqual(vol['attach_status'], "attached")
def _assert_volume_detached(self, vol):
self.assertEqual(vol['instance_id'], None)
self.assertEqual(vol['mountpoint'], None)
self.assertEqual(vol['status'], "available")
self.assertEqual(vol['attach_status'], "detached")
def test_stop_start_with_volume(self):
"""Make sure run instance with block device mapping works"""
# enforce periodic tasks run in short time to avoid wait for 60s.
self._restart_compute_service(periodic_interval=0.3)
vol1 = self._volume_create()
vol2 = self._volume_create()
kwargs = {'image_id': 'ami-1',
'instance_type': FLAGS.default_instance_type,
'max_count': 1,
'block_device_mapping': [{'device_name': '/dev/vdb',
'volume_id': vol1['id'],
'delete_on_termination': False, },
{'device_name': '/dev/vdc',
'volume_id': vol2['id'],
'delete_on_termination': True, },
]}
ec2_instance_id = self._run_instance_wait(**kwargs)
instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
vols = db.volume_get_all_by_instance(self.context, instance_id)
self.assertEqual(len(vols), 2)
for vol in vols:
self.assertTrue(vol['id'] == vol1['id'] or vol['id'] == vol2['id'])
vol = db.volume_get(self.context, vol1['id'])
self._assert_volume_attached(vol, instance_id, '/dev/vdb')
vol = db.volume_get(self.context, vol2['id'])
self._assert_volume_attached(vol, instance_id, '/dev/vdc')
result = self.cloud.stop_instances(self.context, [ec2_instance_id])
self.assertTrue(result)
self._wait_for_stopped(ec2_instance_id)
vol = db.volume_get(self.context, vol1['id'])
self._assert_volume_detached(vol)
vol = db.volume_get(self.context, vol2['id'])
self._assert_volume_detached(vol)
self.cloud.start_instances(self.context, [ec2_instance_id])
self._wait_for_running(ec2_instance_id)
vols = db.volume_get_all_by_instance(self.context, instance_id)
self.assertEqual(len(vols), 2)
for vol in vols:
self.assertTrue(vol['id'] == vol1['id'] or vol['id'] == vol2['id'])
self.assertTrue(vol['mountpoint'] == '/dev/vdb' or
vol['mountpoint'] == '/dev/vdc')
self.assertEqual(vol['instance_id'], instance_id)
self.assertEqual(vol['status'], "in-use")
self.assertEqual(vol['attach_status'], "attached")
self.cloud.terminate_instances(self.context, [ec2_instance_id])
greenthread.sleep(0.3)
admin_ctxt = context.get_admin_context(read_deleted=False)
vol = db.volume_get(admin_ctxt, vol1['id'])
self.assertFalse(vol['deleted'])
db.volume_destroy(self.context, vol1['id'])
greenthread.sleep(0.3)
admin_ctxt = context.get_admin_context(read_deleted=True)
vol = db.volume_get(admin_ctxt, vol2['id'])
self.assertTrue(vol['deleted'])
self._restart_compute_service()
def test_stop_with_attached_volume(self):
"""Make sure attach info is reflected to block device mapping"""
# enforce periodic tasks run in short time to avoid wait for 60s.
self._restart_compute_service(periodic_interval=0.3)
vol1 = self._volume_create()
vol2 = self._volume_create()
kwargs = {'image_id': 'ami-1',
'instance_type': FLAGS.default_instance_type,
'max_count': 1,
'block_device_mapping': [{'device_name': '/dev/vdb',
'volume_id': vol1['id'],
'delete_on_termination': True}]}
ec2_instance_id = self._run_instance_wait(**kwargs)
instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
vols = db.volume_get_all_by_instance(self.context, instance_id)
self.assertEqual(len(vols), 1)
for vol in vols:
self.assertEqual(vol['id'], vol1['id'])
self._assert_volume_attached(vol, instance_id, '/dev/vdb')
vol = db.volume_get(self.context, vol2['id'])
self._assert_volume_detached(vol)
self.cloud.compute_api.attach_volume(self.context,
instance_id=instance_id,
volume_id=vol2['id'],
device='/dev/vdc')
greenthread.sleep(0.3)
vol = db.volume_get(self.context, vol2['id'])
self._assert_volume_attached(vol, instance_id, '/dev/vdc')
self.cloud.compute_api.detach_volume(self.context,
volume_id=vol1['id'])
greenthread.sleep(0.3)
vol = db.volume_get(self.context, vol1['id'])
self._assert_volume_detached(vol)
result = self.cloud.stop_instances(self.context, [ec2_instance_id])
self.assertTrue(result)
self._wait_for_stopped(ec2_instance_id)
for vol_id in (vol1['id'], vol2['id']):
vol = db.volume_get(self.context, vol_id)
self._assert_volume_detached(vol)
self.cloud.start_instances(self.context, [ec2_instance_id])
self._wait_for_running(ec2_instance_id)
vols = db.volume_get_all_by_instance(self.context, instance_id)
self.assertEqual(len(vols), 1)
for vol in vols:
self.assertEqual(vol['id'], vol2['id'])
self._assert_volume_attached(vol, instance_id, '/dev/vdc')
vol = db.volume_get(self.context, vol1['id'])
self._assert_volume_detached(vol)
self.cloud.terminate_instances(self.context, [ec2_instance_id])
greenthread.sleep(0.3)
for vol_id in (vol1['id'], vol2['id']):
vol = db.volume_get(self.context, vol_id)
self.assertEqual(vol['id'], vol_id)
self._assert_volume_detached(vol)
db.volume_destroy(self.context, vol_id)
self._restart_compute_service()
def _create_snapshot(self, ec2_volume_id):
result = self.cloud.create_snapshot(self.context,
volume_id=ec2_volume_id)
greenthread.sleep(0.3)
return result['snapshotId']
def test_run_with_snapshot(self):
"""Makes sure run/stop/start instance with snapshot works."""
vol = self._volume_create()
ec2_volume_id = ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x')
ec2_snapshot1_id = self._create_snapshot(ec2_volume_id)
snapshot1_id = ec2utils.ec2_id_to_id(ec2_snapshot1_id)
ec2_snapshot2_id = self._create_snapshot(ec2_volume_id)
snapshot2_id = ec2utils.ec2_id_to_id(ec2_snapshot2_id)
kwargs = {'image_id': 'ami-1',
'instance_type': FLAGS.default_instance_type,
'max_count': 1,
'block_device_mapping': [{'device_name': '/dev/vdb',
'snapshot_id': snapshot1_id,
'delete_on_termination': False, },
{'device_name': '/dev/vdc',
'snapshot_id': snapshot2_id,
'delete_on_termination': True}]}
ec2_instance_id = self._run_instance_wait(**kwargs)
instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
vols = db.volume_get_all_by_instance(self.context, instance_id)
self.assertEqual(len(vols), 2)
vol1_id = None
vol2_id = None
for vol in vols:
snapshot_id = vol['snapshot_id']
if snapshot_id == snapshot1_id:
vol1_id = vol['id']
mountpoint = '/dev/vdb'
elif snapshot_id == snapshot2_id:
vol2_id = vol['id']
mountpoint = '/dev/vdc'
else:
self.fail()
self._assert_volume_attached(vol, instance_id, mountpoint)
self.assertTrue(vol1_id)
self.assertTrue(vol2_id)
self.cloud.terminate_instances(self.context, [ec2_instance_id])
greenthread.sleep(0.3)
self._wait_for_terminate(ec2_instance_id)
greenthread.sleep(0.3)
admin_ctxt = context.get_admin_context(read_deleted=False)
vol = db.volume_get(admin_ctxt, vol1_id)
self._assert_volume_detached(vol)
self.assertFalse(vol['deleted'])
db.volume_destroy(self.context, vol1_id)
greenthread.sleep(0.3)
admin_ctxt = context.get_admin_context(read_deleted=True)
vol = db.volume_get(admin_ctxt, vol2_id)
self.assertTrue(vol['deleted'])
for snapshot_id in (ec2_snapshot1_id, ec2_snapshot2_id):
self.cloud.delete_snapshot(self.context, snapshot_id)
greenthread.sleep(0.3)
db.volume_destroy(self.context, vol['id'])

View File

@ -22,21 +22,21 @@ Tests For Compute
import mox
import stubout
from nova.auth import manager
from nova import compute
from nova.compute import instance_types
from nova.compute import manager as compute_manager
from nova.compute import power_state
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova import flags
import nova.image.fake
from nova import log as logging
from nova import rpc
from nova import test
from nova import utils
from nova.auth import manager
from nova.compute import instance_types
from nova.compute import manager as compute_manager
from nova.compute import power_state
from nova.db.sqlalchemy import models
from nova.image import local
LOG = logging.getLogger('nova.tests.compute')
FLAGS = flags.FLAGS
@ -73,7 +73,7 @@ class ComputeTestCase(test.TestCase):
def fake_show(meh, context, id):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
self.stubs.Set(local.LocalImageService, 'show', fake_show)
self.stubs.Set(nova.image.fake._FakeImageService, 'show', fake_show)
def tearDown(self):
self.manager.delete_user(self.user)
@ -228,6 +228,21 @@ class ComputeTestCase(test.TestCase):
self.assert_(instance_ref['launched_at'] < terminate)
self.assert_(instance_ref['deleted_at'] > terminate)
def test_stop(self):
"""Ensure instance can be stopped"""
instance_id = self._create_instance()
self.compute.run_instance(self.context, instance_id)
self.compute.stop_instance(self.context, instance_id)
self.compute.terminate_instance(self.context, instance_id)
def test_start(self):
"""Ensure instance can be started"""
instance_id = self._create_instance()
self.compute.run_instance(self.context, instance_id)
self.compute.stop_instance(self.context, instance_id)
self.compute.start_instance(self.context, instance_id)
self.compute.terminate_instance(self.context, instance_id)
def test_pause(self):
"""Ensure instance can be paused"""
instance_id = self._create_instance()

View File

@ -275,3 +275,21 @@ class GenericUtilsTestCase(test.TestCase):
# error case
result = utils.parse_server_string('www.exa:mple.com:8443')
self.assertEqual(('', ''), result)
class IsUUIDLikeTestCase(test.TestCase):
def assertUUIDLike(self, val, expected):
result = utils.is_uuid_like(val)
self.assertEqual(result, expected)
def test_good_uuid(self):
val = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
self.assertUUIDLike(val, True)
def test_integer_passed(self):
val = 1
self.assertUUIDLike(val, False)
def test_non_uuid_string_passed(self):
val = 'foo-fooo'
self.assertUUIDLike(val, False)

View File

@ -33,6 +33,7 @@ from nova import utils
from nova.auth import manager
from nova.compute import instance_types
from nova.compute import power_state
from nova import exception
from nova.virt import xenapi_conn
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import volume_utils
@ -229,6 +230,23 @@ class XenAPIVMTestCase(test.TestCase):
instance = self._create_instance()
self.conn.get_diagnostics(instance)
def test_instance_snapshot_fails_with_no_primary_vdi(self):
def create_bad_vbd(vm_ref, vdi_ref):
vbd_rec = {'VM': vm_ref,
'VDI': vdi_ref,
'userdevice': 'fake',
'currently_attached': False}
vbd_ref = xenapi_fake._create_object('VBD', vbd_rec)
xenapi_fake.after_VBD_create(vbd_ref, vbd_rec)
return vbd_ref
self.stubs.Set(xenapi_fake, 'create_vbd', create_bad_vbd)
stubs.stubout_instance_snapshot(self.stubs)
instance = self._create_instance()
name = "MySnapshot"
self.assertRaises(exception.Error, self.conn.snapshot, instance, name)
def test_instance_snapshot(self):
stubs.stubout_instance_snapshot(self.stubs)
instance = self._create_instance()

View File

@ -35,6 +35,7 @@ import struct
import sys
import time
import types
import uuid
from xml.sax import saxutils
from eventlet import event
@ -726,3 +727,17 @@ def parse_server_string(server_str):
except:
LOG.debug(_('Invalid server_string: %s' % server_str))
return ('', '')
def gen_uuid():
return uuid.uuid4()
def is_uuid_like(val):
"""For our purposes, a UUID is a string in canoical form:
aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa
"""
if not isinstance(val, basestring):
return False
return (len(val) == 36) and (val.count('-') == 4)
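A brief, hedged usage sketch of these new helpers (illustrative; note that the check is purely structural and does not validate the hex digits):
from nova import utils
# gen_uuid() returns a uuid.UUID object, so convert to str before comparing or
# storing; is_uuid_like() only checks the length and the number of dashes
uid = str(utils.gen_uuid())
assert utils.is_uuid_like(uid)             # canonical 36-character form
assert not utils.is_uuid_like(1)           # non-strings are rejected
assert not utils.is_uuid_like('foo-fooo')  # wrong shape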

View File

@ -61,7 +61,7 @@ class ComputeDriver(object):
"""Return a list of InstanceInfo for all registered VMs"""
raise NotImplementedError()
def spawn(self, instance, network_info=None):
def spawn(self, instance, network_info=None, block_device_mapping=None):
"""Launch a VM for the specified instance"""
raise NotImplementedError()

View File

@ -129,7 +129,7 @@ class FakeConnection(driver.ComputeDriver):
info_list.append(self._map_to_instance_info(instance))
return info_list
def spawn(self, instance):
def spawn(self, instance, network_info=None, block_device_mapping=None):
"""
Create a new instance/VM/domain on the virtualization platform.
@ -252,6 +252,10 @@ class FakeConnection(driver.ComputeDriver):
"""
pass
def poll_rescued_instances(self, timeout):
"""Poll for rescued instances"""
pass
def migrate_disk_and_power_off(self, instance, dest):
"""
Transfers the disk of a running instance in multiple phases, turning

View File

@ -139,7 +139,7 @@ class HyperVConnection(driver.ComputeDriver):
return instance_infos
def spawn(self, instance):
def spawn(self, instance, network_info=None, block_device_mapping=None):
""" Create a new VM and start it."""
vm = self._lookup(instance.name)
if vm is not None:

View File

@ -67,11 +67,13 @@
<target dev='${disk_prefix}b' bus='${disk_bus}'/>
</disk>
#else
#if not ($getVar('ebs_root', False))
<disk type='file'>
<driver type='${driver_type}'/>
<source file='${basepath}/disk'/>
<target dev='${disk_prefix}a' bus='${disk_bus}'/>
</disk>
#end if
#if $getVar('local', False)
<disk type='file'>
<driver type='${driver_type}'/>
@ -79,6 +81,13 @@
<target dev='${disk_prefix}b' bus='${disk_bus}'/>
</disk>
#end if
#for $vol in $volumes
<disk type='block'>
<driver type='raw'/>
<source dev='${vol.device_path}'/>
<target dev='${vol.mount_device}' bus='${disk_bus}'/>
</disk>
#end for
#end if
#end if

View File

@ -40,6 +40,7 @@ import hashlib
import multiprocessing
import os
import random
import re
import shutil
import subprocess
import sys
@ -148,6 +149,10 @@ def _late_load_cheetah():
Template = t.Template
def _strip_dev(mount_path):
return re.sub(r'^/dev/', '', mount_path)
class LibvirtConnection(driver.ComputeDriver):
def __init__(self, read_only):
@ -575,11 +580,14 @@ class LibvirtConnection(driver.ComputeDriver):
# NOTE(ilyaalekseyev): Implementation like in multinics
# for xenapi(tr3buchet)
@exception.wrap_exception
def spawn(self, instance, network_info=None):
xml = self.to_xml(instance, False, network_info)
def spawn(self, instance, network_info=None, block_device_mapping=None):
xml = self.to_xml(instance, False, network_info=network_info,
block_device_mapping=block_device_mapping)
block_device_mapping = block_device_mapping or []
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
self._create_image(instance, xml, network_info=network_info)
self._create_image(instance, xml, network_info=network_info,
block_device_mapping=block_device_mapping)
domain = self._create_new_domain(xml)
LOG.debug(_("instance %s: is running"), instance['name'])
self.firewall_driver.apply_instance_filter(instance)
@ -761,7 +769,8 @@ class LibvirtConnection(driver.ComputeDriver):
# TODO(vish): should we format disk by default?
def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None,
network_info=None):
network_info=None, block_device_mapping=None):
block_device_mapping = block_device_mapping or []
if not network_info:
network_info = netutils.get_network_info(inst)
@ -824,16 +833,19 @@ class LibvirtConnection(driver.ComputeDriver):
size = None
root_fname += "_sm"
self._cache_image(fn=self._fetch_image,
target=basepath('disk'),
fname=root_fname,
cow=FLAGS.use_cow_images,
image_id=disk_images['image_id'],
user=user,
project=project,
size=size)
if not self._volume_in_mapping(self.root_mount_device,
block_device_mapping):
self._cache_image(fn=self._fetch_image,
target=basepath('disk'),
fname=root_fname,
cow=FLAGS.use_cow_images,
image_id=disk_images['image_id'],
user=user,
project=project,
size=size)
if inst_type['local_gb']:
if inst_type['local_gb'] and not self._volume_in_mapping(
self.local_mount_device, block_device_mapping):
self._cache_image(fn=self._create_local,
target=basepath('disk.local'),
fname="local_%s" % inst_type['local_gb'],
@ -948,7 +960,20 @@ class LibvirtConnection(driver.ComputeDriver):
return result
def _prepare_xml_info(self, instance, rescue=False, network_info=None):
root_mount_device = 'vda' # FIXME for now. it's hard coded.
local_mount_device = 'vdb' # FIXME for now. it's hard coded.
def _volume_in_mapping(self, mount_device, block_device_mapping):
mount_device_ = _strip_dev(mount_device)
for vol in block_device_mapping:
vol_mount_device = _strip_dev(vol['mount_device'])
if vol_mount_device == mount_device_:
return True
return False
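A hedged sketch of the block_device_mapping entries this code appears to expect (keys inferred from _volume_in_mapping above and from the $vol.device_path / $vol.mount_device references in the libvirt template hunk earlier in this commit; values are illustrative):
# illustrative only: one dict per attached volume; _prepare_xml_info strips the
# '/dev/' prefix from mount_device before passing the list to the XML template
# as 'volumes', and _volume_in_mapping uses it to decide whether the image-backed
# root ('vda') and local ('vdb') disks should be suppressed
block_device_mapping = [
    {'mount_device': '/dev/vdb',
     'device_path': '/dev/disk/by-path/volume-id-1'},
]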
def _prepare_xml_info(self, instance, rescue=False, network_info=None,
block_device_mapping=None):
block_device_mapping = block_device_mapping or []
# TODO(adiantum) remove network_info creation code
# when multinics will be completed
if not network_info:
@ -966,6 +991,16 @@ class LibvirtConnection(driver.ComputeDriver):
else:
driver_type = 'raw'
for vol in block_device_mapping:
vol['mount_device'] = _strip_dev(vol['mount_device'])
ebs_root = self._volume_in_mapping(self.root_mount_device,
block_device_mapping)
if self._volume_in_mapping(self.local_mount_device,
block_device_mapping):
local_gb = False
else:
local_gb = inst_type['local_gb']
xml_info = {'type': FLAGS.libvirt_type,
'name': instance['name'],
'basepath': os.path.join(FLAGS.instances_path,
@ -973,9 +1008,11 @@ class LibvirtConnection(driver.ComputeDriver):
'memory_kb': inst_type['memory_mb'] * 1024,
'vcpus': inst_type['vcpus'],
'rescue': rescue,
'local': inst_type['local_gb'],
'local': local_gb,
'driver_type': driver_type,
'nics': nics}
'nics': nics,
'ebs_root': ebs_root,
'volumes': block_device_mapping}
if FLAGS.vnc_enabled:
if FLAGS.libvirt_type != 'lxc':
@ -991,10 +1028,13 @@ class LibvirtConnection(driver.ComputeDriver):
xml_info['disk'] = xml_info['basepath'] + "/disk"
return xml_info
def to_xml(self, instance, rescue=False, network_info=None):
def to_xml(self, instance, rescue=False, network_info=None,
block_device_mapping=None):
block_device_mapping = block_device_mapping or []
# TODO(termie): cache?
LOG.debug(_('instance %s: starting toXML method'), instance['name'])
xml_info = self._prepare_xml_info(instance, rescue, network_info)
xml_info = self._prepare_xml_info(instance, rescue, network_info,
block_device_mapping)
xml = str(Template(self.libvirt_xml, searchList=[xml_info]))
LOG.debug(_('instance %s: finished toXML method'), instance['name'])
return xml

View File

@ -90,8 +90,6 @@ def fetch_image(image, instance, **kwargs):
func = _get_glance_image
elif FLAGS.image_service == "nova.image.s3.S3ImageService":
func = _get_s3_image
elif FLAGS.image_service == "nova.image.local.LocalImageService":
func = _get_local_image
else:
raise NotImplementedError(_("The Image Service %s is not implemented")
% FLAGS.image_service)
@ -105,8 +103,6 @@ def upload_image(image, instance, **kwargs):
func = _put_glance_image
elif FLAGS.image_service == "nova.image.s3.S3ImageService":
func = _put_s3_image
elif FLAGS.image_service == "nova.image.local.LocalImageService":
func = _put_local_image
else:
raise NotImplementedError(_("The Image Service %s is not implemented")
% FLAGS.image_service)
@ -192,8 +188,6 @@ def get_vmdk_size_and_properties(image, instance):
size, properties = meta_data["size"], meta_data["properties"]
elif FLAGS.image_service == "nova.image.s3.S3ImageService":
raise NotImplementedError
elif FLAGS.image_service == "nova.image.local.LocalImageService":
raise NotImplementedError
LOG.debug(_("Got image size of %(size)s for the image %(image)s") %
locals())
return size, properties

View File

@ -124,7 +124,7 @@ class VMWareESXConnection(driver.ComputeDriver):
"""List VM instances."""
return self._vmops.list_instances()
def spawn(self, instance):
def spawn(self, instance, network_info=None, block_device_mapping=None):
"""Create VM instance."""
self._vmops.spawn(instance)

View File

@ -146,6 +146,7 @@ def create_vdi(name_label, read_only, sr_ref, sharable):
def create_vbd(vm_ref, vdi_ref):
vbd_rec = {'VM': vm_ref,
'VDI': vdi_ref,
'userdevice': '0',
'currently_attached': False}
vbd_ref = _create_object('VBD', vbd_rec)
after_VBD_create(vbd_ref, vbd_rec)

View File

@ -283,19 +283,16 @@ class VMHelper(HelperBase):
@classmethod
def get_vdi_for_vm_safely(cls, session, vm_ref):
vdi_refs = VMHelper.lookup_vm_vdis(session, vm_ref)
if vdi_refs is None:
raise Exception(_("No VDIs found for VM %s") % vm_ref)
else:
num_vdis = len(vdi_refs)
if num_vdis != 1:
raise Exception(
_("Unexpected number of VDIs (%(num_vdis)s) found"
" for VM %(vm_ref)s") % locals())
vdi_ref = vdi_refs[0]
vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
return vdi_ref, vdi_rec
"""Retrieves the primary VDI for a VM"""
vbd_refs = session.get_xenapi().VM.get_VBDs(vm_ref)
for vbd in vbd_refs:
vbd_rec = session.get_xenapi().VBD.get_record(vbd)
# Convention dictates the primary VDI will be userdevice 0
if vbd_rec['userdevice'] == '0':
vdi_rec = session.get_xenapi().VDI.get_record(vbd_rec['VDI'])
return vbd_rec['VDI'], vdi_rec
raise exception.Error(_("No primary VDI found for "
"%(vm_ref)s") % locals())
@classmethod
def create_snapshot(cls, session, instance_id, vm_ref, label):
@ -328,12 +325,6 @@ class VMHelper(HelperBase):
'snap': template_vdi_uuid}
return template_vm_ref, template_vdi_uuids
@classmethod
def get_sr(cls, session, sr_label='slices'):
"""Finds the SR named by the given name label and returns
the UUID"""
return session.call_xenapi('SR.get_by_name_label', sr_label)[0]
@classmethod
def get_sr_path(cls, session):
"""Return the path to our storage repository
@ -789,8 +780,7 @@ class VMHelper(HelperBase):
@classmethod
def scan_default_sr(cls, session):
"""Looks for the system default SR and triggers a re-scan"""
#FIXME(sirp/mdietz): refactor scan_default_sr in there
sr_ref = cls.get_sr(session)
sr_ref = find_sr(session)
session.call_xenapi('SR.scan', sr_ref)
@ -882,7 +872,8 @@ def get_vdi_for_vm_safely(session, vm_ref):
else:
num_vdis = len(vdi_refs)
if num_vdis != 1:
raise Exception(_("Unexpected number of VDIs (%(num_vdis)s) found"
raise exception.Exception(_("Unexpected number of VDIs "
"(%(num_vdis)s) found"
" for VM %(vm_ref)s") % locals())
vdi_ref = vdi_refs[0]

View File

@ -194,7 +194,7 @@ class XenAPIConnection(driver.ComputeDriver):
def list_instances_detail(self):
return self._vmops.list_instances_detail()
def spawn(self, instance):
def spawn(self, instance, network_info=None, block_device_mapping=None):
"""Create VM instance"""
self._vmops.spawn(instance)

View File

@ -21,6 +21,9 @@ Handles all requests relating to volumes.
"""
from eventlet import greenthread
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
@ -44,7 +47,8 @@ class API(base.Base):
if snapshot['status'] != "available":
raise exception.ApiError(
_("Snapshot status must be available"))
size = snapshot['volume_size']
if not size:
size = snapshot['volume_size']
if quota.allowed_volumes(context, 1, size) < 1:
pid = context.project_id
@ -73,6 +77,14 @@ class API(base.Base):
"snapshot_id": snapshot_id}})
return volume
# TODO(yamahata): eliminate dumb polling
def wait_creation(self, context, volume_id):
while True:
volume = self.get(context, volume_id)
if volume['status'] != 'creating':
return
greenthread.sleep(1)
def delete(self, context, volume_id):
volume = self.get(context, volume_id)
if volume['status'] != "available":

View File

@ -582,6 +582,14 @@ class FakeISCSIDriver(ISCSIDriver):
"""No setup necessary in fake mode."""
pass
def discover_volume(self, context, volume):
"""Discover volume on a remote host."""
return "/dev/disk/by-path/volume-id-%d" % volume['id']
def undiscover_volume(self, volume):
"""Undiscover volume on a remote host."""
pass
@staticmethod
def fake_execute(cmd, *_args, **_kwargs):
"""Execute that simply logs the command."""

View File

@ -211,6 +211,12 @@ class NovaTestResult(result.TextTestResult):
break
sys.stdout = stdout
# NOTE(lorinh): Initialize start_time in case a sqlalchemy-migrate
# error results in it failing to be initialized later. Otherwise,
# _handleElapsedTime will fail, causing the wrong error message to
# be output.
self.start_time = time.time()
def getDescription(self, test):
return str(test)

View File

@ -10,7 +10,7 @@ boto==1.9b
carrot==0.10.5
eventlet==0.9.12
lockfile==0.8
python-novaclient==2.5
python-novaclient==2.5.3
python-daemon==1.5.5
python-gflags==1.3
redis==2.0.0