Merge "compute_api create*() and schedulers refactoring"
This commit is contained in:
commit a2646129bc
@@ -77,6 +77,8 @@ The filtering (excluding compute nodes incapable of fulfilling the request) and

Requesting a new instance
-------------------------

+(Note: The information below is out of date, as the `nova.compute.api.create_all_at_once()` functionality has merged into `nova.compute.api.create()` and the non-zone aware schedulers have been updated.)
+
Prior to the `BaseScheduler`, to request a new instance, a call was made to `nova.compute.api.create()`. The type of instance created depended on the value of the `InstanceType` record being passed in. The `InstanceType` determined the amount of disk, CPU, RAM and network required for the instance. Administrators can add new `InstanceType` records to suit their needs. For more complicated instance requests we need to go beyond the default fields in the `InstanceType` table.

`nova.compute.api.create()` performed the following actions:
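For orientation before the per-file hunks below, here is a minimal sketch of the unified contract this commit introduces (an illustrative wrapper, not part of the diff; `context`, `inst_type`, and `image_id` are assumed to be supplied by the caller): `create()` now always returns an `(instances, reservation_id)` tuple, and `wait_for_instances` selects between waiting for the instance DB entries and returning immediately with just a reservation ID.

    from nova import compute

    def boot(context, inst_type, image_id, reservation_only=False):
        """Illustrative wrapper around the unified create() call."""
        compute_api = compute.API()
        (instances, resv_id) = compute_api.create(
            context, inst_type, image_id,
            min_count=1, max_count=1,
            # EC2 and the plain OS API path need the instance DB entries up
            # front; zone-aware callers pass False and keep the reservation id.
            wait_for_instances=not reservation_only)
        if reservation_only:
            return {'reservation_id': resv_id}
        return instances[0]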
nova/api/ec2/cloud.py
@@ -1385,7 +1385,7 @@ class CloudController(object):
        if image_state != 'available':
            raise exception.ApiError(_('Image must be available'))

-        instances = self.compute_api.create(context,
+        (instances, resv_id) = self.compute_api.create(context,
            instance_type=instance_types.get_instance_type_by_name(
                kwargs.get('instance_type', None)),
            image_href=self._get_image(context, kwargs['image_id'])['id'],
@@ -1400,9 +1400,11 @@ class CloudController(object):
            security_group=kwargs.get('security_group'),
            availability_zone=kwargs.get('placement', {}).get(
                'AvailabilityZone'),
-            block_device_mapping=kwargs.get('block_device_mapping', {}))
-        return self._format_run_instances(context,
-            reservation_id=instances[0]['reservation_id'])
+            block_device_mapping=kwargs.get('block_device_mapping', {}),
+            # NOTE(comstud): Unfortunately, EC2 requires that the
+            # instance DB entries have been created..
+            wait_for_instances=True)
+        return self._format_run_instances(context, resv_id)

    def _do_instance(self, action, context, ec2_id):
        instance_id = ec2utils.ec2_id_to_id(ec2_id)
nova/api/openstack/__init__.py
@@ -139,8 +139,7 @@ class APIRouter(base_wsgi.Router):
                        controller=zones.create_resource(version),
                        collection={'detail': 'GET',
                                    'info': 'GET',
-                                   'select': 'POST',
-                                   'boot': 'POST'})
+                                   'select': 'POST'})

        mapper.connect("versions", "/",
                       controller=versions.create_resource(version),
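With the zone 'boot' route gone, the equivalent request goes through the ordinary server-create path. A hedged sketch of the replacement request body (field names taken from the servers.py changes later in this commit; the values are illustrative):

    # POST /servers body asking for only a reservation id back.  min_count
    # and max_count arrive as strings and are coerced in the controller.
    body = {'server': {'name': 'test-server',
                       'imageRef': '3',
                       'flavorRef': '1',
                       'min_count': '2',
                       'max_count': '4',
                       'return_reservation_id': True}}
    # The new create() then calls compute_api.create(...,
    # wait_for_instances=False) and returns {'reservation_id': resv_id}.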
nova/api/openstack/contrib/createserverext.py
@@ -15,7 +15,6 @@
# under the License

from nova import utils
-from nova.api.openstack import create_instance_helper as helper
from nova.api.openstack import extensions
from nova.api.openstack import servers
from nova.api.openstack import wsgi
@@ -66,7 +65,7 @@ class Createserverext(extensions.ExtensionDescriptor):
        }

        body_deserializers = {
-            'application/xml': helper.ServerXMLDeserializerV11(),
+            'application/xml': servers.ServerXMLDeserializerV11(),
        }

        serializer = wsgi.ResponseSerializer(body_serializers,
nova/api/openstack/contrib/volumes.py
@@ -334,47 +334,8 @@ class VolumeAttachmentController(object):
class BootFromVolumeController(servers.ControllerV11):
    """The boot from volume API controller for the Openstack API."""

-    def _create_instance(self, context, instance_type, image_href, **kwargs):
-        try:
-            return self.compute_api.create(context, instance_type,
-                                           image_href, **kwargs)
-        except quota.QuotaError as error:
-            self.helper._handle_quota_error(error)
-        except exception.ImageNotFound as error:
-            msg = _("Can not find requested image")
-            raise faults.Fault(exc.HTTPBadRequest(explanation=msg))
-
-    def create(self, req, body):
-        """ Creates a new server for a given user """
-        extra_values = None
-        try:
-
-            def get_kwargs(context, instance_type, image_href, **kwargs):
-                kwargs['context'] = context
-                kwargs['instance_type'] = instance_type
-                kwargs['image_href'] = image_href
-                return kwargs
-
-            extra_values, kwargs = self.helper.create_instance(req, body,
-                                                               get_kwargs)
-
-            block_device_mapping = body['server'].get('block_device_mapping')
-            kwargs['block_device_mapping'] = block_device_mapping
-
-            instances = self._create_instance(**kwargs)
-        except faults.Fault, f:
-            return f
-
-        # We can only return 1 instance via the API, if we happen to
-        # build more than one... instances is a list, so we'll just
-        # use the first one..
-        inst = instances[0]
-        for key in ['instance_type', 'image_ref']:
-            inst[key] = extra_values[key]
-
-        server = self._build_view(req, inst, is_detail=True)
-        server['server']['adminPass'] = extra_values['password']
-        return server
+    def _get_block_device_mapping(self, data):
+        return data.get('block_device_mapping')


class Volumes(extensions.ExtensionDescriptor):
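The boot-from-volume controller shrinks to a single override because block-device handling is now a hook on the base servers controller. Roughly, the pattern looks like this (a condensed sketch of what the servers.py hunks below add, not a verbatim excerpt):

    class Controller(object):
        def _get_block_device_mapping(self, data):
            # Base controller: no block devices.  'data' is the deserialized
            # 'server' dict from the request body.
            return None

        def create(self, req, body):
            server_dict = body['server']
            # ...validation elided...
            block_device_mapping = self._get_block_device_mapping(server_dict)
            # ...passed straight through to compute_api.create().


    class BootFromVolumeController(Controller):
        def _get_block_device_mapping(self, data):
            # Volumes extension: honor the request's block_device_mapping.
            return data.get('block_device_mapping')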
nova/api/openstack/contrib/zones.py (new file, 50 lines)
@@ -0,0 +1,50 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""The zones extension."""


from nova import flags
from nova import log as logging
from nova.api.openstack import extensions


LOG = logging.getLogger("nova.api.zones")
FLAGS = flags.FLAGS


class Zones(extensions.ExtensionDescriptor):
    def get_name(self):
        return "Zones"

    def get_alias(self):
        return "os-zones"

    def get_description(self):
        return """Enables zones-related functionality such as adding
        child zones, listing child zones, getting the capabilities of the
        local zone, and returning build plans to parent zones' schedulers"""

    def get_namespace(self):
        return "http://docs.openstack.org/ext/zones/api/v1.1"

    def get_updated(self):
        return "2011-09-21T00:00:00+00:00"

    def get_resources(self):
        # Nothing yet.
        return []
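get_resources() returning an empty list means the extension publishes no routes yet. If it eventually does, the extension framework in this tree expects ResourceExtension entries; a hypothetical sketch (the import, collection name, and version are assumptions, not part of this commit):

    def get_resources(self):
        # Hypothetical future wiring, following the ResourceExtension
        # pattern used by other extensions in this tree.
        from nova.api.openstack import zones
        res = extensions.ResourceExtension('os-zones',
                                           zones.create_resource('1.1'))
        return [res]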
nova/api/openstack/create_instance_helper.py (deleted file, 606 lines)
@@ -1,606 +0,0 @@
# Copyright 2011 OpenStack LLC.
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import base64

from webob import exc
from xml.dom import minidom

from nova import exception
from nova import flags
from nova import log as logging
import nova.image
from nova import quota
from nova import utils

from nova.compute import instance_types
from nova.api.openstack import common
from nova.api.openstack import wsgi
from nova.rpc.common import RemoteError

LOG = logging.getLogger('nova.api.openstack.create_instance_helper')
FLAGS = flags.FLAGS


class CreateFault(exception.NovaException):
    message = _("Invalid parameters given to create_instance.")

    def __init__(self, fault):
        self.fault = fault
        super(CreateFault, self).__init__()


class CreateInstanceHelper(object):
    """This is the base class for OS API Controllers that
    are capable of creating instances (currently Servers and Zones).

    Once we stabilize the Zones portion of the API we may be able
    to move this code back into servers.py
    """

    def __init__(self, controller):
        """We need the image service to create an instance."""
        self.controller = controller
        self._image_service = utils.import_object(FLAGS.image_service)
        super(CreateInstanceHelper, self).__init__()

    def create_instance(self, req, body, create_method):
        """Creates a new server for the given user. The approach
        used depends on the create_method. For example, the standard
        POST /server call uses compute.api.create(), while
        POST /zones/server uses compute.api.create_all_at_once().

        The problem is, both approaches return different values (i.e.
        [instance dicts] vs. reservation_id). So the handling of the
        return type from this method is left to the caller.
        """
        if not body:
            raise exc.HTTPUnprocessableEntity()

        if not 'server' in body:
            raise exc.HTTPUnprocessableEntity()

        context = req.environ['nova.context']
        server_dict = body['server']
        password = self.controller._get_server_admin_password(server_dict)

        if not 'name' in server_dict:
            msg = _("Server name is not defined")
            raise exc.HTTPBadRequest(explanation=msg)

        name = server_dict['name']
        self._validate_server_name(name)
        name = name.strip()

        image_href = self.controller._image_ref_from_req_data(body)
        # If the image href was generated by nova api, strip image_href
        # down to an id and use the default glance connection params

        if str(image_href).startswith(req.application_url):
            image_href = image_href.split('/').pop()
        try:
            image_service, image_id = nova.image.get_image_service(context,
                                                                   image_href)
            kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image(
                req, image_service, image_id)
            images = set([str(x['id']) for x in image_service.index(context)])
            assert str(image_id) in images
        except Exception, e:
            msg = _("Cannot find requested image %(image_href)s: %(e)s" %
                    locals())
            raise exc.HTTPBadRequest(explanation=msg)

        personality = server_dict.get('personality')
        config_drive = server_dict.get('config_drive')

        injected_files = []
        if personality:
            injected_files = self._get_injected_files(personality)

        sg_names = []
        security_groups = server_dict.get('security_groups')
        if security_groups is not None:
            sg_names = [sg['name'] for sg in security_groups if sg.get('name')]
        if not sg_names:
            sg_names.append('default')

        sg_names = list(set(sg_names))

        requested_networks = server_dict.get('networks')
        if requested_networks is not None:
            requested_networks = self._get_requested_networks(
                requested_networks)

        try:
            flavor_id = self.controller._flavor_id_from_req_data(body)
        except ValueError as error:
            msg = _("Invalid flavorRef provided.")
            raise exc.HTTPBadRequest(explanation=msg)

        zone_blob = server_dict.get('blob')

        # optional openstack extensions:
        key_name = server_dict.get('key_name')
        user_data = server_dict.get('user_data')
        self._validate_user_data(user_data)

        availability_zone = server_dict.get('availability_zone')
        name = server_dict['name']
        self._validate_server_name(name)
        name = name.strip()

        reservation_id = server_dict.get('reservation_id')
        min_count = server_dict.get('min_count')
        max_count = server_dict.get('max_count')
        # min_count and max_count are optional.  If they exist, they come
        # in as strings.  We want to default 'min_count' to 1, and default
        # 'max_count' to be 'min_count'.
        min_count = int(min_count) if min_count else 1
        max_count = int(max_count) if max_count else min_count
        if min_count > max_count:
            min_count = max_count

        try:
            inst_type = \
                instance_types.get_instance_type_by_flavor_id(flavor_id)
            extra_values = {
                'instance_type': inst_type,
                'image_ref': image_href,
                'config_drive': config_drive,
                'password': password}

            return (extra_values,
                    create_method(context,
                                  inst_type,
                                  image_id,
                                  kernel_id=kernel_id,
                                  ramdisk_id=ramdisk_id,
                                  display_name=name,
                                  display_description=name,
                                  key_name=key_name,
                                  metadata=server_dict.get('metadata', {}),
                                  access_ip_v4=server_dict.get('accessIPv4'),
                                  access_ip_v6=server_dict.get('accessIPv6'),
                                  injected_files=injected_files,
                                  admin_password=password,
                                  zone_blob=zone_blob,
                                  reservation_id=reservation_id,
                                  min_count=min_count,
                                  max_count=max_count,
                                  requested_networks=requested_networks,
                                  security_group=sg_names,
                                  user_data=user_data,
                                  availability_zone=availability_zone,
                                  config_drive=config_drive,))
        except quota.QuotaError as error:
            self._handle_quota_error(error)
        except exception.InstanceTypeMemoryTooSmall as error:
            raise exc.HTTPBadRequest(explanation=unicode(error))
        except exception.InstanceTypeDiskTooSmall as error:
            raise exc.HTTPBadRequest(explanation=unicode(error))
        except exception.ImageNotFound as error:
            msg = _("Can not find requested image")
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.FlavorNotFound as error:
            msg = _("Invalid flavorRef provided.")
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.KeypairNotFound as error:
            msg = _("Invalid key_name provided.")
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.SecurityGroupNotFound as error:
            raise exc.HTTPBadRequest(explanation=unicode(error))
        except RemoteError as err:
            msg = "%(err_type)s: %(err_msg)s" % \
                  {'err_type': err.exc_type, 'err_msg': err.value}
            raise exc.HTTPBadRequest(explanation=msg)
        # Let the caller deal with unhandled exceptions.

    def _handle_quota_error(self, error):
        """
        Reraise quota errors as api-specific http exceptions
        """
        if error.code == "OnsetFileLimitExceeded":
            expl = _("Personality file limit exceeded")
            raise exc.HTTPRequestEntityTooLarge(explanation=error.message,
                                                headers={'Retry-After': 0})
        if error.code == "OnsetFilePathLimitExceeded":
            expl = _("Personality file path too long")
            raise exc.HTTPRequestEntityTooLarge(explanation=error.message,
                                                headers={'Retry-After': 0})
        if error.code == "OnsetFileContentLimitExceeded":
            expl = _("Personality file content too long")
            raise exc.HTTPRequestEntityTooLarge(explanation=error.message,
                                                headers={'Retry-After': 0})
        if error.code == "InstanceLimitExceeded":
            expl = _("Instance quotas have been exceeded")
            raise exc.HTTPRequestEntityTooLarge(explanation=error.message,
                                                headers={'Retry-After': 0})
        # if the original error is okay, just reraise it
        raise error

    def _deserialize_create(self, request):
        """
        Deserialize a create request

        Overrides normal behavior in the case of xml content
        """
        if request.content_type == "application/xml":
            deserializer = ServerXMLDeserializer()
            return deserializer.deserialize(request.body)
        else:
            return self._deserialize(request.body, request.get_content_type())

    def _validate_server_name(self, value):
        if not isinstance(value, basestring):
            msg = _("Server name is not a string or unicode")
            raise exc.HTTPBadRequest(explanation=msg)

        if value.strip() == '':
            msg = _("Server name is an empty string")
            raise exc.HTTPBadRequest(explanation=msg)

    def _get_kernel_ramdisk_from_image(self, req, image_service, image_id):
        """Fetch an image from the ImageService, then if present, return the
        associated kernel and ramdisk image IDs.
        """
        context = req.environ['nova.context']
        image_meta = image_service.show(context, image_id)
        # NOTE(sirp): extracted to a separate method to aid unit-testing, the
        # new method doesn't need a request obj or an ImageService stub
        kernel_id, ramdisk_id = self._do_get_kernel_ramdisk_from_image(
            image_meta)
        return kernel_id, ramdisk_id

    @staticmethod
    def _do_get_kernel_ramdisk_from_image(image_meta):
        """Given an ImageService image_meta, return kernel and ramdisk image
        ids if present.

        This is only valid for `ami` style images.
        """
        image_id = image_meta['id']
        if image_meta['status'] != 'active':
            raise exception.ImageUnacceptable(image_id=image_id,
                                              reason=_("status is not active"))

        if image_meta.get('container_format') != 'ami':
            return None, None

        try:
            kernel_id = image_meta['properties']['kernel_id']
        except KeyError:
            raise exception.KernelNotFoundForImage(image_id=image_id)

        try:
            ramdisk_id = image_meta['properties']['ramdisk_id']
        except KeyError:
            ramdisk_id = None

        return kernel_id, ramdisk_id

    def _get_injected_files(self, personality):
        """
        Create a list of injected files from the personality attribute

        At this time, injected_files must be formatted as a list of
        (file_path, file_content) pairs for compatibility with the
        underlying compute service.
        """
        injected_files = []

        for item in personality:
            try:
                path = item['path']
                contents = item['contents']
            except KeyError as key:
                expl = _('Bad personality format: missing %s') % key
                raise exc.HTTPBadRequest(explanation=expl)
            except TypeError:
                expl = _('Bad personality format')
                raise exc.HTTPBadRequest(explanation=expl)
            try:
                contents = base64.b64decode(contents)
            except TypeError:
                expl = _('Personality content for %s cannot be decoded') % path
                raise exc.HTTPBadRequest(explanation=expl)
            injected_files.append((path, contents))
        return injected_files

    def _get_server_admin_password_old_style(self, server):
        """ Determine the admin password for a server on creation """
        return utils.generate_password(FLAGS.password_length)

    def _get_server_admin_password_new_style(self, server):
        """ Determine the admin password for a server on creation """
        password = server.get('adminPass')

        if password is None:
            return utils.generate_password(FLAGS.password_length)
        if not isinstance(password, basestring) or password == '':
            msg = _("Invalid adminPass")
            raise exc.HTTPBadRequest(explanation=msg)
        return password

    def _get_requested_networks(self, requested_networks):
        """
        Create a list of requested networks from the networks attribute
        """
        networks = []
        for network in requested_networks:
            try:
                network_uuid = network['uuid']

                if not utils.is_uuid_like(network_uuid):
                    msg = _("Bad networks format: network uuid is not in"
                            " proper format (%s)") % network_uuid
                    raise exc.HTTPBadRequest(explanation=msg)

                #fixed IP address is optional
                #if the fixed IP address is not provided then
                #it will use one of the available IP address from the network
                address = network.get('fixed_ip', None)
                if address is not None and not utils.is_valid_ipv4(address):
                    msg = _("Invalid fixed IP address (%s)") % address
                    raise exc.HTTPBadRequest(explanation=msg)
                # check if the network id is already present in the list,
                # we don't want duplicate networks to be passed
                # at the boot time
                for id, ip in networks:
                    if id == network_uuid:
                        expl = _("Duplicate networks (%s) are not allowed")\
                               % network_uuid
                        raise exc.HTTPBadRequest(explanation=expl)

                networks.append((network_uuid, address))
            except KeyError as key:
                expl = _('Bad network format: missing %s') % key
                raise exc.HTTPBadRequest(explanation=expl)
            except TypeError:
                expl = _('Bad networks format')
                raise exc.HTTPBadRequest(explanation=expl)

        return networks

    def _validate_user_data(self, user_data):
        """Check if the user_data is encoded properly"""
        if not user_data:
            return
        try:
            user_data = base64.b64decode(user_data)
        except TypeError:
            expl = _('Userdata content cannot be decoded')
            raise exc.HTTPBadRequest(explanation=expl)


class ServerXMLDeserializer(wsgi.XMLDeserializer):
    """
    Deserializer to handle xml-formatted server create requests.

    Handles standard server attributes as well as optional metadata
    and personality attributes
    """

    metadata_deserializer = common.MetadataXMLDeserializer()

    def create(self, string):
        """Deserialize an xml-formatted server create request"""
        dom = minidom.parseString(string)
        server = self._extract_server(dom)
        return {'body': {'server': server}}

    def _extract_server(self, node):
        """Marshal the server attribute of a parsed request"""
        server = {}
        server_node = self.find_first_child_named(node, 'server')

        attributes = ["name", "imageId", "flavorId", "adminPass"]
        for attr in attributes:
            if server_node.getAttribute(attr):
                server[attr] = server_node.getAttribute(attr)

        metadata_node = self.find_first_child_named(server_node, "metadata")
        server["metadata"] = self.metadata_deserializer.extract_metadata(
            metadata_node)

        server["personality"] = self._extract_personality(server_node)

        return server

    def _extract_personality(self, server_node):
        """Marshal the personality attribute of a parsed request"""
        node = self.find_first_child_named(server_node, "personality")
        personality = []
        if node is not None:
            for file_node in self.find_children_named(node, "file"):
                item = {}
                if file_node.hasAttribute("path"):
                    item["path"] = file_node.getAttribute("path")
                item["contents"] = self.extract_text(file_node)
                personality.append(item)
        return personality


class ServerXMLDeserializerV11(wsgi.MetadataXMLDeserializer):
    """
    Deserializer to handle xml-formatted server create requests.

    Handles standard server attributes as well as optional metadata
    and personality attributes
    """

    metadata_deserializer = common.MetadataXMLDeserializer()

    def action(self, string):
        dom = minidom.parseString(string)
        action_node = dom.childNodes[0]
        action_name = action_node.tagName

        action_deserializer = {
            'createImage': self._action_create_image,
            'createBackup': self._action_create_backup,
            'changePassword': self._action_change_password,
            'reboot': self._action_reboot,
            'rebuild': self._action_rebuild,
            'resize': self._action_resize,
            'confirmResize': self._action_confirm_resize,
            'revertResize': self._action_revert_resize,
        }.get(action_name, self.default)

        action_data = action_deserializer(action_node)

        return {'body': {action_name: action_data}}

    def _action_create_image(self, node):
        return self._deserialize_image_action(node, ('name',))

    def _action_create_backup(self, node):
        attributes = ('name', 'backup_type', 'rotation')
        return self._deserialize_image_action(node, attributes)

    def _action_change_password(self, node):
        if not node.hasAttribute("adminPass"):
            raise AttributeError("No adminPass was specified in request")
        return {"adminPass": node.getAttribute("adminPass")}

    def _action_reboot(self, node):
        if not node.hasAttribute("type"):
            raise AttributeError("No reboot type was specified in request")
        return {"type": node.getAttribute("type")}

    def _action_rebuild(self, node):
        rebuild = {}
        if node.hasAttribute("name"):
            rebuild['name'] = node.getAttribute("name")

        metadata_node = self.find_first_child_named(node, "metadata")
        if metadata_node is not None:
            rebuild["metadata"] = self.extract_metadata(metadata_node)

        personality = self._extract_personality(node)
        if personality is not None:
            rebuild["personality"] = personality

        if not node.hasAttribute("imageRef"):
            raise AttributeError("No imageRef was specified in request")
        rebuild["imageRef"] = node.getAttribute("imageRef")

        return rebuild

    def _action_resize(self, node):
        if not node.hasAttribute("flavorRef"):
            raise AttributeError("No flavorRef was specified in request")
        return {"flavorRef": node.getAttribute("flavorRef")}

    def _action_confirm_resize(self, node):
        return None

    def _action_revert_resize(self, node):
        return None

    def _deserialize_image_action(self, node, allowed_attributes):
        data = {}
        for attribute in allowed_attributes:
            value = node.getAttribute(attribute)
            if value:
                data[attribute] = value
        metadata_node = self.find_first_child_named(node, 'metadata')
        if metadata_node is not None:
            metadata = self.metadata_deserializer.extract_metadata(
                metadata_node)
            data['metadata'] = metadata
        return data

    def create(self, string):
        """Deserialize an xml-formatted server create request"""
        dom = minidom.parseString(string)
        server = self._extract_server(dom)
        return {'body': {'server': server}}

    def _extract_server(self, node):
        """Marshal the server attribute of a parsed request"""
        server = {}
        server_node = self.find_first_child_named(node, 'server')

        attributes = ["name", "imageRef", "flavorRef", "adminPass",
                      "accessIPv4", "accessIPv6"]
        for attr in attributes:
            if server_node.getAttribute(attr):
                server[attr] = server_node.getAttribute(attr)

        metadata_node = self.find_first_child_named(server_node, "metadata")
        if metadata_node is not None:
            server["metadata"] = self.extract_metadata(metadata_node)

        personality = self._extract_personality(server_node)
        if personality is not None:
            server["personality"] = personality

        networks = self._extract_networks(server_node)
        if networks is not None:
            server["networks"] = networks

        security_groups = self._extract_security_groups(server_node)
        if security_groups is not None:
            server["security_groups"] = security_groups

        return server

    def _extract_personality(self, server_node):
        """Marshal the personality attribute of a parsed request"""
        node = self.find_first_child_named(server_node, "personality")
        if node is not None:
            personality = []
            for file_node in self.find_children_named(node, "file"):
                item = {}
                if file_node.hasAttribute("path"):
                    item["path"] = file_node.getAttribute("path")
                item["contents"] = self.extract_text(file_node)
                personality.append(item)
            return personality
        else:
            return None

    def _extract_networks(self, server_node):
        """Marshal the networks attribute of a parsed request"""
        node = self.find_first_child_named(server_node, "networks")
        if node is not None:
            networks = []
            for network_node in self.find_children_named(node,
                                                         "network"):
                item = {}
                if network_node.hasAttribute("uuid"):
                    item["uuid"] = network_node.getAttribute("uuid")
                if network_node.hasAttribute("fixed_ip"):
                    item["fixed_ip"] = network_node.getAttribute("fixed_ip")
                networks.append(item)
            return networks
        else:
            return None

    def _extract_security_groups(self, server_node):
        """Marshal the security_groups attribute of a parsed request"""
        node = self.find_first_child_named(server_node, "security_groups")
        if node is not None:
            security_groups = []
            for sg_node in self.find_children_named(node, "security_group"):
                item = {}
                name_node = self.find_first_child_named(sg_node, "name")
                if name_node:
                    item["name"] = self.extract_text(name_node)
                security_groups.append(item)
            return security_groups
        else:
            return None
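The helper existed chiefly because, as its own docstring admits, create() and create_all_at_once() returned different types, so every caller needed this indirection. With the unified (instances, reservation_id) return value the indirection disappears; schematically (condensed, not line-for-line from the diff):

    # Before: the caller had to know which compute method the helper drove,
    # because the second return value changed type.
    extra_values, instances = self.helper.create_instance(
        req, body, self.compute_api.create)              # list of dicts
    extra_values, resv_id = self.helper.create_instance(
        req, body, self.compute_api.create_all_at_once)  # reservation id

    # After: one method, one shape; callers pick behavior with a flag.
    (instances, resv_id) = self.compute_api.create(
        context, inst_type, image_id, wait_for_instances=True)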
nova/api/openstack/servers.py
@@ -1,4 +1,5 @@
# Copyright 2010 OpenStack LLC.
+# Copyright 2011 Piston Cloud Computing, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -21,18 +22,21 @@ from novaclient import exceptions as novaclient_exceptions
from lxml import etree
from webob import exc
import webob
+from xml.dom import minidom

from nova import compute
from nova import network
from nova import db
from nova import exception
from nova import flags
+from nova import image
from nova import log as logging
+from nova import utils
+from nova import quota
from nova.api.openstack import common
-from nova.api.openstack import create_instance_helper as helper
from nova.api.openstack import ips
from nova.api.openstack import wsgi
from nova.compute import instance_types
from nova.scheduler import api as scheduler_api
import nova.api.openstack
import nova.api.openstack.views.addresses
@@ -40,6 +44,7 @@ import nova.api.openstack.views.flavors
import nova.api.openstack.views.images
import nova.api.openstack.views.servers
from nova.api.openstack import xmlutil
+from nova.rpc import common as rpc_common


LOG = logging.getLogger('nova.api.openstack.servers')
@@ -73,7 +78,6 @@ class Controller(object):
    def __init__(self):
        self.compute_api = compute.API()
        self.network_api = network.API()
-        self.helper = helper.CreateInstanceHelper(self)

    def index(self, req):
        """ Returns a list of server names and ids for a given user """
@@ -107,11 +111,23 @@ class Controller(object):
    def _action_rebuild(self, info, request, instance_id):
        raise NotImplementedError()

+    def _get_block_device_mapping(self, data):
+        """Get block_device_mapping from 'server' dictionary.
+        Overidden by volumes controller.
+        """
+        return None
+
    def _get_networks_for_instance(self, req, instance):
        return ips._get_networks_for_instance(req.environ['nova.context'],
                                              self.network_api,
                                              instance)

+    def _get_block_device_mapping(self, data):
+        """Get block_device_mapping from 'server' dictionary.
+        Overidden by volumes controller.
+        """
+        return None
+
    def _get_servers(self, req, is_detail):
        """Returns a list of servers, taking into account any search
        options specified.
@@ -163,6 +179,181 @@ class Controller(object):
        limited_list = self._limit_items(instance_list, req)
        return self._build_list(req, limited_list, is_detail=is_detail)

+    def _handle_quota_error(self, error):
+        """
+        Reraise quota errors as api-specific http exceptions
+        """
+
+        code_mappings = {
+            "OnsetFileLimitExceeded":
+                    _("Personality file limit exceeded"),
+            "OnsetFilePathLimitExceeded":
+                    _("Personality file path too long"),
+            "OnsetFileContentLimitExceeded":
+                    _("Personality file content too long"),
+            "InstanceLimitExceeded":
+                    _("Instance quotas have been exceeded")}
+
+        expl = code_mappings.get(error.code)
+        if expl:
+            raise exc.HTTPRequestEntityTooLarge(explanation=expl,
+                                                headers={'Retry-After': 0})
+        # if the original error is okay, just reraise it
+        raise error
+
+    def _deserialize_create(self, request):
+        """
+        Deserialize a create request
+
+        Overrides normal behavior in the case of xml content
+        """
+        if request.content_type == "application/xml":
+            deserializer = ServerXMLDeserializer()
+            return deserializer.deserialize(request.body)
+        else:
+            return self._deserialize(request.body, request.get_content_type())
+
+    def _validate_server_name(self, value):
+        if not isinstance(value, basestring):
+            msg = _("Server name is not a string or unicode")
+            raise exc.HTTPBadRequest(explanation=msg)
+
+        if value.strip() == '':
+            msg = _("Server name is an empty string")
+            raise exc.HTTPBadRequest(explanation=msg)
+
+    def _get_kernel_ramdisk_from_image(self, req, image_service, image_id):
+        """Fetch an image from the ImageService, then if present, return the
+        associated kernel and ramdisk image IDs.
+        """
+        context = req.environ['nova.context']
+        image_meta = image_service.show(context, image_id)
+        # NOTE(sirp): extracted to a separate method to aid unit-testing, the
+        # new method doesn't need a request obj or an ImageService stub
+        kernel_id, ramdisk_id = self._do_get_kernel_ramdisk_from_image(
+            image_meta)
+        return kernel_id, ramdisk_id
+
+    @staticmethod
+    def _do_get_kernel_ramdisk_from_image(image_meta):
+        """Given an ImageService image_meta, return kernel and ramdisk image
+        ids if present.
+
+        This is only valid for `ami` style images.
+        """
+        image_id = image_meta['id']
+        if image_meta['status'] != 'active':
+            raise exception.ImageUnacceptable(image_id=image_id,
+                                              reason=_("status is not active"))
+
+        if image_meta.get('container_format') != 'ami':
+            return None, None
+
+        try:
+            kernel_id = image_meta['properties']['kernel_id']
+        except KeyError:
+            raise exception.KernelNotFoundForImage(image_id=image_id)
+
+        try:
+            ramdisk_id = image_meta['properties']['ramdisk_id']
+        except KeyError:
+            ramdisk_id = None
+
+        return kernel_id, ramdisk_id
+
+    def _get_injected_files(self, personality):
+        """
+        Create a list of injected files from the personality attribute
+
+        At this time, injected_files must be formatted as a list of
+        (file_path, file_content) pairs for compatibility with the
+        underlying compute service.
+        """
+        injected_files = []
+
+        for item in personality:
+            try:
+                path = item['path']
+                contents = item['contents']
+            except KeyError as key:
+                expl = _('Bad personality format: missing %s') % key
+                raise exc.HTTPBadRequest(explanation=expl)
+            except TypeError:
+                expl = _('Bad personality format')
+                raise exc.HTTPBadRequest(explanation=expl)
+            try:
+                contents = base64.b64decode(contents)
+            except TypeError:
+                expl = _('Personality content for %s cannot be decoded') % path
+                raise exc.HTTPBadRequest(explanation=expl)
+            injected_files.append((path, contents))
+        return injected_files
+
+    def _get_server_admin_password_old_style(self, server):
+        """ Determine the admin password for a server on creation """
+        return utils.generate_password(FLAGS.password_length)
+
+    def _get_server_admin_password_new_style(self, server):
+        """ Determine the admin password for a server on creation """
+        password = server.get('adminPass')
+
+        if password is None:
+            return utils.generate_password(FLAGS.password_length)
+        if not isinstance(password, basestring) or password == '':
+            msg = _("Invalid adminPass")
+            raise exc.HTTPBadRequest(explanation=msg)
+        return password
+
+    def _get_requested_networks(self, requested_networks):
+        """
+        Create a list of requested networks from the networks attribute
+        """
+        networks = []
+        for network in requested_networks:
+            try:
+                network_uuid = network['uuid']
+
+                if not utils.is_uuid_like(network_uuid):
+                    msg = _("Bad networks format: network uuid is not in"
+                            " proper format (%s)") % network_uuid
+                    raise exc.HTTPBadRequest(explanation=msg)
+
+                #fixed IP address is optional
+                #if the fixed IP address is not provided then
+                #it will use one of the available IP address from the network
+                address = network.get('fixed_ip', None)
+                if address is not None and not utils.is_valid_ipv4(address):
+                    msg = _("Invalid fixed IP address (%s)") % address
+                    raise exc.HTTPBadRequest(explanation=msg)
+                # check if the network id is already present in the list,
+                # we don't want duplicate networks to be passed
+                # at the boot time
+                for id, ip in networks:
+                    if id == network_uuid:
+                        expl = _("Duplicate networks (%s) are not allowed")\
+                               % network_uuid
+                        raise exc.HTTPBadRequest(explanation=expl)
+
+                networks.append((network_uuid, address))
+            except KeyError as key:
+                expl = _('Bad network format: missing %s') % key
+                raise exc.HTTPBadRequest(explanation=expl)
+            except TypeError:
+                expl = _('Bad networks format')
+                raise exc.HTTPBadRequest(explanation=expl)
+
+        return networks
+
+    def _validate_user_data(self, user_data):
+        """Check if the user_data is encoded properly"""
+        if not user_data:
+            return
+        try:
+            user_data = base64.b64decode(user_data)
+        except TypeError:
+            expl = _('Userdata content cannot be decoded')
+            raise exc.HTTPBadRequest(explanation=expl)
+
    @novaclient_exception_converter
    @scheduler_api.redirect_handler
    def show(self, req, id):
@@ -180,22 +371,172 @@ class Controller(object):

    def create(self, req, body):
        """ Creates a new server for a given user """
-        if 'server' in body:
-            body['server']['key_name'] = self._get_key_name(req, body)
-
-        extra_values = None
-        extra_values, instances = self.helper.create_instance(
-            req, body, self.compute_api.create)
-
-        # We can only return 1 instance via the API, if we happen to
-        # build more than one... instances is a list, so we'll just
-        # use the first one..
-        inst = instances[0]
-        for key in ['instance_type', 'image_ref']:
-            inst[key] = extra_values[key]
-
-        server = self._build_view(req, inst, is_detail=True)
-        server['server']['adminPass'] = extra_values['password']
+        if not body:
+            raise exc.HTTPUnprocessableEntity()
+
+        if not 'server' in body:
+            raise exc.HTTPUnprocessableEntity()
+
+        body['server']['key_name'] = self._get_key_name(req, body)
+
+        context = req.environ['nova.context']
+        server_dict = body['server']
+        password = self._get_server_admin_password(server_dict)
+
+        if not 'name' in server_dict:
+            msg = _("Server name is not defined")
+            raise exc.HTTPBadRequest(explanation=msg)
+
+        name = server_dict['name']
+        self._validate_server_name(name)
+        name = name.strip()
+
+        image_href = self._image_ref_from_req_data(body)
+        # If the image href was generated by nova api, strip image_href
+        # down to an id and use the default glance connection params
+        if str(image_href).startswith(req.application_url):
+            image_href = image_href.split('/').pop()
+        try:
+            image_service, image_id = image.get_image_service(context,
+                                                              image_href)
+            kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image(
+                req, image_service, image_id)
+            images = set([str(x['id']) for x in image_service.index(context)])
+            assert str(image_id) in images
+        except Exception, e:
+            msg = _("Cannot find requested image %(image_href)s: %(e)s" %
+                    locals())
+            raise exc.HTTPBadRequest(explanation=msg)
+
+        personality = server_dict.get('personality')
+        config_drive = server_dict.get('config_drive')
+
+        injected_files = []
+        if personality:
+            injected_files = self._get_injected_files(personality)
+
+        sg_names = []
+        security_groups = server_dict.get('security_groups')
+        if security_groups is not None:
+            sg_names = [sg['name'] for sg in security_groups if sg.get('name')]
+        if not sg_names:
+            sg_names.append('default')
+
+        sg_names = list(set(sg_names))
+
+        requested_networks = server_dict.get('networks')
+        if requested_networks is not None:
+            requested_networks = self._get_requested_networks(
+                requested_networks)
+
+        try:
+            flavor_id = self._flavor_id_from_req_data(body)
+        except ValueError as error:
+            msg = _("Invalid flavorRef provided.")
+            raise exc.HTTPBadRequest(explanation=msg)
+
+        zone_blob = server_dict.get('blob')
+
+        # optional openstack extensions:
+        key_name = server_dict.get('key_name')
+        user_data = server_dict.get('user_data')
+        self._validate_user_data(user_data)
+
+        availability_zone = server_dict.get('availability_zone')
+        name = server_dict['name']
+        self._validate_server_name(name)
+        name = name.strip()
+
+        block_device_mapping = self._get_block_device_mapping(server_dict)
+
+        # Only allow admins to specify their own reservation_ids
+        # This is really meant to allow zones to work.
+        reservation_id = server_dict.get('reservation_id')
+        if all([reservation_id is not None,
+                reservation_id != '',
+                not context.is_admin]):
+            reservation_id = None
+
+        ret_resv_id = server_dict.get('return_reservation_id', False)
+
+        min_count = server_dict.get('min_count')
+        max_count = server_dict.get('max_count')
+        # min_count and max_count are optional.  If they exist, they come
+        # in as strings.  We want to default 'min_count' to 1, and default
+        # 'max_count' to be 'min_count'.
+        min_count = int(min_count) if min_count else 1
+        max_count = int(max_count) if max_count else min_count
+        if min_count > max_count:
+            min_count = max_count
+
+        try:
+            inst_type = \
+                instance_types.get_instance_type_by_flavor_id(flavor_id)
+
+            (instances, resv_id) = self.compute_api.create(context,
+                            inst_type,
+                            image_id,
+                            kernel_id=kernel_id,
+                            ramdisk_id=ramdisk_id,
+                            display_name=name,
+                            display_description=name,
+                            key_name=key_name,
+                            metadata=server_dict.get('metadata', {}),
+                            access_ip_v4=server_dict.get('accessIPv4'),
+                            access_ip_v6=server_dict.get('accessIPv6'),
+                            injected_files=injected_files,
+                            admin_password=password,
+                            zone_blob=zone_blob,
+                            reservation_id=reservation_id,
+                            min_count=min_count,
+                            max_count=max_count,
+                            requested_networks=requested_networks,
+                            security_group=sg_names,
+                            user_data=user_data,
+                            availability_zone=availability_zone,
+                            config_drive=config_drive,
+                            block_device_mapping=block_device_mapping,
+                            wait_for_instances=not ret_resv_id)
+        except quota.QuotaError as error:
+            self._handle_quota_error(error)
+        except exception.InstanceTypeMemoryTooSmall as error:
+            raise exc.HTTPBadRequest(explanation=unicode(error))
+        except exception.InstanceTypeDiskTooSmall as error:
+            raise exc.HTTPBadRequest(explanation=unicode(error))
+        except exception.ImageNotFound as error:
+            msg = _("Can not find requested image")
+            raise exc.HTTPBadRequest(explanation=msg)
+        except exception.FlavorNotFound as error:
+            msg = _("Invalid flavorRef provided.")
+            raise exc.HTTPBadRequest(explanation=msg)
+        except exception.KeypairNotFound as error:
+            msg = _("Invalid key_name provided.")
+            raise exc.HTTPBadRequest(explanation=msg)
+        except exception.SecurityGroupNotFound as error:
+            raise exc.HTTPBadRequest(explanation=unicode(error))
+        except rpc_common.RemoteError as err:
+            msg = "%(err_type)s: %(err_msg)s" % \
+                  {'err_type': err.exc_type, 'err_msg': err.value}
+            raise exc.HTTPBadRequest(explanation=msg)
+        # Let the caller deal with unhandled exceptions.
+
+        # If the caller wanted a reservation_id, return it
+        if ret_resv_id:
+            return {'reservation_id': resv_id}
+
+        # Instances is a list
+        instance = instances[0]
+        if not instance.get('_is_precooked', False):
+            instance['instance_type'] = inst_type
+            instance['image_ref'] = image_href
+
+        server = self._build_view(req, instance, is_detail=True)
+        if '_is_precooked' in server['server']:
+            del server['server']['_is_precooked']
+        else:
+            server['server']['adminPass'] = password
        return server

    def _delete(self, context, id):
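The heart of the new create() above is the reservation-id branch; condensed (names from the hunk, control flow compressed):

    ret_resv_id = server_dict.get('return_reservation_id', False)
    (instances, resv_id) = self.compute_api.create(
        context, inst_type, image_id,
        # Only wait for the instance DB entries when the caller wants
        # full instance details back.
        wait_for_instances=not ret_resv_id)
    if ret_resv_id:
        # Zone-style callers get just the reservation id.
        return {'reservation_id': resv_id}
    # Otherwise build the usual single-server view.
    server = self._build_view(req, instances[0], is_detail=True)
    server['server']['adminPass'] = password
    return server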
@ -218,7 +559,7 @@ class Controller(object):
|
||||
|
||||
if 'name' in body['server']:
|
||||
name = body['server']['name']
|
||||
self.helper._validate_server_name(name)
|
||||
self._validate_server_name(name)
|
||||
update_dict['display_name'] = name.strip()
|
||||
|
||||
if 'accessIPv4' in body['server']:
|
||||
@ -290,17 +631,17 @@ class Controller(object):
|
||||
|
||||
except KeyError as missing_key:
|
||||
msg = _("createBackup entity requires %s attribute") % missing_key
|
||||
raise webob.exc.HTTPBadRequest(explanation=msg)
|
||||
raise exc.HTTPBadRequest(explanation=msg)
|
||||
|
||||
except TypeError:
|
||||
msg = _("Malformed createBackup entity")
|
||||
raise webob.exc.HTTPBadRequest(explanation=msg)
|
||||
raise exc.HTTPBadRequest(explanation=msg)
|
||||
|
||||
try:
|
||||
rotation = int(rotation)
|
||||
except ValueError:
|
||||
msg = _("createBackup attribute 'rotation' must be an integer")
|
||||
raise webob.exc.HTTPBadRequest(explanation=msg)
|
||||
raise exc.HTTPBadRequest(explanation=msg)
|
||||
|
||||
# preserve link to server in image properties
|
||||
server_ref = os.path.join(req.application_url,
|
||||
@ -315,7 +656,7 @@ class Controller(object):
|
||||
props.update(metadata)
|
||||
except ValueError:
|
||||
msg = _("Invalid metadata")
|
||||
raise webob.exc.HTTPBadRequest(explanation=msg)
|
||||
raise exc.HTTPBadRequest(explanation=msg)
|
||||
|
||||
image = self.compute_api.backup(context,
|
||||
instance_id,
|
||||
@ -696,7 +1037,7 @@ class ControllerV10(Controller):
|
||||
|
||||
def _get_server_admin_password(self, server):
|
||||
""" Determine the admin password for a server on creation """
|
||||
return self.helper._get_server_admin_password_old_style(server)
|
||||
return self._get_server_admin_password_old_style(server)
|
||||
|
||||
def _get_server_search_options(self):
|
||||
"""Return server search options allowed by non-admin"""
|
||||
@ -884,11 +1225,11 @@ class ControllerV11(Controller):
|
||||
|
||||
except KeyError:
|
||||
msg = _("createImage entity requires name attribute")
|
||||
raise webob.exc.HTTPBadRequest(explanation=msg)
|
||||
raise exc.HTTPBadRequest(explanation=msg)
|
||||
|
||||
except TypeError:
|
||||
msg = _("Malformed createImage entity")
|
||||
raise webob.exc.HTTPBadRequest(explanation=msg)
|
||||
raise exc.HTTPBadRequest(explanation=msg)
|
||||
|
||||
# preserve link to server in image properties
|
||||
server_ref = os.path.join(req.application_url,
|
||||
@ -903,7 +1244,7 @@ class ControllerV11(Controller):
|
||||
props.update(metadata)
|
||||
except ValueError:
|
||||
msg = _("Invalid metadata")
|
||||
raise webob.exc.HTTPBadRequest(explanation=msg)
|
||||
raise exc.HTTPBadRequest(explanation=msg)
|
||||
|
||||
image = self.compute_api.snapshot(context,
|
||||
instance_id,
|
||||
@ -923,7 +1264,7 @@ class ControllerV11(Controller):
|
||||
|
||||
def _get_server_admin_password(self, server):
|
||||
""" Determine the admin password for a server on creation """
|
||||
return self.helper._get_server_admin_password_new_style(server)
|
||||
return self._get_server_admin_password_new_style(server)
|
||||
|
||||
def _get_server_search_options(self):
|
||||
"""Return server search options allowed by non-admin"""
|
||||
@ -1068,6 +1409,227 @@ class ServerXMLSerializer(wsgi.XMLDictSerializer):
|
||||
return self._to_xml(server)
|
||||
|
||||
|
||||
class ServerXMLDeserializer(wsgi.XMLDeserializer):
|
||||
"""
|
||||
Deserializer to handle xml-formatted server create requests.
|
||||
|
||||
Handles standard server attributes as well as optional metadata
|
||||
and personality attributes
|
||||
"""
|
||||
|
||||
metadata_deserializer = common.MetadataXMLDeserializer()
|
||||
|
||||
def create(self, string):
|
||||
"""Deserialize an xml-formatted server create request"""
|
||||
dom = minidom.parseString(string)
|
||||
server = self._extract_server(dom)
|
||||
return {'body': {'server': server}}
|
||||
|
||||
def _extract_server(self, node):
|
||||
"""Marshal the server attribute of a parsed request"""
|
||||
server = {}
|
||||
server_node = self.find_first_child_named(node, 'server')
|
||||
|
||||
attributes = ["name", "imageId", "flavorId", "adminPass"]
|
||||
for attr in attributes:
|
||||
if server_node.getAttribute(attr):
|
||||
server[attr] = server_node.getAttribute(attr)
|
||||
|
||||
metadata_node = self.find_first_child_named(server_node, "metadata")
|
||||
server["metadata"] = self.metadata_deserializer.extract_metadata(
|
||||
metadata_node)
|
||||
|
||||
server["personality"] = self._extract_personality(server_node)
|
||||
|
||||
return server
|
||||
|
||||
def _extract_personality(self, server_node):
|
||||
"""Marshal the personality attribute of a parsed request"""
|
||||
node = self.find_first_child_named(server_node, "personality")
|
||||
personality = []
|
||||
if node is not None:
|
||||
for file_node in self.find_children_named(node, "file"):
|
||||
item = {}
|
||||
if file_node.hasAttribute("path"):
|
||||
item["path"] = file_node.getAttribute("path")
|
||||
item["contents"] = self.extract_text(file_node)
|
||||
personality.append(item)
|
||||
return personality
|
||||
|
||||
|
||||
class ServerXMLDeserializerV11(wsgi.MetadataXMLDeserializer):
|
||||
"""
|
||||
Deserializer to handle xml-formatted server create requests.
|
||||
|
||||
Handles standard server attributes as well as optional metadata
|
||||
and personality attributes
|
||||
"""
|
||||
|
||||
metadata_deserializer = common.MetadataXMLDeserializer()
|
||||
|
||||
def action(self, string):
|
||||
dom = minidom.parseString(string)
|
||||
action_node = dom.childNodes[0]
|
||||
action_name = action_node.tagName
|
||||
|
||||
action_deserializer = {
|
||||
'createImage': self._action_create_image,
|
||||
'createBackup': self._action_create_backup,
|
||||
'changePassword': self._action_change_password,
|
||||
'reboot': self._action_reboot,
|
||||
'rebuild': self._action_rebuild,
|
||||
'resize': self._action_resize,
|
||||
'confirmResize': self._action_confirm_resize,
|
||||
'revertResize': self._action_revert_resize,
|
||||
}.get(action_name, self.default)
|
||||
|
||||
action_data = action_deserializer(action_node)
|
||||
|
||||
return {'body': {action_name: action_data}}
|
||||
|
||||
def _action_create_image(self, node):
|
||||
return self._deserialize_image_action(node, ('name',))
|
||||
|
||||
def _action_create_backup(self, node):
|
||||
attributes = ('name', 'backup_type', 'rotation')
|
||||
return self._deserialize_image_action(node, attributes)
|
||||
|
||||
def _action_change_password(self, node):
|
||||
if not node.hasAttribute("adminPass"):
|
||||
raise AttributeError("No adminPass was specified in request")
|
||||
return {"adminPass": node.getAttribute("adminPass")}
|
||||
|
||||
def _action_reboot(self, node):
|
||||
if not node.hasAttribute("type"):
|
||||
raise AttributeError("No reboot type was specified in request")
|
||||
return {"type": node.getAttribute("type")}
|
||||
|
||||
def _action_rebuild(self, node):
|
||||
rebuild = {}
|
||||
if node.hasAttribute("name"):
|
||||
rebuild['name'] = node.getAttribute("name")
|
||||
|
||||
metadata_node = self.find_first_child_named(node, "metadata")
|
||||
if metadata_node is not None:
|
||||
rebuild["metadata"] = self.extract_metadata(metadata_node)
|
||||
|
||||
personality = self._extract_personality(node)
|
||||
if personality is not None:
|
||||
rebuild["personality"] = personality
|
||||
|
||||
if not node.hasAttribute("imageRef"):
|
||||
raise AttributeError("No imageRef was specified in request")
|
||||
rebuild["imageRef"] = node.getAttribute("imageRef")
|
||||
|
||||
return rebuild
|
||||
|
||||
def _action_resize(self, node):
|
||||
if not node.hasAttribute("flavorRef"):
|
||||
raise AttributeError("No flavorRef was specified in request")
|
||||
return {"flavorRef": node.getAttribute("flavorRef")}
|
||||
|
||||
def _action_confirm_resize(self, node):
|
||||
return None
|
||||
|
||||
def _action_revert_resize(self, node):
|
||||
return None
|
||||
|
||||
def _deserialize_image_action(self, node, allowed_attributes):
|
||||
data = {}
|
||||
for attribute in allowed_attributes:
|
||||
value = node.getAttribute(attribute)
|
||||
if value:
|
||||
data[attribute] = value
|
||||
metadata_node = self.find_first_child_named(node, 'metadata')
|
||||
if metadata_node is not None:
|
||||
metadata = self.metadata_deserializer.extract_metadata(
|
||||
metadata_node)
|
||||
data['metadata'] = metadata
|
||||
return data
|
||||
|
||||
def create(self, string):
|
||||
"""Deserialize an xml-formatted server create request"""
|
||||
dom = minidom.parseString(string)
|
||||
server = self._extract_server(dom)
|
||||
return {'body': {'server': server}}
|
||||
|
||||
def _extract_server(self, node):
|
||||
"""Marshal the server attribute of a parsed request"""
|
||||
server = {}
|
||||
server_node = self.find_first_child_named(node, 'server')
|
||||
|
||||
attributes = ["name", "imageRef", "flavorRef", "adminPass",
|
||||
"accessIPv4", "accessIPv6"]
|
||||
for attr in attributes:
|
||||
if server_node.getAttribute(attr):
|
||||
server[attr] = server_node.getAttribute(attr)
|
||||
|
||||
metadata_node = self.find_first_child_named(server_node, "metadata")
|
||||
if metadata_node is not None:
|
||||
server["metadata"] = self.extract_metadata(metadata_node)
|
||||
|
||||
personality = self._extract_personality(server_node)
|
||||
if personality is not None:
|
||||
server["personality"] = personality
|
||||
|
||||
networks = self._extract_networks(server_node)
|
||||
if networks is not None:
|
||||
server["networks"] = networks
|
||||
|
||||
security_groups = self._extract_security_groups(server_node)
|
||||
if security_groups is not None:
|
||||
server["security_groups"] = security_groups
|
||||
|
||||
return server
|
||||
|
||||
def _extract_personality(self, server_node):
|
||||
"""Marshal the personality attribute of a parsed request"""
|
||||
node = self.find_first_child_named(server_node, "personality")
|
||||
if node is not None:
|
||||
personality = []
|
||||
for file_node in self.find_children_named(node, "file"):
|
||||
item = {}
|
||||
if file_node.hasAttribute("path"):
|
||||
item["path"] = file_node.getAttribute("path")
|
||||
item["contents"] = self.extract_text(file_node)
|
||||
personality.append(item)
|
||||
return personality
|
||||
else:
|
||||
return None
|
||||
|
||||
def _extract_networks(self, server_node):
|
||||
"""Marshal the networks attribute of a parsed request"""
|
||||
node = self.find_first_child_named(server_node, "networks")
|
||||
if node is not None:
|
||||
networks = []
|
||||
for network_node in self.find_children_named(node,
|
||||
"network"):
|
||||
item = {}
|
||||
if network_node.hasAttribute("uuid"):
|
||||
item["uuid"] = network_node.getAttribute("uuid")
|
||||
if network_node.hasAttribute("fixed_ip"):
|
||||
item["fixed_ip"] = network_node.getAttribute("fixed_ip")
|
||||
networks.append(item)
|
||||
return networks
|
||||
else:
|
||||
return None
|
||||
|
||||
def _extract_security_groups(self, server_node):
|
||||
"""Marshal the security_groups attribute of a parsed request"""
|
||||
node = self.find_first_child_named(server_node, "security_groups")
|
||||
if node is not None:
|
||||
security_groups = []
|
||||
for sg_node in self.find_children_named(node, "security_group"):
|
||||
item = {}
|
||||
name_node = self.find_first_child_named(sg_node, "name")
|
||||
if name_node:
|
||||
item["name"] = self.extract_text(name_node)
|
||||
security_groups.append(item)
|
||||
return security_groups
|
||||
else:
|
||||
return None
|
||||
|
||||
|
||||
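For orientation, here is a standalone sketch (not part of the commit) of the kind of 1.1 XML body these methods walk and the values they pull out of it. The element and attribute names come from the methods above; the sample document itself is invented:

    from xml.dom import minidom

    SAMPLE = """<server xmlns="http://docs.openstack.org/compute/api/v1.1"
            name="new-server" imageRef="http://localhost/v1.1/images/2"
            flavorRef="http://localhost/v1.1/flavors/3">
        <metadata>
            <meta key="My Server Name">Apache1</meta>
        </metadata>
        <personality>
            <file path="/etc/banner.txt">Zm9vIGJhcg==</file>
        </personality>
    </server>"""

    dom = minidom.parseString(SAMPLE)
    server_node = dom.documentElement
    # _extract_server() reduces the top level to plain attribute lookups:
    print server_node.getAttribute("name")      # new-server
    print server_node.getAttribute("imageRef")  # http://localhost/v1.1/images/2
    # ... and _extract_personality() would marshal the file element into:
    #     [{'path': '/etc/banner.txt', 'contents': 'Zm9vIGJhcg=='}]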
def create_resource(version='1.0'):
    controller = {
        '1.0': ControllerV10,
@ -1107,8 +1669,8 @@ def create_resource(version='1.0'):
    }

    xml_deserializer = {
        '1.0': helper.ServerXMLDeserializer(),
        '1.1': helper.ServerXMLDeserializerV11(),
        '1.0': ServerXMLDeserializer(),
        '1.1': ServerXMLDeserializerV11(),
    }[version]

    body_deserializers = {

@ -25,8 +25,8 @@ from nova import log as logging
from nova.compute import api as compute
from nova.scheduler import api

from nova.api.openstack import create_instance_helper as helper
from nova.api.openstack import common
from nova.api.openstack import servers
from nova.api.openstack import wsgi


@ -67,7 +67,6 @@ class Controller(object):

    def __init__(self):
        self.compute_api = compute.API()
        self.helper = helper.CreateInstanceHelper(self)

    def index(self, req):
        """Return all zones in brief"""
@ -120,18 +119,6 @@ class Controller(object):
        zone = api.zone_update(context, zone_id, body["zone"])
        return dict(zone=_scrub_zone(zone))

    def boot(self, req, body):
        """Creates a new server for a given user while being Zone aware.

        Returns a reservation ID (a UUID).
        """
        result = None
        extra_values, result = self.helper.create_instance(req, body,
                                    self.compute_api.create_all_at_once)

        reservation_id = result
        return {'reservation_id': reservation_id}
    @check_encryption_key
    def select(self, req, body):
        """Returns a weighted list of costs to create instances
@ -155,37 +142,8 @@ class Controller(object):
                               blob=cipher_text))
        return cooked

    def _image_ref_from_req_data(self, data):
        return data['server']['imageId']

    def _flavor_id_from_req_data(self, data):
        return data['server']['flavorId']

    def _get_server_admin_password(self, server):
        """ Determine the admin password for a server on creation """
        return self.helper._get_server_admin_password_old_style(server)


class ControllerV11(Controller):
    """Controller for 1.1 Zone resources."""

    def _get_server_admin_password(self, server):
        """ Determine the admin password for a server on creation """
        return self.helper._get_server_admin_password_new_style(server)

    def _image_ref_from_req_data(self, data):
        return data['server']['imageRef']

    def _flavor_id_from_req_data(self, data):
        return data['server']['flavorRef']


def create_resource(version):
    controller = {
        '1.0': Controller,
        '1.1': ControllerV11,
    }[version]()

    metadata = {
        "attributes": {
            "zone": ["id", "api_url", "name", "capabilities"],
@ -199,8 +157,8 @@ def create_resource(version):
    serializer = wsgi.ResponseSerializer(body_serializers)

    body_deserializers = {
        'application/xml': helper.ServerXMLDeserializer(),
        'application/xml': servers.ServerXMLDeserializer(),
    }
    deserializer = wsgi.RequestDeserializer(body_deserializers)

    return wsgi.Resource(controller, deserializer, serializer)
    return wsgi.Resource(Controller(), deserializer, serializer)

@ -75,6 +75,11 @@ def generate_default_hostname(instance):
    return display_name.translate(table, deletions)


def generate_default_display_name(instance):
    """Generate a default display name"""
    return 'Server %s' % instance['id']


def _is_able_to_shutdown(instance, instance_id):
    vm_state = instance["vm_state"]
    task_state = instance["task_state"]
@ -177,17 +182,27 @@ class API(base.Base):

        self.network_api.validate_networks(context, requested_networks)
    def _check_create_parameters(self, context, instance_type,
               image_href, kernel_id=None, ramdisk_id=None,
               min_count=None, max_count=None,
               display_name='', display_description='',
               key_name=None, key_data=None, security_group='default',
               availability_zone=None, user_data=None, metadata=None,
               injected_files=None, admin_password=None, zone_blob=None,
               reservation_id=None, access_ip_v4=None, access_ip_v6=None,
               requested_networks=None, config_drive=None,):
    def _create_instance(self, context, instance_type,
               image_href, kernel_id, ramdisk_id,
               min_count, max_count,
               display_name, display_description,
               key_name, key_data, security_group,
               availability_zone, user_data, metadata,
               injected_files, admin_password, zone_blob,
               reservation_id, access_ip_v4, access_ip_v6,
               requested_networks, config_drive,
               block_device_mapping,
               wait_for_instances):
        """Verify all the input parameters regardless of the provisioning
        strategy being performed."""
        strategy being performed and schedule the instance(s) for
        creation."""

        if not metadata:
            metadata = {}
        if not display_description:
            display_description = ''
        if not security_group:
            security_group = 'default'

        if not instance_type:
            instance_type = instance_types.get_default_instance_type()
@ -198,6 +213,8 @@ class API(base.Base):
        if not metadata:
            metadata = {}

        block_device_mapping = block_device_mapping or []

        num_instances = quota.allowed_instances(context, max_count,
                                                instance_type)
        if num_instances < min_count:
@ -303,7 +320,28 @@ class API(base.Base):
            'vm_mode': vm_mode,
            'root_device_name': root_device_name}

        return (num_instances, base_options, image)
        LOG.debug(_("Going to run %s instances...") % num_instances)

        if wait_for_instances:
            rpc_method = rpc.call
        else:
            rpc_method = rpc.cast

        # TODO(comstud): We should use rpc.multicall when we can
        # retrieve the full instance dictionary from the scheduler.
        # Otherwise, we could exceed the AMQP max message size limit.
        # This would require the schedulers' schedule_run_instances
        # methods to return an iterator vs a list.
        instances = self._schedule_run_instance(
                rpc_method,
                context, base_options,
                instance_type, zone_blob,
                availability_zone, injected_files,
                admin_password, image,
                num_instances, requested_networks,
                block_device_mapping, security_group)

        return (instances, reservation_id)
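The wait_for_instances flag above only switches the RPC primitive, but that is what decides whether instance data ever comes back to the API: rpc.call blocks for the scheduler's reply, rpc.cast does not. A toy illustration of that contract (the fakes stand in for nova.rpc, and the instance dict is invented):

    def fake_call(context, topic, msg):
        # call() blocks until the scheduler replies with encoded instances
        return [{'id': 1, '_is_precooked': False}]

    def fake_cast(context, topic, msg):
        # cast() is fire-and-forget; there is never a return value
        return None

    for wait_for_instances in (True, False):
        rpc_method = fake_call if wait_for_instances else fake_cast
        instances = rpc_method({}, 'scheduler',
                               {'method': 'run_instance', 'args': {}})
        print wait_for_instances, instances
    # True [{'id': 1, '_is_precooked': False}]
    # False None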
    @staticmethod
    def _volume_size(instance_type, virtual_name):
@ -399,10 +437,8 @@ class API(base.Base):
        including any related table updates (such as security group,
        etc).

        This will be called by create() in the majority of situations,
        but create_all_at_once() style Schedulers may initiate the call.
        If you are changing this method, be sure to update both
        call paths.
        This is called by the scheduler after a location for the
        instance has been determined.
        """
        elevated = context.elevated()
        if security_group is None:
@ -439,7 +475,7 @@ class API(base.Base):
        updates = {}
        if (not hasattr(instance, 'display_name') or
                instance.display_name is None):
            updates['display_name'] = "Server %s" % instance_id
            updates['display_name'] = generate_default_display_name(instance)
            instance['display_name'] = updates['display_name']
        updates['hostname'] = self.hostname_factory(instance)
        updates['vm_state'] = vm_states.BUILDING
@ -448,21 +484,23 @@ class API(base.Base):
        instance = self.update(context, instance_id, **updates)
        return instance

    def _ask_scheduler_to_create_instance(self, context, base_options,
                                          instance_type, zone_blob,
                                          availability_zone, injected_files,
                                          admin_password, image,
                                          instance_id=None, num_instances=1,
                                          requested_networks=None):
        """Send the run_instance request to the schedulers for processing."""
    def _schedule_run_instance(self,
                               rpc_method,
                               context, base_options,
                               instance_type, zone_blob,
                               availability_zone, injected_files,
                               admin_password, image,
                               num_instances,
                               requested_networks,
                               block_device_mapping,
                               security_group):
        """Send a run_instance request to the schedulers for processing."""

        pid = context.project_id
        uid = context.user_id
        if instance_id:
            LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's"
                    " instance %(instance_id)s (single-shot)") % locals())
        else:
            LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's"
                    " (all-at-once)") % locals())

        LOG.debug(_("Sending create to scheduler for %(pid)s/%(uid)s's") %
                locals())

        request_spec = {
            'image': image,
@ -471,82 +509,41 @@ class API(base.Base):
            'filter': None,
            'blob': zone_blob,
            'num_instances': num_instances,
            'block_device_mapping': block_device_mapping,
            'security_group': security_group,
        }

        rpc.cast(context,
                 FLAGS.scheduler_topic,
                 {"method": "run_instance",
                  "args": {"topic": FLAGS.compute_topic,
                           "instance_id": instance_id,
                           "request_spec": request_spec,
                           "availability_zone": availability_zone,
                           "admin_password": admin_password,
                           "injected_files": injected_files,
                           "requested_networks": requested_networks}})

    def create_all_at_once(self, context, instance_type,
               image_href, kernel_id=None, ramdisk_id=None,
               min_count=None, max_count=None,
               display_name='', display_description='',
               key_name=None, key_data=None, security_group='default',
               availability_zone=None, user_data=None, metadata=None,
               injected_files=None, admin_password=None, zone_blob=None,
               reservation_id=None, block_device_mapping=None,
               access_ip_v4=None, access_ip_v6=None,
               requested_networks=None, config_drive=None):
        """Provision the instances by passing the whole request to
        the Scheduler for execution. Returns a Reservation ID
        related to the creation of all of these instances."""

        if not metadata:
            metadata = {}

        num_instances, base_options, image = self._check_create_parameters(
                               context, instance_type,
                               image_href, kernel_id, ramdisk_id,
                               min_count, max_count,
                               display_name, display_description,
                               key_name, key_data, security_group,
                               availability_zone, user_data, metadata,
                               injected_files, admin_password, zone_blob,
                               reservation_id, access_ip_v4, access_ip_v6,
                               requested_networks, config_drive)

        self._ask_scheduler_to_create_instance(context, base_options,
                               instance_type, zone_blob,
                               availability_zone, injected_files,
                               admin_password, image,
                               num_instances=num_instances,
                               requested_networks=requested_networks)

        return base_options['reservation_id']
        return rpc_method(context,
                          FLAGS.scheduler_topic,
                          {"method": "run_instance",
                           "args": {"topic": FLAGS.compute_topic,
                                    "request_spec": request_spec,
                                    "admin_password": admin_password,
                                    "injected_files": injected_files,
                                    "requested_networks": requested_networks}})
    def create(self, context, instance_type,
               image_href, kernel_id=None, ramdisk_id=None,
               min_count=None, max_count=None,
               display_name='', display_description='',
               key_name=None, key_data=None, security_group='default',
               display_name=None, display_description=None,
               key_name=None, key_data=None, security_group=None,
               availability_zone=None, user_data=None, metadata=None,
               injected_files=None, admin_password=None, zone_blob=None,
               reservation_id=None, block_device_mapping=None,
               access_ip_v4=None, access_ip_v6=None,
               requested_networks=None, config_drive=None,):
               requested_networks=None, config_drive=None,
               wait_for_instances=True):
        """
        Provision the instances by sending off a series of single
        instance requests to the Schedulers. This is fine for trivial
        Scheduler drivers, but may remove the effectiveness of the
        more complicated drivers.
        Provision instances, sending instance information to the
        scheduler. The scheduler will determine where the instance(s)
        go and will handle creating the DB entries.

        NOTE: If you change this method, be sure to change
        create_all_at_once() at the same time!

        Returns a list of instance dicts.
        Returns a tuple of (instances, reservation_id) where instances
        could be 'None' or a list of instance dicts depending on if
        we waited for information from the scheduler or not.
        """

        if not metadata:
            metadata = {}

        num_instances, base_options, image = self._check_create_parameters(
        (instances, reservation_id) = self._create_instance(
                               context, instance_type,
                               image_href, kernel_id, ramdisk_id,
                               min_count, max_count,
@ -555,27 +552,25 @@ class API(base.Base):
                               availability_zone, user_data, metadata,
                               injected_files, admin_password, zone_blob,
                               reservation_id, access_ip_v4, access_ip_v6,
                               requested_networks, config_drive)
                               requested_networks, config_drive,
                               block_device_mapping,
                               wait_for_instances)

        block_device_mapping = block_device_mapping or []
        instances = []
        LOG.debug(_("Going to run %s instances..."), num_instances)
        for num in range(num_instances):
            instance = self.create_db_entry_for_new_instance(context,
                                    instance_type, image,
                                    base_options, security_group,
                                    block_device_mapping, num=num)
            instances.append(instance)
            instance_id = instance['id']
        if instances is None:
            # wait_for_instances must have been False
            return (instances, reservation_id)

            self._ask_scheduler_to_create_instance(context, base_options,
                                    instance_type, zone_blob,
                                    availability_zone, injected_files,
                                    admin_password, image,
                                    instance_id=instance_id,
                                    requested_networks=requested_networks)
        inst_ret_list = []
        for instance in instances:
            if instance.get('_is_precooked', False):
                inst_ret_list.append(instance)
            else:
                # Scheduler only gives us the 'id'. We need to pull
                # in the created instances from the DB
                instance = self.db.instance_get(context, instance['id'])
                inst_ret_list.append(dict(instance.iteritems()))

        return [dict(x.iteritems()) for x in instances]
        return (inst_ret_list, reservation_id)
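Note the _is_precooked branch above: instances built in a child zone come back from the scheduler as full dicts, while locally scheduled ones only carry an id and are re-read from the DB. A self-contained sketch of that fixup loop (the fake DB lookup and both instance dicts are invented):

    def fake_instance_get(context, instance_id):
        # stands in for self.db.instance_get(); returns a dict-like row
        return {'id': instance_id, 'display_name': 'Server %s' % instance_id}

    scheduled = [{'id': 1, '_is_precooked': False},
                 {'id': 7, '_is_precooked': True,
                  'display_name': 'child-zone-vm'}]

    inst_ret_list = []
    for instance in scheduled:
        if instance.get('_is_precooked', False):
            inst_ret_list.append(instance)
        else:
            inst_ret_list.append(fake_instance_get(None, instance['id']))
    print [i['display_name'] for i in inst_ret_list]
    # ['Server 1', 'child-zone-vm']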
    def has_finished_migration(self, context, instance_uuid):
        """Returns true if an instance has a finished migration."""

@ -69,8 +69,6 @@ flags.DEFINE_string('instances_path', '$state_path/instances',
                    'where instances are stored on disk')
flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection',
                    'Driver to use for controlling virtualization')
flags.DEFINE_string('stub_network', False,
                    'Stub network related code')
flags.DEFINE_string('console_host', socket.gethostname(),
                    'Console proxy host to use to connect to instances on'
                    'this host.')

@ -432,3 +432,6 @@ DEFINE_list('monkey_patch_modules',
DEFINE_bool('allow_resize_to_same_host', False,
            'Allow destination machine to match source for resize. Useful'
            ' when testing in environments with only one host machine.')

DEFINE_string('stub_network', False,
              'Stub network related code')

@ -60,24 +60,10 @@ class AbstractScheduler(driver.Scheduler):
            request_spec, kwargs):
        """Create the requested resource in this Zone."""
        host = build_plan_item['hostname']
        base_options = request_spec['instance_properties']
        image = request_spec['image']
        instance_type = request_spec.get('instance_type')

        # TODO(sandy): I guess someone needs to add block_device_mapping
        # support at some point? Also, OS API has no concept of security
        # groups.
        instance = compute_api.API().create_db_entry_for_new_instance(context,
                instance_type, image, base_options, None, [])

        instance_id = instance['id']
        kwargs['instance_id'] = instance_id

        queue = db.queue_get_for(context, "compute", host)
        params = {"method": "run_instance", "args": kwargs}
        rpc.cast(context, queue, params)
        LOG.debug(_("Provisioning locally via compute node %(host)s")
                % locals())
        instance = self.create_instance_db_entry(context, request_spec)
        driver.cast_to_compute_host(context, host,
                'run_instance', instance_id=instance['id'], **kwargs)
        return driver.encode_instance(instance, local=True)
    def _decrypt_blob(self, blob):
        """Returns the decrypted blob or None if invalid. Broken out
@ -112,7 +98,7 @@ class AbstractScheduler(driver.Scheduler):
        files = kwargs['injected_files']
        child_zone = zone_info['child_zone']
        child_blob = zone_info['child_blob']
        zone = db.zone_get(context, child_zone)
        zone = db.zone_get(context.elevated(), child_zone)
        url = zone.api_url
        LOG.debug(_("Forwarding instance create call to child zone %(url)s"
                ". ReservationID=%(reservation_id)s") % locals())
@ -132,12 +118,13 @@ class AbstractScheduler(driver.Scheduler):
        # arguments are passed as keyword arguments
        # (there's a reasonable default for ipgroups in the
        # novaclient call).
        nova.servers.create(name, image_ref, flavor_id,
        instance = nova.servers.create(name, image_ref, flavor_id,
                meta=meta, files=files, zone_blob=child_blob,
                reservation_id=reservation_id)
        return driver.encode_instance(instance._info, local=False)

    def _provision_resource_from_blob(self, context, build_plan_item,
                                      instance_id, request_spec, kwargs):
                                      request_spec, kwargs):
        """Create the requested resource locally or in a child zone
        based on what is stored in the zone blob info.

@ -165,21 +152,21 @@ class AbstractScheduler(driver.Scheduler):

        # Valid data ... is it for us?
        if 'child_zone' in host_info and 'child_blob' in host_info:
            self._ask_child_zone_to_create_instance(context, host_info,
                                                    request_spec, kwargs)
            instance = self._ask_child_zone_to_create_instance(context,
                    host_info, request_spec, kwargs)
        else:
            self._provision_resource_locally(context, host_info, request_spec,
                                             kwargs)
            instance = self._provision_resource_locally(context,
                    host_info, request_spec, kwargs)
        return instance

    def _provision_resource(self, context, build_plan_item, instance_id,
    def _provision_resource(self, context, build_plan_item,
                            request_spec, kwargs):
        """Create the requested resource in this Zone or a child zone."""
        if "hostname" in build_plan_item:
            self._provision_resource_locally(context, build_plan_item,
                                             request_spec, kwargs)
            return
        self._provision_resource_from_blob(context, build_plan_item,
                                           instance_id, request_spec, kwargs)
            return self._provision_resource_locally(context,
                    build_plan_item, request_spec, kwargs)
        return self._provision_resource_from_blob(context,
                build_plan_item, request_spec, kwargs)

    def _adjust_child_weights(self, child_results, zones):
        """Apply the Scale and Offset values from the Zone definition
@ -205,8 +192,7 @@ class AbstractScheduler(driver.Scheduler):
                LOG.exception(_("Bad child zone scaling values "
                        "for Zone: %(zone_id)s") % locals())

    def schedule_run_instance(self, context, instance_id, request_spec,
                              *args, **kwargs):
    def schedule_run_instance(self, context, request_spec, *args, **kwargs):
        """This method is called from nova.compute.api to provision
        an instance. However we need to look at the parameters being
        passed in to see if this is a request to:
@ -214,13 +200,16 @@ class AbstractScheduler(driver.Scheduler):
        2. Use the Build Plan information in the request parameters
           to simply create the instance (either in this zone or
           a child zone).

        returns list of instances created.
        """
        # TODO(sandy): We'll have to look for richer specs at some point.
        blob = request_spec.get('blob')
        if blob:
            self._provision_resource(context, request_spec, instance_id,
                                     request_spec, kwargs)
            return None
            instance = self._provision_resource(context,
                    request_spec, request_spec, kwargs)
            # Caller expects a list of instances
            return [instance]

        num_instances = request_spec.get('num_instances', 1)
        LOG.debug(_("Attempting to build %(num_instances)d instance(s)") %
@ -231,16 +220,16 @@ class AbstractScheduler(driver.Scheduler):
        if not build_plan:
            raise driver.NoValidHost(_('No hosts were available'))

        instances = []
        for num in xrange(num_instances):
            if not build_plan:
                break
            build_plan_item = build_plan.pop(0)
            self._provision_resource(context, build_plan_item, instance_id,
                                     request_spec, kwargs)
            instance = self._provision_resource(context,
                    build_plan_item, request_spec, kwargs)
            instances.append(instance)

        # Returning None short-circuits the routing to Compute (since
        # we've already done it here)
        return None
        return instances

    def select(self, context, request_spec, *args, **kwargs):
        """Select returns a list of weights and zone/host information
@ -251,7 +240,7 @@ class AbstractScheduler(driver.Scheduler):
        return self._schedule(context, "compute", request_spec,
                              *args, **kwargs)

    def schedule(self, context, topic, request_spec, *args, **kwargs):
    def schedule(self, context, topic, method, *args, **kwargs):
        """The schedule() contract requires we return the one
        best-suited host for this request.
        """
@ -285,7 +274,7 @@ class AbstractScheduler(driver.Scheduler):
        weighted_hosts = self.weigh_hosts(topic, request_spec, filtered_hosts)
        # Next, tack on the host weights from the child zones
        json_spec = json.dumps(request_spec)
        all_zones = db.zone_get_all(context)
        all_zones = db.zone_get_all(context.elevated())
        child_results = self._call_zone_method(context, "select",
                specs=json_spec, zones=all_zones)
        self._adjust_child_weights(child_results, all_zones)

@ -65,7 +65,7 @@ def get_zone_list(context):
    for item in items:
        item['api_url'] = item['api_url'].replace('\\/', '/')
    if not items:
        items = db.zone_get_all(context)
        items = db.zone_get_all(context.elevated())
    return items


@ -116,7 +116,7 @@ def call_zone_method(context, method_name, errors_to_ignore=None,
    pool = greenpool.GreenPool()
    results = []
    if zones is None:
        zones = db.zone_get_all(context)
        zones = db.zone_get_all(context.elevated())
    for zone in zones:
        try:
            # Do this on behalf of the user ...
@ -29,12 +29,33 @@ from nova.scheduler import driver
class ChanceScheduler(driver.Scheduler):
    """Implements Scheduler as a random node selector."""

    def schedule(self, context, topic, *_args, **_kwargs):
    def _schedule(self, context, topic, **kwargs):
        """Picks a host that is up at random."""

        hosts = self.hosts_up(context, topic)
        elevated = context.elevated()
        hosts = self.hosts_up(elevated, topic)
        if not hosts:
            raise driver.NoValidHost(_("Scheduler was unable to locate a host"
                                       " for this request. Is the appropriate"
                                       " service running?"))
        return hosts[int(random.random() * len(hosts))]

    def schedule(self, context, topic, method, *_args, **kwargs):
        """Picks a host that is up at random."""

        host = self._schedule(context, topic, **kwargs)
        driver.cast_to_host(context, topic, host, method, **kwargs)

    def schedule_run_instance(self, context, request_spec, *_args, **kwargs):
        """Create and run an instance or instances"""
        elevated = context.elevated()
        num_instances = request_spec.get('num_instances', 1)
        instances = []
        for num in xrange(num_instances):
            host = self._schedule(context, 'compute', **kwargs)
            instance = self.create_instance_db_entry(elevated, request_spec)
            driver.cast_to_compute_host(context, host,
                    'run_instance', instance_id=instance['id'], **kwargs)
            instances.append(driver.encode_instance(instance))

        return instances
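ChanceScheduler is the smallest example of the refactored driver contract: a schedule_* method now creates the instance DB entries itself, casts straight to the host it picked, and returns encoded instances instead of a bare host name. A hypothetical driver that pins everything onto one host would look like this (sketch only; StaticScheduler and the 'compute-1' host name are invented, the helper calls are the ones introduced in this commit):

    from nova.scheduler import driver


    class StaticScheduler(driver.Scheduler):
        """Hypothetical driver that schedules everything onto one host."""

        def schedule_run_instance(self, context, request_spec,
                                  *args, **kwargs):
            num_instances = request_spec.get('num_instances', 1)
            instances = []
            for num in xrange(num_instances):
                # New contract: the scheduler creates the DB entry ...
                instance = self.create_instance_db_entry(context,
                                                         request_spec)
                # ... casts the boot request to its chosen host ...
                driver.cast_to_compute_host(context, 'compute-1',
                        'run_instance', instance_id=instance['id'], **kwargs)
                # ... and returns encoded instances to compute.api.
                instances.append(driver.encode_instance(instance))
            return instances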
@ -29,17 +29,94 @@ from nova import flags
from nova import log as logging
from nova import rpc
from nova import utils
from nova.compute import api as compute_api
from nova.compute import power_state
from nova.compute import vm_states
from nova.api.ec2 import ec2utils


FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.scheduler.driver')
flags.DEFINE_integer('service_down_time', 60,
                     'maximum time since last checkin for up service')
flags.DECLARE('instances_path', 'nova.compute.manager')


def cast_to_volume_host(context, host, method, update_db=True, **kwargs):
    """Cast request to a volume host queue"""

    if update_db:
        volume_id = kwargs.get('volume_id', None)
        if volume_id is not None:
            now = utils.utcnow()
            db.volume_update(context, volume_id,
                    {'host': host, 'scheduled_at': now})
    rpc.cast(context,
             db.queue_get_for(context, 'volume', host),
             {"method": method, "args": kwargs})
    LOG.debug(_("Casted '%(method)s' to volume '%(host)s'") % locals())


def cast_to_compute_host(context, host, method, update_db=True, **kwargs):
    """Cast request to a compute host queue"""

    if update_db:
        instance_id = kwargs.get('instance_id', None)
        if instance_id is not None:
            now = utils.utcnow()
            db.instance_update(context, instance_id,
                    {'host': host, 'scheduled_at': now})
    rpc.cast(context,
             db.queue_get_for(context, 'compute', host),
             {"method": method, "args": kwargs})
    LOG.debug(_("Casted '%(method)s' to compute '%(host)s'") % locals())


def cast_to_network_host(context, host, method, update_db=False, **kwargs):
    """Cast request to a network host queue"""

    rpc.cast(context,
             db.queue_get_for(context, 'network', host),
             {"method": method, "args": kwargs})
    LOG.debug(_("Casted '%(method)s' to network '%(host)s'") % locals())


def cast_to_host(context, topic, host, method, update_db=True, **kwargs):
    """Generic cast to host"""

    topic_mapping = {
            "compute": cast_to_compute_host,
            "volume": cast_to_volume_host,
            'network': cast_to_network_host}

    func = topic_mapping.get(topic)
    if func:
        func(context, host, method, update_db=update_db, **kwargs)
    else:
        rpc.cast(context,
                 db.queue_get_for(context, topic, host),
                 {"method": method, "args": kwargs})
        LOG.debug(_("Casted '%(method)s' to %(topic)s '%(host)s'")
                % locals())
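cast_to_host() is only a dispatcher over that topic map: a generic call lands in the topic-specific helper, which stamps host and scheduled_at on the row before casting. A usage sketch (the host name and instance id are invented; get_admin_context() is used just to have a request context, and a configured nova deployment is assumed):

    from nova import context as nova_context
    from nova.scheduler import driver

    ctxt = nova_context.get_admin_context()
    # Routed through topic_mapping to cast_to_compute_host(), which
    # updates instance 1's host/scheduled_at and then casts the message.
    driver.cast_to_host(ctxt, 'compute', 'host-42', 'run_instance',
                        instance_id=1)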

def encode_instance(instance, local=True):
    """Encode locally created instance for return via RPC"""
    # TODO(comstud): I would love to be able to return the full
    # instance information here, but we'll need some modifications
    # to the RPC code to handle datetime conversions with the
    # json encoding/decoding. We should be able to set a default
    # json handler somehow to do it.
    #
    # For now, I'll just return the instance ID and let the caller
    # do a DB lookup :-/
    if local:
        return dict(id=instance['id'], _is_precooked=False)
    else:
        instance['_is_precooked'] = True
        return instance
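Concretely, encode_instance() squeezes a local instance down to something that is always safe to json-encode over RPC, while a child zone's ("precooked") instance passes through whole. For example (the instance dicts are invented):

    from nova.scheduler import driver

    local = driver.encode_instance({'id': 42, 'display_name': 'vm'},
                                   local=True)
    print local                      # just the id plus _is_precooked=False

    remote = driver.encode_instance({'id': 7, 'display_name': 'zone-vm'},
                                    local=False)
    print remote['_is_precooked']    # True -- the full dict passes through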

class NoValidHost(exception.Error):
    """There is no valid host for the command."""
    pass
@ -55,6 +132,7 @@ class Scheduler(object):

    def __init__(self):
        self.zone_manager = None
        self.compute_api = compute_api.API()

    def set_zone_manager(self, zone_manager):
        """Called by the Scheduler Service to supply a ZoneManager."""
@ -76,7 +154,20 @@ class Scheduler(object):
                for service in services
                if self.service_is_up(service)]

    def schedule(self, context, topic, *_args, **_kwargs):
    def create_instance_db_entry(self, context, request_spec):
        """Create instance DB entry based on request_spec"""
        base_options = request_spec['instance_properties']
        image = request_spec['image']
        instance_type = request_spec.get('instance_type')
        security_group = request_spec.get('security_group', 'default')
        block_device_mapping = request_spec.get('block_device_mapping', [])

        instance = self.compute_api.create_db_entry_for_new_instance(
                context, instance_type, image, base_options,
                security_group, block_device_mapping)
        return instance
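The request_spec this method consumes is a plain dict assembled by compute.api (see the _schedule_run_instance() hunk earlier in this commit). An illustrative spec with just the keys create_instance_db_entry() reads; the values are stand-ins, not a real boot request:

    request_spec = {
        'instance_properties': {'display_name': 'vm-1', 'vcpus': 1},
        'instance_type': {'name': 'm1.small', 'memory_mb': 2048},
        'image': {'id': 2, 'properties': {}},
        'security_group': 'default',      # optional; defaults to 'default'
        'block_device_mapping': [],       # optional; defaults to []
    }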
    def schedule(self, context, topic, method, *_args, **_kwargs):
        """Must override at least this method for scheduler to work."""
        raise NotImplementedError(_("Must implement a fallback schedule"))

@ -114,10 +205,12 @@ class Scheduler(object):
                             volume_ref['id'],
                             {'status': 'migrating'})

        # Return value is necessary to send request to src
        # Check _schedule() in detail.
        src = instance_ref['host']
        return src
        cast_to_compute_host(context, src, 'live_migration',
                update_db=False,
                instance_id=instance_id,
                dest=dest,
                block_migration=block_migration)

    def _live_migration_src_check(self, context, instance_ref):
        """Live migration check routine (for src host).
@ -205,7 +298,7 @@ class Scheduler(object):
        if not block_migration:
            src = instance_ref['host']
            ipath = FLAGS.instances_path
            logging.error(_("Cannot confirm tmpfile at %(ipath)s is on "
            LOG.error(_("Cannot confirm tmpfile at %(ipath)s is on "
                            "same shared storage between %(src)s "
                            "and %(dest)s.") % locals())
            raise
@ -243,7 +336,7 @@ class Scheduler(object):

        except rpc.RemoteError:
            src = instance_ref['host']
            logging.exception(_("host %(dest)s is not compatible with "
            LOG.exception(_("host %(dest)s is not compatible with "
                                "original host %(src)s.") % locals())
            raise

@ -354,6 +447,8 @@ class Scheduler(object):
        dst_t = db.queue_get_for(context, FLAGS.compute_topic, dest)
        src_t = db.queue_get_for(context, FLAGS.compute_topic, src)

        filename = None

        try:
            # create tmpfile at dest host
            filename = rpc.call(context, dst_t,
@ -370,6 +465,8 @@ class Scheduler(object):
            raise

        finally:
            rpc.call(context, dst_t,
                     {"method": 'cleanup_shared_storage_test_file',
                      "args": {'filename': filename}})
            # Should only be None for tests?
            if filename is not None:
                rpc.call(context, dst_t,
                         {"method": 'cleanup_shared_storage_test_file',
                          "args": {'filename': filename}})
@ -160,8 +160,7 @@ class LeastCostScheduler(base_scheduler.BaseScheduler):

        weighted = []
        weight_log = []
        for cost, (hostname, service) in zip(costs, hosts):
            caps = service[topic]
        for cost, (hostname, caps) in zip(costs, hosts):
            weight_log.append("%s: %s" % (hostname, "%.2f" % cost))
            weight_dict = dict(weight=cost, hostname=hostname,
                               capabilities=caps)
@ -81,37 +81,23 @@ class SchedulerManager(manager.Manager):
        """Select a list of hosts best matching the provided specs."""
        return self.driver.select(context, *args, **kwargs)

    def get_scheduler_rules(self, context=None, *args, **kwargs):
        """Ask the driver how requests should be made of it."""
        return self.driver.get_scheduler_rules(context, *args, **kwargs)

    def _schedule(self, method, context, topic, *args, **kwargs):
        """Tries to call schedule_* method on the driver to retrieve host.

        Falls back to schedule(context, topic) if method doesn't exist.
        """
        driver_method = 'schedule_%s' % method
        elevated = context.elevated()
        try:
            real_meth = getattr(self.driver, driver_method)
            args = (elevated,) + args
            args = (context,) + args
        except AttributeError, e:
            LOG.warning(_("Driver Method %(driver_method)s missing: %(e)s."
                          "Reverting to schedule()") % locals())
            real_meth = self.driver.schedule
            args = (elevated, topic) + args
        host = real_meth(*args, **kwargs)
            args = (context, topic, method) + args

        if not host:
            LOG.debug(_("%(topic)s %(method)s handled in Scheduler")
                    % locals())
            return

        rpc.cast(context,
                 db.queue_get_for(context, topic, host),
                 {"method": method,
                  "args": kwargs})
        LOG.debug(_("Casted to %(topic)s %(host)s for %(method)s") % locals())
        # Scheduler methods are responsible for casting.
        return real_meth(*args, **kwargs)

    # NOTE (masumotok) : This method should be moved to nova.api.ec2.admin.
    #                    Based on bexar design summit discussion,
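So the manager now resolves schedule_<method> by name, falls back to the generic schedule(), and trusts the driver to do its own casting. A toy version of that dispatch (FallbackDriver and both return values are invented):

    class FallbackDriver(object):
        def schedule(self, context, topic, method, *args, **kwargs):
            return 'generic schedule() handled %s' % method

        def schedule_run_instance(self, context, *args, **kwargs):
            return ['instance-1']

    drv = FallbackDriver()
    ctxt = {}
    for topic, method in (('compute', 'run_instance'),
                          ('volume', 'create_volume')):
        try:
            real_meth = getattr(drv, 'schedule_%s' % method)
            args = (ctxt,)
        except AttributeError:
            real_meth = drv.schedule
            args = (ctxt, topic, method)
        print real_meth(*args)
    # ['instance-1']
    # generic schedule() handled create_volume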
@ -38,7 +38,8 @@ flags.DEFINE_string('volume_scheduler_driver',
# A mapping of methods to topics so we can figure out which driver to use.
_METHOD_MAP = {'run_instance': 'compute',
               'start_instance': 'compute',
               'create_volume': 'volume'}
               'create_volume': 'volume',
               'create_volumes': 'volume'}


class MultiScheduler(driver.Scheduler):
@ -69,5 +70,6 @@ class MultiScheduler(driver.Scheduler):
        for k, v in self.drivers.iteritems():
            v.set_zone_manager(zone_manager)

    def schedule(self, context, topic, *_args, **_kwargs):
        return self.drivers[topic].schedule(context, topic, *_args, **_kwargs)
    def schedule(self, context, topic, method, *_args, **_kwargs):
        return self.drivers[topic].schedule(context, topic,
                method, *_args, **_kwargs)
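MultiScheduler's routing, per the comment above, is two dict lookups: the rpc method name picks a topic and the topic picks a sub-driver. A minimal sketch with string stand-ins for the real driver objects:

    _METHOD_MAP = {'run_instance': 'compute',
                   'start_instance': 'compute',
                   'create_volume': 'volume',
                   'create_volumes': 'volume'}

    drivers = {'compute': '<ChanceScheduler>', 'volume': '<SimpleScheduler>'}
    for method in ('run_instance', 'create_volumes'):
        print method, '->', drivers[_METHOD_MAP[method]]
    # run_instance -> <ChanceScheduler>
    # create_volumes -> <SimpleScheduler>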
@ -39,47 +39,50 @@ flags.DEFINE_integer("max_networks", 1000,
class SimpleScheduler(chance.ChanceScheduler):
    """Implements Naive Scheduler that tries to find least loaded host."""

    def _schedule_instance(self, context, instance_id, *_args, **_kwargs):
    def _schedule_instance(self, context, instance_opts, *_args, **_kwargs):
        """Picks a host that is up and has the fewest running instances."""
        instance_ref = db.instance_get(context, instance_id)
        if (instance_ref['availability_zone']
            and ':' in instance_ref['availability_zone']
            and context.is_admin):
            zone, _x, host = instance_ref['availability_zone'].partition(':')

        availability_zone = instance_opts.get('availability_zone')

        if availability_zone and context.is_admin and \
                (':' in availability_zone):
            zone, host = availability_zone.split(':', 1)
            service = db.service_get_by_args(context.elevated(), host,
                                             'nova-compute')
            if not self.service_is_up(service):
                raise driver.WillNotSchedule(_("Host %s is not alive") % host)

            # TODO(vish): this probably belongs in the manager, if we
            #             can generalize this somehow
            now = utils.utcnow()
            db.instance_update(context, instance_id, {'host': host,
                                                      'scheduled_at': now})
            return host

        results = db.service_get_all_compute_sorted(context)
        for result in results:
            (service, instance_cores) = result
            if instance_cores + instance_ref['vcpus'] > FLAGS.max_cores:
            if instance_cores + instance_opts['vcpus'] > FLAGS.max_cores:
                raise driver.NoValidHost(_("All hosts have too many cores"))
            if self.service_is_up(service):
                # NOTE(vish): this probably belongs in the manager, if we
                #             can generalize this somehow
                now = utils.utcnow()
                db.instance_update(context,
                                   instance_id,
                                   {'host': service['host'],
                                    'scheduled_at': now})
                return service['host']
        raise driver.NoValidHost(_("Scheduler was unable to locate a host"
                                   " for this request. Is the appropriate"
                                   " service running?"))
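The forced-host branch above hinges on the availability zone string: an admin may write '<zone>:<host>' to pin an instance to a specific compute node. A two-line parsing sketch (the zone string is invented):

    availability_zone = 'nova:compute-3'
    is_admin = True

    if availability_zone and is_admin and ':' in availability_zone:
        zone, host = availability_zone.split(':', 1)
        print 'forced onto host %s (zone %s)' % (host, zone)
    # forced onto host compute-3 (zone nova)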
    def schedule_run_instance(self, context, instance_id, *_args, **_kwargs):
        return self._schedule_instance(context, instance_id, *_args, **_kwargs)
    def schedule_run_instance(self, context, request_spec, *_args, **_kwargs):
        num_instances = request_spec.get('num_instances', 1)
        instances = []
        for num in xrange(num_instances):
            host = self._schedule_instance(context,
                    request_spec['instance_properties'], *_args, **_kwargs)
            instance_ref = self.create_instance_db_entry(context,
                    request_spec)
            driver.cast_to_compute_host(context, host, 'run_instance',
                    instance_id=instance_ref['id'], **_kwargs)
            instances.append(driver.encode_instance(instance_ref))
        return instances

    def schedule_start_instance(self, context, instance_id, *_args, **_kwargs):
        return self._schedule_instance(context, instance_id, *_args, **_kwargs)
        instance_ref = db.instance_get(context, instance_id)
        host = self._schedule_instance(context, instance_ref,
                *_args, **_kwargs)
        driver.cast_to_compute_host(context, host, 'start_instance',
                instance_id=instance_id, **_kwargs)

    def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
        """Picks a host that is up and has the fewest volumes."""
@ -92,13 +95,9 @@ class SimpleScheduler(chance.ChanceScheduler):
                                         'nova-volume')
        if not self.service_is_up(service):
            raise driver.WillNotSchedule(_("Host %s not available") % host)

        # TODO(vish): this probably belongs in the manager, if we
        #             can generalize this somehow
        now = utils.utcnow()
        db.volume_update(context, volume_id, {'host': host,
                                              'scheduled_at': now})
        return host
        driver.cast_to_volume_host(context, host, 'create_volume',
                volume_id=volume_id, **_kwargs)
        return None
        results = db.service_get_all_volume_sorted(context)
        for result in results:
            (service, volume_gigabytes) = result
@ -106,14 +105,9 @@ class SimpleScheduler(chance.ChanceScheduler):
                raise driver.NoValidHost(_("All hosts have too many "
                                           "gigabytes"))
            if self.service_is_up(service):
                # NOTE(vish): this probably belongs in the manager, if we
                #             can generalize this somehow
                now = utils.utcnow()
                db.volume_update(context,
                                 volume_id,
                                 {'host': service['host'],
                                  'scheduled_at': now})
                return service['host']
                driver.cast_to_volume_host(context, service['host'],
                        'create_volume', volume_id=volume_id, **_kwargs)
                return None
        raise driver.NoValidHost(_("Scheduler was unable to locate a host"
                                   " for this request. Is the appropriate"
                                   " service running?"))
@ -127,7 +121,9 @@ class SimpleScheduler(chance.ChanceScheduler):
        if instance_count >= FLAGS.max_networks:
            raise driver.NoValidHost(_("All hosts have too many networks"))
        if self.service_is_up(service):
            return service['host']
            driver.cast_to_network_host(context, service['host'],
                    'set_network_host', **_kwargs)
            return None
        raise driver.NoValidHost(_("Scheduler was unable to locate a host"
                                   " for this request. Is the appropriate"
                                   " service running?"))
@ -195,8 +195,6 @@ class VsaScheduler(simple.SimpleScheduler):
            'display_description': vol['description'],
            'volume_type_id': vol['volume_type_id'],
            'metadata': dict(to_vsa_id=vsa_id),
            'host': vol['host'],
            'scheduled_at': now
            }

        size = vol['size']
@ -205,12 +203,10 @@ class VsaScheduler(simple.SimpleScheduler):
        LOG.debug(_("Provision volume %(name)s of size %(size)s GB on "\
                    "host %(host)s"), locals())

        volume_ref = db.volume_create(context, options)
        rpc.cast(context,
                 db.queue_get_for(context, "volume", vol['host']),
                 {"method": "create_volume",
                  "args": {"volume_id": volume_ref['id'],
                           "snapshot_id": None}})
        volume_ref = db.volume_create(context.elevated(), options)
        driver.cast_to_volume_host(context, vol['host'],
                'create_volume', volume_id=volume_ref['id'],
                snapshot_id=None)

    def _check_host_enforcement(self, context, availability_zone):
        if (availability_zone
@ -274,7 +270,6 @@ class VsaScheduler(simple.SimpleScheduler):
    def schedule_create_volumes(self, context, request_spec,
                                availability_zone=None, *_args, **_kwargs):
        """Picks hosts for hosting multiple volumes."""

        num_volumes = request_spec.get('num_volumes')
        LOG.debug(_("Attempting to spawn %(num_volumes)d volume(s)") %
                  locals())
@ -291,7 +286,8 @@ class VsaScheduler(simple.SimpleScheduler):

            for vol in volume_params:
                self._provision_volume(context, vol, vsa_id, availability_zone)
        except:
        except Exception:
            LOG.exception(_("Error creating volumes"))
            if vsa_id:
                db.vsa_update(context, vsa_id, dict(status=VsaState.FAILED))

@ -310,10 +306,9 @@ class VsaScheduler(simple.SimpleScheduler):
        host = self._check_host_enforcement(context,
                                            volume_ref['availability_zone'])
        if host:
            now = utils.utcnow()
            db.volume_update(context, volume_id, {'host': host,
                                                  'scheduled_at': now})
            return host
            driver.cast_to_volume_host(context, host, 'create_volume',
                    volume_id=volume_id, **_kwargs)
            return None

        volume_type_id = volume_ref['volume_type_id']
        if volume_type_id:
@ -344,18 +339,16 @@ class VsaScheduler(simple.SimpleScheduler):

        try:
            (host, qos_cap) = self._select_hosts(request_spec, all_hosts=hosts)
        except:
        except Exception:
            LOG.exception(_("Error creating volume"))
            if volume_ref['to_vsa_id']:
                db.vsa_update(context, volume_ref['to_vsa_id'],
                              dict(status=VsaState.FAILED))
            raise

        if host:
            now = utils.utcnow()
            db.volume_update(context, volume_id, {'host': host,
                                                  'scheduled_at': now})
            self._consume_resource(qos_cap, volume_ref['size'], -1)
            return host
            driver.cast_to_volume_host(context, host, 'create_volume',
                    volume_id=volume_id, **_kwargs)

    def _consume_full_drive(self, qos_values, direction):
        qos_values['FullDrive']['NumFreeDrives'] += direction
@ -35,7 +35,7 @@ class ZoneScheduler(driver.Scheduler):
        for topic and availability zone (if defined).
        """

        if zone is None:
        if not zone:
            return self.hosts_up(context, topic)

        services = db.service_get_all_by_topic(context, topic)
@ -44,16 +44,34 @@ class ZoneScheduler(driver.Scheduler):
                if self.service_is_up(service)
                and service.availability_zone == zone]

    def schedule(self, context, topic, *_args, **_kwargs):
    def _schedule(self, context, topic, request_spec, **kwargs):
        """Picks a host that is up at random in selected
        availability zone (if defined).
        """

        zone = _kwargs.get('availability_zone')
        hosts = self.hosts_up_with_zone(context, topic, zone)
        zone = kwargs.get('availability_zone')
        if not zone and request_spec:
            zone = request_spec['instance_properties'].get(
                    'availability_zone')
        hosts = self.hosts_up_with_zone(context.elevated(), topic, zone)
        if not hosts:
            raise driver.NoValidHost(_("Scheduler was unable to locate a host"
                                       " for this request. Is the appropriate"
                                       " service running?"))

        return hosts[int(random.random() * len(hosts))]

    def schedule(self, context, topic, method, *_args, **kwargs):
        host = self._schedule(context, topic, None, **kwargs)
        driver.cast_to_host(context, topic, host, method, **kwargs)

    def schedule_run_instance(self, context, request_spec, *_args, **kwargs):
        """Builds and starts instances on selected hosts"""
        num_instances = request_spec.get('num_instances', 1)
        instances = []
        for num in xrange(num_instances):
            host = self._schedule(context, 'compute', request_spec, **kwargs)
            instance = self.create_instance_db_entry(context, request_spec)
            driver.cast_to_compute_host(context, host,
                    'run_instance', instance_id=instance['id'], **kwargs)
            instances.append(driver.encode_instance(instance))
        return instances
@ -25,6 +25,7 @@ import webob

from nova import db
from nova import exception
from nova import flags
from nova import rpc
from nova import test
import nova.api.openstack
from nova.tests.api.openstack import fakes
@ -118,13 +119,15 @@ class CreateserverextTest(test.TestCase):
        if 'user_data' in kwargs:
            self.user_data = kwargs['user_data']

        return [{'id': '1234', 'display_name': 'fakeinstance',
        resv_id = None

        return ([{'id': '1234', 'display_name': 'fakeinstance',
                 'uuid': FAKE_UUID,
                 'user_id': 'fake',
                 'project_id': 'fake',
                 'created_at': "",
                 'updated_at': "",
                 'progress': 0}]
                 'progress': 0}], resv_id)

    def set_admin_password(self, *args, **kwargs):
        pass
@ -133,10 +136,9 @@ class CreateserverextTest(test.TestCase):
        self.stubs.Set(nova.compute, 'API',
                       self._make_stub_method(compute_api))
        self.stubs.Set(
            nova.api.openstack.create_instance_helper.CreateInstanceHelper,
            nova.api.openstack.servers.Controller,
            '_get_kernel_ramdisk_from_image',
            self._make_stub_method((1, 1)))

        return compute_api

    def _setup_mock_network_api(self):
@ -399,7 +401,8 @@ class CreateserverextTest(test.TestCase):
        self._setup_mock_network_api()
        body_dict = self._create_security_group_request_dict(security_groups)
        request = self._get_create_request_json(body_dict)
        response = request.get_response(fakes.wsgi_app())
        compute_api, response = \
            self._run_create_instance_with_mock_compute_api(request)
        self.assertEquals(response.status_int, 202)

    def test_get_server_by_id_verify_security_groups_json(self):

@ -29,8 +29,12 @@ FLAGS = flags.FLAGS


def fake_compute_api_create(cls, context, instance_type, image_href, **kwargs):
    global _block_device_mapping_seen
    _block_device_mapping_seen = kwargs.get('block_device_mapping')

    inst_type = instance_types.get_instance_type_by_flavor_id(2)
    return [{'id': 1,
    resv_id = None
    return ([{'id': 1,
             'display_name': 'test_server',
             'uuid': fake_gen_uuid(),
             'instance_type': dict(inst_type),
@ -42,7 +46,7 @@ def fake_compute_api_create(cls, context, instance_type, image_href, **kwargs):
             'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
             'updated_at': datetime.datetime(2010, 11, 11, 11, 0, 0),
             'progress': 0
             }]
             }], resv_id)


def fake_get_instance_nw_info(cls, context, instance):
@ -73,6 +77,8 @@ class BootFromVolumeTest(test.TestCase):
                    delete_on_termination=False,
                    )]
                ))
        global _block_device_mapping_seen
        _block_device_mapping_seen = None
        req = webob.Request.blank('/v1.1/fake/os-volumes_boot')
        req.method = 'POST'
        req.body = json.dumps(body)
@ -85,3 +91,7 @@ class BootFromVolumeTest(test.TestCase):
        self.assertEqual(u'test_server', server['name'])
        self.assertEqual(3, int(server['image']['id']))
        self.assertEqual(FLAGS.password_length, len(server['adminPass']))
        self.assertEqual(len(_block_device_mapping_seen), 1)
        self.assertEqual(_block_device_mapping_seen[0]['volume_id'], 1)
        self.assertEqual(_block_device_mapping_seen[0]['device_name'],
                         '/dev/vda')

@ -87,6 +87,7 @@ class ExtensionControllerTest(test.TestCase):
        self.ext_list = [
            "Createserverext",
            "DeferredDelete",
            "DiskConfig",
            "FlavorExtraSpecs",
            "FlavorExtraData",
            "Floating_ips",
@ -102,7 +103,7 @@ class ExtensionControllerTest(test.TestCase):
            "VirtualInterfaces",
            "Volumes",
            "VolumeTypes",
            "DiskConfig",
            "Zones",
            ]
        self.ext_list.sort()

@ -9,7 +9,7 @@ from nova import context
from nova import utils
from nova import exception
from nova import flags
from nova.api.openstack import create_instance_helper
from nova.api.openstack import servers
from nova.compute import vm_states
from nova.compute import instance_types
import nova.db.api
@ -971,7 +971,7 @@ class ServerActionsTestV11(test.TestCase):
class TestServerActionXMLDeserializerV11(test.TestCase):

    def setUp(self):
        self.deserializer = create_instance_helper.ServerXMLDeserializerV11()
        self.deserializer = servers.ServerXMLDeserializerV11()

    def tearDown(self):
        pass

@ -33,7 +33,6 @@ from nova import flags
from nova import test
from nova import utils
import nova.api.openstack
from nova.api.openstack import create_instance_helper
from nova.api.openstack import servers
from nova.api.openstack import xmlutil
import nova.compute.api
@ -1562,10 +1561,15 @@ class ServersTest(test.TestCase):

    def _setup_for_create_instance(self):
        """Shared implementation for tests below that create instance"""

        self.instance_cache_num = 0
        self.instance_cache = {}

        def instance_create(context, inst):
            inst_type = instance_types.get_instance_type_by_flavor_id(3)
            image_ref = 'http://localhost/images/2'
            return {'id': 1,
            self.instance_cache_num += 1
            instance = {'id': self.instance_cache_num,
                    'display_name': 'server_test',
                    'uuid': FAKE_UUID,
                    'instance_type': dict(inst_type),
@ -1574,11 +1578,32 @@ class ServersTest(test.TestCase):
                    'image_ref': image_ref,
                    'user_id': 'fake',
                    'project_id': 'fake',
                    'reservation_id': inst['reservation_id'],
                    "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
                    "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
                    "config_drive": self.config_drive,
                    "progress": 0
                    }
            self.instance_cache[instance['id']] = instance
            return instance

        def instance_get(context, instance_id):
            """Stub for compute/api create() pulling in instance after
            scheduling
            """
            return self.instance_cache[instance_id]

        def rpc_call_wrapper(context, topic, msg):
            """Stub out the scheduler creating the instance entry"""
            if topic == FLAGS.scheduler_topic and \
                    msg['method'] == 'run_instance':
                request_spec = msg['args']['request_spec']
                num_instances = request_spec.get('num_instances', 1)
                instances = []
                for x in xrange(num_instances):
                    instances.append(instance_create(context,
                        request_spec['instance_properties']))
                return instances

        def server_update(context, id, params):
            return instance_create(context, id)
@ -1601,18 +1626,20 @@ class ServersTest(test.TestCase):
        self.stubs.Set(nova.db.api, 'project_get_networks',
                       project_get_networks)
        self.stubs.Set(nova.db.api, 'instance_create', instance_create)
        self.stubs.Set(nova.db.api, 'instance_get', instance_get)
        self.stubs.Set(nova.rpc, 'cast', fake_method)
        self.stubs.Set(nova.rpc, 'call', fake_method)
        self.stubs.Set(nova.rpc, 'call', rpc_call_wrapper)
        self.stubs.Set(nova.db.api, 'instance_update', server_update)
        self.stubs.Set(nova.db.api, 'queue_get_for', queue_get_for)
        self.stubs.Set(nova.network.manager.VlanManager, 'allocate_fixed_ip',
                       fake_method)
        self.stubs.Set(
            nova.api.openstack.create_instance_helper.CreateInstanceHelper,
            "_get_kernel_ramdisk_from_image", kernel_ramdisk_mapping)
            servers.Controller,
            "_get_kernel_ramdisk_from_image",
            kernel_ramdisk_mapping)
        self.stubs.Set(nova.compute.api.API, "_find_host", find_host)

    def _test_create_instance_helper(self):
    def _test_create_instance(self):
        self._setup_for_create_instance()

        body = dict(server=dict(
@ -1636,7 +1663,7 @@ class ServersTest(test.TestCase):
        self.assertEqual(FAKE_UUID, server['uuid'])

    def test_create_instance(self):
        self._test_create_instance_helper()
        self._test_create_instance()

    def test_create_instance_has_uuid(self):
        """Tests at the db-layer instead of API layer since that's where the
@ -1648,51 +1675,134 @@ class ServersTest(test.TestCase):
        expected = FAKE_UUID
        self.assertEqual(instance['uuid'], expected)

    def test_create_instance_via_zones(self):
        """Server generated ReservationID"""
    def test_create_multiple_instances(self):
        """Test creating multiple instances but not asking for
        reservation_id
        """
        self._setup_for_create_instance()
        self.flags(allow_admin_api=True)

        body = dict(server=dict(
            name='server_test', imageId=3, flavorId=2,
            metadata={'hello': 'world', 'open': 'stack'},
            personality={}))
        req = webob.Request.blank('/v1.0/zones/boot')
        image_href = 'http://localhost/v1.1/123/images/2'
        flavor_ref = 'http://localhost/123/flavors/3'
        body = {
            'server': {
                'min_count': 2,
                'name': 'server_test',
                'imageRef': image_href,
                'flavorRef': flavor_ref,
                'metadata': {'hello': 'world',
                             'open': 'stack'},
                'personality': []
            }
        }

        req = webob.Request.blank('/v1.1/123/servers')
        req.method = 'POST'
        req.body = json.dumps(body)
        req.headers["content-type"] = "application/json"

        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 202)
        body = json.loads(res.body)
        self.assertIn('server', body)

        reservation_id = json.loads(res.body)['reservation_id']
        self.assertEqual(res.status_int, 200)
    def test_create_multiple_instances_resv_id_return(self):
        """Test creating multiple instances while asking for
        reservation_id
        """
        self._setup_for_create_instance()

        image_href = 'http://localhost/v1.1/123/images/2'
        flavor_ref = 'http://localhost/123/flavors/3'
        body = {
            'server': {
                'min_count': 2,
                'name': 'server_test',
                'imageRef': image_href,
                'flavorRef': flavor_ref,
                'metadata': {'hello': 'world',
                             'open': 'stack'},
                'personality': [],
                'return_reservation_id': True
            }
        }

        req = webob.Request.blank('/v1.1/123/servers')
        req.method = 'POST'
        req.body = json.dumps(body)
        req.headers["content-type"] = "application/json"

        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 202)
        body = json.loads(res.body)
        reservation_id = body.get('reservation_id')
        self.assertNotEqual(reservation_id, "")
        self.assertNotEqual(reservation_id, None)
        self.assertTrue(len(reservation_id) > 1)
    def test_create_instance_via_zones_with_resid(self):
        """User supplied ReservationID"""
    def test_create_instance_with_user_supplied_reservation_id(self):
        """Non-admin supplied reservation_id should be ignored."""
        self._setup_for_create_instance()
        self.flags(allow_admin_api=True)

        body = dict(server=dict(
            name='server_test', imageId=3, flavorId=2,
            metadata={'hello': 'world', 'open': 'stack'},
            personality={}, reservation_id='myresid'))
        req = webob.Request.blank('/v1.0/zones/boot')
        image_href = 'http://localhost/v1.1/123/images/2'
        flavor_ref = 'http://localhost/123/flavors/3'
        body = {
            'server': {
                'name': 'server_test',
                'imageRef': image_href,
                'flavorRef': flavor_ref,
                'metadata': {'hello': 'world',
                             'open': 'stack'},
                'personality': [],
                'reservation_id': 'myresid',
                'return_reservation_id': True
            }
        }

        req = webob.Request.blank('/v1.1/123/servers')
        req.method = 'POST'
        req.body = json.dumps(body)
        req.headers["content-type"] = "application/json"

        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 202)
        res_body = json.loads(res.body)
        self.assertIn('reservation_id', res_body)
        self.assertNotEqual(res_body['reservation_id'], 'myresid')

    def test_create_instance_with_admin_supplied_reservation_id(self):
        """Admin supplied reservation_id should be honored."""
        self._setup_for_create_instance()

        image_href = 'http://localhost/v1.1/123/images/2'
        flavor_ref = 'http://localhost/123/flavors/3'
        body = {
            'server': {
                'name': 'server_test',
                'imageRef': image_href,
                'flavorRef': flavor_ref,
                'metadata': {'hello': 'world',
                             'open': 'stack'},
                'personality': [],
                'reservation_id': 'myresid',
                'return_reservation_id': True
            }
        }

        req = webob.Request.blank('/v1.1/123/servers')
        req.method = 'POST'
        req.body = json.dumps(body)
        req.headers["content-type"] = "application/json"

        context = nova.context.RequestContext('testuser', 'testproject',
                                              is_admin=True)
        res = req.get_response(fakes.wsgi_app(fake_auth_context=context))
        self.assertEqual(res.status_int, 202)
        reservation_id = json.loads(res.body)['reservation_id']
        self.assertEqual(res.status_int, 200)
        self.assertEqual(reservation_id, "myresid")

    def test_create_instance_no_key_pair(self):
        fakes.stub_out_key_pair_funcs(self.stubs, have_key_pair=False)
        self._test_create_instance_helper()
        self._test_create_instance()

    def test_create_instance_no_name(self):
        self._setup_for_create_instance()
@ -2782,7 +2892,7 @@ class TestServerStatus(test.TestCase):
class TestServerCreateRequestXMLDeserializerV10(unittest.TestCase):

    def setUp(self):
        self.deserializer = create_instance_helper.ServerXMLDeserializer()
        self.deserializer = servers.ServerXMLDeserializer()

    def test_minimal_request(self):
        serial_request = """
@ -3068,7 +3178,7 @@ class TestServerCreateRequestXMLDeserializerV11(test.TestCase):

    def setUp(self):
        super(TestServerCreateRequestXMLDeserializerV11, self).setUp()
        self.deserializer = create_instance_helper.ServerXMLDeserializerV11()
        self.deserializer = servers.ServerXMLDeserializerV11()

    def test_minimal_request(self):
        serial_request = """
@ -3543,10 +3653,12 @@ class TestServerInstanceCreation(test.TestCase):
            else:
                self.injected_files = None

            return [{'id': '1234', 'display_name': 'fakeinstance',
            resv_id = None

            return ([{'id': '1234', 'display_name': 'fakeinstance',
                      'user_id': 'fake',
                      'project_id': 'fake',
                      'uuid': FAKE_UUID}]
                      'uuid': FAKE_UUID}], resv_id)

        def set_admin_password(self, *args, **kwargs):
            pass
@ -3559,8 +3671,9 @@ class TestServerInstanceCreation(test.TestCase):
        compute_api = MockComputeAPI()
        self.stubs.Set(nova.compute, 'API', make_stub_method(compute_api))
        self.stubs.Set(
            nova.api.openstack.create_instance_helper.CreateInstanceHelper,
            '_get_kernel_ramdisk_from_image', make_stub_method((1, 1)))
            servers.Controller,
            '_get_kernel_ramdisk_from_image',
            make_stub_method((1, 1)))
        return compute_api

    def _create_personality_request_dict(self, personality_files):
@ -3821,8 +3934,8 @@ class TestGetKernelRamdiskFromImage(test.TestCase):
    @staticmethod
    def _get_k_r(image_meta):
        """Rebinding function to a shorter name for convenience"""
        kernel_id, ramdisk_id = create_instance_helper.CreateInstanceHelper. \
            _do_get_kernel_ramdisk_from_image(image_meta)
        kernel_id, ramdisk_id = servers.Controller.\
            _do_get_kernel_ramdisk_from_image(image_meta)
        return kernel_id, ramdisk_id
@ -16,6 +16,7 @@

import json
import httplib
import urllib
import urlparse

from nova import log as logging
@ -100,7 +101,7 @@ class TestOpenStackClient(object):

        relative_url = parsed_url.path
        if parsed_url.query:
            relative_url = relative_url + parsed_url.query
            relative_url = relative_url + "?" + parsed_url.query
        LOG.info(_("Doing %(method)s on %(relative_url)s") % locals())
        if body:
            LOG.info(_("Body: %s") % body)
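(Note: the one-character fix above matters because urlparse drops the '?' separator when it splits a URL, so the query string must be re-joined by hand. A quick standard-library illustration:)

    import urlparse
    parsed_url = urlparse.urlparse('http://h/servers?reservation_id=r-abc')
    # parsed_url.path  -> '/servers'
    # parsed_url.query -> 'reservation_id=r-abc'   (no leading '?')
    relative_url = parsed_url.path + "?" + parsed_url.query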
@ -205,12 +206,24 @@ class TestOpenStackClient(object):
    def get_server(self, server_id):
        return self.api_get('/servers/%s' % server_id)['server']

    def get_servers(self, detail=True):
    def get_servers(self, detail=True, search_opts=None):
        rel_url = '/servers/detail' if detail else '/servers'

        if search_opts is not None:
            qparams = {}
            for opt, val in search_opts.iteritems():
                qparams[opt] = val
            if qparams:
                query_string = "?%s" % urllib.urlencode(qparams)
                rel_url += query_string
        return self.api_get(rel_url)['servers']

    def post_server(self, server):
        return self.api_post('/servers', server)['server']
        response = self.api_post('/servers', server)
        if 'reservation_id' in response:
            return response
        else:
            return response['server']
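(Note: post_server() now has two return shapes. A short usage sketch with assumed names -- `client` is a TestOpenStackClient and `server_req`/`multi_req` are request dicts like the ones the tests below build:)

    # Plain create: the helper unwraps the 'server' key as before.
    server = client.post_server(server_req)
    # Multi-create with 'return_reservation_id' set: the raw body comes back,
    # and the instances can be found again via the new search_opts support.
    resv_id = client.post_server(multi_req)['reservation_id']
    servers = client.get_servers(detail=True,
                                 search_opts={'reservation_id': resv_id})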
    def put_server(self, server_id, server):
        return self.api_put('/servers/%s' % server_id, server)

@ -438,6 +438,42 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
        # Cleanup
        self._delete_server(server_id)

    def test_create_multiple_servers(self):
        """Creates multiple servers and checks for reservation_id"""

        # Create 2 servers, setting 'return_reservation_id', which should
        # return a reservation_id
        server = self._build_minimal_create_server_request()
        server['min_count'] = 2
        server['return_reservation_id'] = True
        post = {'server': server}
        response = self.api.post_server(post)
        self.assertIn('reservation_id', response)
        reservation_id = response['reservation_id']
        self.assertNotIn(reservation_id, ['', None])

        # Create 1 more server, which should not return a reservation_id
        server = self._build_minimal_create_server_request()
        post = {'server': server}
        created_server = self.api.post_server(post)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']

        # lookup servers created by the first request.
        servers = self.api.get_servers(detail=True,
                search_opts={'reservation_id': reservation_id})
        server_map = dict((server['id'], server) for server in servers)
        found_server = server_map.get(created_server_id)
        # The server from the 2nd request should not be there.
        self.assertEqual(found_server, None)
        # Should have found 2 servers.
        self.assertEqual(len(server_map), 2)

        # Cleanup
        self._delete_server(created_server_id)
        for server_id in server_map.iterkeys():
            self._delete_server(server_id)


if __name__ == "__main__":
    unittest.main()

@ -20,6 +20,7 @@ import json

import nova.db

from nova import context
from nova import exception
from nova import rpc
from nova import test
@ -102,7 +103,7 @@ def fake_empty_call_zone_method(context, method, specs, zones):
was_called = False


def fake_provision_resource(context, item, instance_id, request_spec, kwargs):
def fake_provision_resource(context, item, request_spec, kwargs):
    global was_called
    was_called = True

@ -118,8 +119,7 @@ def fake_provision_resource_locally(context, build_plan, request_spec, kwargs):
    was_called = True


def fake_provision_resource_from_blob(context, item, instance_id,
                                      request_spec, kwargs):
def fake_provision_resource_from_blob(context, item, request_spec, kwargs):
    global was_called
    was_called = True

@ -185,7 +185,7 @@ class AbstractSchedulerTestCase(test.TestCase):
        zm = FakeZoneManager()
        sched.set_zone_manager(zm)

        fake_context = {}
        fake_context = context.RequestContext('user', 'project')
        build_plan = sched.select(fake_context,
                {'instance_type': {'memory_mb': 512},
                 'num_instances': 4})
@ -229,9 +229,10 @@ class AbstractSchedulerTestCase(test.TestCase):
        zm = FakeEmptyZoneManager()
        sched.set_zone_manager(zm)

        fake_context = {}
        fake_context = context.RequestContext('user', 'project')
        request_spec = {}
        self.assertRaises(driver.NoValidHost, sched.schedule_run_instance,
                          fake_context, 1,
                          fake_context, request_spec,
                          dict(host_filter=None, instance_type={}))

    def test_schedule_do_not_schedule_with_hint(self):
@ -250,8 +251,8 @@ class AbstractSchedulerTestCase(test.TestCase):
            'blob': "Non-None blob data",
        }

        result = sched.schedule_run_instance(None, 1, request_spec)
        self.assertEquals(None, result)
        instances = sched.schedule_run_instance(None, request_spec)
        self.assertTrue(instances)
        self.assertTrue(was_called)

    def test_provision_resource_local(self):
@ -263,7 +264,7 @@ class AbstractSchedulerTestCase(test.TestCase):
                       fake_provision_resource_locally)

        request_spec = {'hostname': "foo"}
        sched._provision_resource(None, request_spec, 1, request_spec, {})
        sched._provision_resource(None, request_spec, request_spec, {})
        self.assertTrue(was_called)

    def test_provision_resource_remote(self):
@ -275,7 +276,7 @@ class AbstractSchedulerTestCase(test.TestCase):
                       fake_provision_resource_from_blob)

        request_spec = {}
        sched._provision_resource(None, request_spec, 1, request_spec, {})
        sched._provision_resource(None, request_spec, request_spec, {})
        self.assertTrue(was_called)

    def test_provision_resource_from_blob_empty(self):
@ -285,7 +286,7 @@ class AbstractSchedulerTestCase(test.TestCase):
        request_spec = {}
        self.assertRaises(abstract_scheduler.InvalidBlob,
                          sched._provision_resource_from_blob,
                          None, {}, 1, {}, {})
                          None, {}, {}, {})

    def test_provision_resource_from_blob_with_local_blob(self):
        """
@ -303,20 +304,21 @@ class AbstractSchedulerTestCase(test.TestCase):
            # return fake instances
            return {'id': 1, 'uuid': 'f874093c-7b17-49c0-89c3-22a5348497f9'}

        def fake_rpc_cast(*args, **kwargs):
        def fake_cast_to_compute_host(*args, **kwargs):
            pass

        self.stubs.Set(sched, '_decrypt_blob',
                       fake_decrypt_blob_returns_local_info)
        self.stubs.Set(driver, 'cast_to_compute_host',
                       fake_cast_to_compute_host)
        self.stubs.Set(compute_api.API,
                       'create_db_entry_for_new_instance',
                       fake_create_db_entry_for_new_instance)
        self.stubs.Set(rpc, 'cast', fake_rpc_cast)

        build_plan_item = {'blob': "Non-None blob data"}
        request_spec = {'image': {}, 'instance_properties': {}}

        sched._provision_resource_from_blob(None, build_plan_item, 1,
        sched._provision_resource_from_blob(None, build_plan_item,
                                            request_spec, {})
        self.assertTrue(was_called)

@ -335,7 +337,7 @@ class AbstractSchedulerTestCase(test.TestCase):

        request_spec = {'blob': "Non-None blob data"}

        sched._provision_resource_from_blob(None, request_spec, 1,
        sched._provision_resource_from_blob(None, request_spec,
                                            request_spec, {})
        self.assertTrue(was_called)

@ -352,7 +354,7 @@ class AbstractSchedulerTestCase(test.TestCase):

        request_spec = {'child_blob': True, 'child_zone': True}

        sched._provision_resource_from_blob(None, request_spec, 1,
        sched._provision_resource_from_blob(None, request_spec,
                                            request_spec, {})
        self.assertTrue(was_called)

@ -386,7 +388,7 @@ class AbstractSchedulerTestCase(test.TestCase):
        zm.service_states = {}
        sched.set_zone_manager(zm)

        fake_context = {}
        fake_context = context.RequestContext('user', 'project')
        build_plan = sched.select(fake_context,
                {'instance_type': {'memory_mb': 512},
                 'num_instances': 4})
@ -394,6 +396,45 @@ class AbstractSchedulerTestCase(test.TestCase):
        # 0 from local zones, 12 from remotes
        self.assertEqual(12, len(build_plan))

    def test_run_instance_non_admin(self):
        """Test creating an instance locally using run_instance, passing
        a non-admin context. DB actions should work."""
        sched = FakeAbstractScheduler()

        def fake_cast_to_compute_host(*args, **kwargs):
            pass

        def fake_zone_get_all_zero(context):
            # make sure this is called with admin context, even though
            # we're using user context below
            self.assertTrue(context.is_admin)
            return []

        self.stubs.Set(driver, 'cast_to_compute_host',
                       fake_cast_to_compute_host)
        self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method)
        self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all_zero)

        zm = FakeZoneManager()
        sched.set_zone_manager(zm)

        fake_context = context.RequestContext('user', 'project')

        request_spec = {
            'image': {'properties': {}},
            'security_group': [],
            'instance_properties': {
                'project_id': fake_context.project_id,
                'user_id': fake_context.user_id},
            'instance_type': {'memory_mb': 256},
            'filter_driver': 'nova.scheduler.host_filter.AllHostsFilter'
        }

        instances = sched.schedule_run_instance(fake_context, request_spec)
        self.assertEqual(len(instances), 1)
        self.assertFalse(instances[0].get('_is_precooked', False))
        nova.db.instance_destroy(fake_context, instances[0]['id'])


class BaseSchedulerTestCase(test.TestCase):
    """Test case for Base Scheduler."""

@ -134,7 +134,7 @@ class LeastCostSchedulerTestCase(test.TestCase):

        expected = []
        for idx, (hostname, services) in enumerate(hosts):
            caps = copy.deepcopy(services["compute"])
            caps = copy.deepcopy(services)
            # Costs are normalized so over 10 hosts, each host with increasing
            # free ram will cost 1/N more. Since the lowest cost host has some
            # free ram, we add in the 1/N for the base_cost

@ -35,10 +35,13 @@ from nova import service
from nova import test
from nova import rpc
from nova import utils
from nova.db.sqlalchemy import models
from nova.scheduler import api
from nova.scheduler import driver
from nova.scheduler import manager
from nova.scheduler import multi
from nova.scheduler.simple import SimpleScheduler
from nova.scheduler.zone import ZoneScheduler
from nova.compute import power_state
from nova.compute import vm_states

@ -53,17 +56,86 @@ FAKE_UUID_NOT_FOUND = 'ffffffff-ffff-ffff-ffff-ffffffffffff'
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'


class FakeContext(object):
    auth_token = None
def _create_instance_dict(**kwargs):
    """Create a dictionary for a test instance"""
    inst = {}
    # NOTE(jk0): If an integer is passed as the image_ref, the image
    # service will use the default image service (in this case, the fake).
    inst['image_ref'] = '1'
    inst['reservation_id'] = 'r-fakeres'
    inst['user_id'] = kwargs.get('user_id', 'admin')
    inst['project_id'] = kwargs.get('project_id', 'fake')
    inst['instance_type_id'] = '1'
    if 'host' in kwargs:
        inst['host'] = kwargs.get('host')
    inst['vcpus'] = kwargs.get('vcpus', 1)
    inst['memory_mb'] = kwargs.get('memory_mb', 20)
    inst['local_gb'] = kwargs.get('local_gb', 30)
    inst['vm_state'] = kwargs.get('vm_state', vm_states.ACTIVE)
    inst['power_state'] = kwargs.get('power_state', power_state.RUNNING)
    inst['task_state'] = kwargs.get('task_state', None)
    inst['availability_zone'] = kwargs.get('availability_zone', None)
    inst['ami_launch_index'] = 0
    inst['launched_on'] = kwargs.get('launched_on', 'dummy')
    return inst


def _create_volume():
    """Create a test volume"""
    vol = {}
    vol['size'] = 1
    vol['availability_zone'] = 'test'
    ctxt = context.get_admin_context()
    return db.volume_create(ctxt, vol)['id']


def _create_instance(**kwargs):
    """Create a test instance"""
    ctxt = context.get_admin_context()
    return db.instance_create(ctxt, _create_instance_dict(**kwargs))


def _create_instance_from_spec(spec):
    return _create_instance(**spec['instance_properties'])


def _create_request_spec(**kwargs):
    return dict(instance_properties=_create_instance_dict(**kwargs))
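(Note: with this refactoring the scheduler drivers take a request_spec instead of a pre-created instance id. A rough sketch of what the helper above builds and how the tests below consume it; the dict keys come from _create_instance_dict() and the comment values are illustrative:)

    request_spec = _create_request_spec(availability_zone='nova:host1')
    # -> {'instance_properties': {'image_ref': '1',
    #                             'reservation_id': 'r-fakeres',
    #                             'availability_zone': 'nova:host1',
    #                             ...}}
    # schedule_run_instance(context, request_spec) now creates the DB entry
    # itself (via create_instance_db_entry) and returns the instance dicts.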

def _fake_cast_to_compute_host(context, host, method, **kwargs):
    global _picked_host
    _picked_host = host


def _fake_cast_to_volume_host(context, host, method, **kwargs):
    global _picked_host
    _picked_host = host


def _fake_create_instance_db_entry(simple_self, context, request_spec):
    instance = _create_instance_from_spec(request_spec)
    global instance_ids
    instance_ids.append(instance['id'])
    return instance


class FakeContext(context.RequestContext):
    def __init__(self, *args, **kwargs):
        super(FakeContext, self).__init__('user', 'project', **kwargs)


class TestDriver(driver.Scheduler):
    """Scheduler Driver for Tests"""
    def schedule(context, topic, *args, **kwargs):
        return 'fallback_host'
    def schedule(self, context, topic, method, *args, **kwargs):
        host = 'fallback_host'
        driver.cast_to_host(context, topic, host, method, **kwargs)

    def schedule_named_method(context, topic, num):
        return 'named_host'
    def schedule_named_method(self, context, num=None):
        topic = 'topic'
        host = 'named_host'
        method = 'named_method'
        driver.cast_to_host(context, topic, host, method, num=num)
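(Note: the rewritten TestDriver no longer returns a host name -- schedulers now dispatch the work themselves through driver.cast_to_host(). A sketch of roughly what that helper stands in for, inferred from the 'topic.host' queue naming that test_fallback below expects; not the actual implementation:)

    rpc.cast(context, '%s.%s' % (topic, host),
             {'method': method, 'args': kwargs})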


class SchedulerTestCase(test.TestCase):
@ -89,31 +161,16 @@ class SchedulerTestCase(test.TestCase):

        return db.service_get(ctxt, s_ref['id'])

    def _create_instance(self, **kwargs):
        """Create a test instance"""
        ctxt = context.get_admin_context()
        inst = {}
        inst['user_id'] = 'admin'
        inst['project_id'] = kwargs.get('project_id', 'fake')
        inst['host'] = kwargs.get('host', 'dummy')
        inst['vcpus'] = kwargs.get('vcpus', 1)
        inst['memory_mb'] = kwargs.get('memory_mb', 10)
        inst['local_gb'] = kwargs.get('local_gb', 20)
        inst['vm_state'] = kwargs.get('vm_state', vm_states.ACTIVE)
        inst['power_state'] = kwargs.get('power_state', power_state.RUNNING)
        inst['task_state'] = kwargs.get('task_state', None)
        return db.instance_create(ctxt, inst)

    def test_fallback(self):
        scheduler = manager.SchedulerManager()
        self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
        ctxt = context.get_admin_context()
        rpc.cast(ctxt,
                 'topic.fallback_host',
                 'fake_topic.fallback_host',
                 {'method': 'noexist',
                  'args': {'num': 7}})
        self.mox.ReplayAll()
        scheduler.noexist(ctxt, 'topic', num=7)
        scheduler.noexist(ctxt, 'fake_topic', num=7)

    def test_named_method(self):
        scheduler = manager.SchedulerManager()
@ -173,8 +230,8 @@ class SchedulerTestCase(test.TestCase):
        scheduler = manager.SchedulerManager()
        ctxt = context.get_admin_context()
        s_ref = self._create_compute_service()
        i_ref1 = self._create_instance(project_id='p-01', host=s_ref['host'])
        i_ref2 = self._create_instance(project_id='p-02', vcpus=3,
        i_ref1 = _create_instance(project_id='p-01', host=s_ref['host'])
        i_ref2 = _create_instance(project_id='p-02', vcpus=3,
                                  host=s_ref['host'])

        result = scheduler.show_host_resources(ctxt, s_ref['host'])
@ -197,7 +254,10 @@ class ZoneSchedulerTestCase(test.TestCase):
    """Test case for zone scheduler"""
    def setUp(self):
        super(ZoneSchedulerTestCase, self).setUp()
        self.flags(scheduler_driver='nova.scheduler.zone.ZoneScheduler')
        self.flags(
            scheduler_driver='nova.scheduler.multi.MultiScheduler',
            compute_scheduler_driver='nova.scheduler.zone.ZoneScheduler',
            volume_scheduler_driver='nova.scheduler.zone.ZoneScheduler')

    def _create_service_model(self, **kwargs):
        service = db.sqlalchemy.models.Service()
@ -214,7 +274,7 @@ class ZoneSchedulerTestCase(test.TestCase):

    def test_with_two_zones(self):
        scheduler = manager.SchedulerManager()
        ctxt = context.get_admin_context()
        ctxt = context.RequestContext('user', 'project')
        service_list = [self._create_service_model(id=1,
                                                   host='host1',
                                                   zone='zone1'),
@ -230,66 +290,53 @@ class ZoneSchedulerTestCase(test.TestCase):
                        self._create_service_model(id=5,
                                                   host='host5',
                                                   zone='zone2')]

        request_spec = _create_request_spec(availability_zone='zone1')

        fake_instance = _create_instance_dict(
                **request_spec['instance_properties'])
        fake_instance['id'] = 100
        fake_instance['uuid'] = FAKE_UUID

        self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
        self.mox.StubOutWithMock(db, 'instance_update')
        # Assumes we're testing with MultiScheduler
        compute_sched_driver = scheduler.driver.drivers['compute']
        self.mox.StubOutWithMock(compute_sched_driver,
                                 'create_instance_db_entry')
        self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)

        arg = IgnoreArg()
        db.service_get_all_by_topic(arg, arg).AndReturn(service_list)
        self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
        rpc.cast(ctxt,
        compute_sched_driver.create_instance_db_entry(arg,
                request_spec).AndReturn(fake_instance)
        db.instance_update(arg, 100, {'host': 'host1', 'scheduled_at': arg})
        rpc.cast(arg,
                 'compute.host1',
                 {'method': 'run_instance',
                  'args': {'instance_id': 'i-ffffffff',
                           'availability_zone': 'zone1'}})
                  'args': {'instance_id': 100}})
        self.mox.ReplayAll()
        scheduler.run_instance(ctxt,
                               'compute',
                               instance_id='i-ffffffff',
                               availability_zone='zone1')
                               request_spec=request_spec)
class SimpleDriverTestCase(test.TestCase):
    """Test case for simple driver"""
    def setUp(self):
        super(SimpleDriverTestCase, self).setUp()
        simple_scheduler = 'nova.scheduler.simple.SimpleScheduler'
        self.flags(connection_type='fake',
                   stub_network=True,
                   max_cores=4,
                   max_gigabytes=4,
                   network_manager='nova.network.manager.FlatManager',
                   volume_driver='nova.volume.driver.FakeISCSIDriver',
                   scheduler_driver='nova.scheduler.simple.SimpleScheduler')
                   stub_network=True,
                   max_cores=4,
                   max_gigabytes=4,
                   network_manager='nova.network.manager.FlatManager',
                   volume_driver='nova.volume.driver.FakeISCSIDriver',
                   scheduler_driver='nova.scheduler.multi.MultiScheduler',
                   compute_scheduler_driver=simple_scheduler,
                   volume_scheduler_driver=simple_scheduler)
        self.scheduler = manager.SchedulerManager()
        self.context = context.get_admin_context()
        self.user_id = 'fake'
        self.project_id = 'fake'

    def _create_instance(self, **kwargs):
        """Create a test instance"""
        inst = {}
        # NOTE(jk0): If an integer is passed as the image_ref, the image
        # service will use the default image service (in this case, the fake).
        inst['image_ref'] = '1'
        inst['reservation_id'] = 'r-fakeres'
        inst['user_id'] = self.user_id
        inst['project_id'] = self.project_id
        inst['instance_type_id'] = '1'
        inst['vcpus'] = kwargs.get('vcpus', 1)
        inst['ami_launch_index'] = 0
        inst['availability_zone'] = kwargs.get('availability_zone', None)
        inst['host'] = kwargs.get('host', 'dummy')
        inst['memory_mb'] = kwargs.get('memory_mb', 20)
        inst['local_gb'] = kwargs.get('local_gb', 30)
        inst['launched_on'] = kwargs.get('launghed_on', 'dummy')
        inst['vm_state'] = kwargs.get('vm_state', vm_states.ACTIVE)
        inst['task_state'] = kwargs.get('task_state', None)
        inst['power_state'] = kwargs.get('power_state', power_state.RUNNING)
        return db.instance_create(self.context, inst)['id']

    def _create_volume(self):
        """Create a test volume"""
        vol = {}
        vol['size'] = 1
        vol['availability_zone'] = 'test'
        return db.volume_create(self.context, vol)['id']

    def _create_compute_service(self, **kwargs):
        """Create a compute service."""
@ -369,14 +416,30 @@ class SimpleDriverTestCase(test.TestCase):
                                    'compute',
                                    FLAGS.compute_manager)
        compute2.start()
        instance_id1 = self._create_instance()
        compute1.run_instance(self.context, instance_id1)
        instance_id2 = self._create_instance()
        host = self.scheduler.driver.schedule_run_instance(self.context,
                                                           instance_id2)
        self.assertEqual(host, 'host2')
        compute1.terminate_instance(self.context, instance_id1)
        db.instance_destroy(self.context, instance_id2)

        global instance_ids
        instance_ids = []
        instance_ids.append(_create_instance()['id'])
        compute1.run_instance(self.context, instance_ids[0])

        self.stubs.Set(SimpleScheduler,
                'create_instance_db_entry', _fake_create_instance_db_entry)
        global _picked_host
        _picked_host = None
        self.stubs.Set(driver,
                'cast_to_compute_host', _fake_cast_to_compute_host)

        request_spec = _create_request_spec()
        instances = self.scheduler.driver.schedule_run_instance(
                self.context, request_spec)

        self.assertEqual(_picked_host, 'host2')
        self.assertEqual(len(instance_ids), 2)
        self.assertEqual(len(instances), 1)
        self.assertEqual(instances[0].get('_is_precooked', False), False)

        compute1.terminate_instance(self.context, instance_ids[0])
        compute2.terminate_instance(self.context, instance_ids[1])
        compute1.kill()
        compute2.kill()

@ -392,14 +455,27 @@ class SimpleDriverTestCase(test.TestCase):
                                    'compute',
                                    FLAGS.compute_manager)
        compute2.start()
        instance_id1 = self._create_instance()
        compute1.run_instance(self.context, instance_id1)
        instance_id2 = self._create_instance(availability_zone='nova:host1')
        host = self.scheduler.driver.schedule_run_instance(self.context,
                                                           instance_id2)
        self.assertEqual('host1', host)
        compute1.terminate_instance(self.context, instance_id1)
        db.instance_destroy(self.context, instance_id2)

        global instance_ids
        instance_ids = []
        instance_ids.append(_create_instance()['id'])
        compute1.run_instance(self.context, instance_ids[0])

        self.stubs.Set(SimpleScheduler,
                'create_instance_db_entry', _fake_create_instance_db_entry)
        global _picked_host
        _picked_host = None
        self.stubs.Set(driver,
                'cast_to_compute_host', _fake_cast_to_compute_host)

        request_spec = _create_request_spec(availability_zone='nova:host1')
        instances = self.scheduler.driver.schedule_run_instance(
                self.context, request_spec)
        self.assertEqual(_picked_host, 'host1')
        self.assertEqual(len(instance_ids), 2)

        compute1.terminate_instance(self.context, instance_ids[0])
        compute1.terminate_instance(self.context, instance_ids[1])
        compute1.kill()
        compute2.kill()

@ -414,12 +490,21 @@ class SimpleDriverTestCase(test.TestCase):
        delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2)
        past = now - delta
        db.service_update(self.context, s1['id'], {'updated_at': past})
        instance_id2 = self._create_instance(availability_zone='nova:host1')

        global instance_ids
        instance_ids = []
        self.stubs.Set(SimpleScheduler,
                'create_instance_db_entry', _fake_create_instance_db_entry)
        global _picked_host
        _picked_host = None
        self.stubs.Set(driver,
                'cast_to_compute_host', _fake_cast_to_compute_host)

        request_spec = _create_request_spec(availability_zone='nova:host1')
        self.assertRaises(driver.WillNotSchedule,
                          self.scheduler.driver.schedule_run_instance,
                          self.context,
                          instance_id2)
        db.instance_destroy(self.context, instance_id2)
                          request_spec)
        compute1.kill()

    def test_will_schedule_on_disabled_host_if_specified_no_queue(self):
@ -430,11 +515,22 @@ class SimpleDriverTestCase(test.TestCase):
        compute1.start()
        s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
        db.service_update(self.context, s1['id'], {'disabled': True})
        instance_id2 = self._create_instance(availability_zone='nova:host1')
        host = self.scheduler.driver.schedule_run_instance(self.context,
                                                           instance_id2)
        self.assertEqual('host1', host)
        db.instance_destroy(self.context, instance_id2)

        global instance_ids
        instance_ids = []
        self.stubs.Set(SimpleScheduler,
                'create_instance_db_entry', _fake_create_instance_db_entry)
        global _picked_host
        _picked_host = None
        self.stubs.Set(driver,
                'cast_to_compute_host', _fake_cast_to_compute_host)

        request_spec = _create_request_spec(availability_zone='nova:host1')
        instances = self.scheduler.driver.schedule_run_instance(
                self.context, request_spec)
        self.assertEqual(_picked_host, 'host1')
        self.assertEqual(len(instance_ids), 1)
        compute1.terminate_instance(self.context, instance_ids[0])
        compute1.kill()

    def test_too_many_cores_no_queue(self):
@ -452,17 +548,17 @@ class SimpleDriverTestCase(test.TestCase):
        instance_ids1 = []
        instance_ids2 = []
        for index in xrange(FLAGS.max_cores):
            instance_id = self._create_instance()
            instance_id = _create_instance()['id']
            compute1.run_instance(self.context, instance_id)
            instance_ids1.append(instance_id)
            instance_id = self._create_instance()
            instance_id = _create_instance()['id']
            compute2.run_instance(self.context, instance_id)
            instance_ids2.append(instance_id)
        instance_id = self._create_instance()
        request_spec = _create_request_spec()
        self.assertRaises(driver.NoValidHost,
                          self.scheduler.driver.schedule_run_instance,
                          self.context,
                          instance_id)
                          request_spec)
        for instance_id in instance_ids1:
            compute1.terminate_instance(self.context, instance_id)
        for instance_id in instance_ids2:
@ -481,13 +577,19 @@ class SimpleDriverTestCase(test.TestCase):
                                   'nova-volume',
                                   'volume',
                                   FLAGS.volume_manager)

        global _picked_host
        _picked_host = None
        self.stubs.Set(driver,
                'cast_to_volume_host', _fake_cast_to_volume_host)

        volume2.start()
        volume_id1 = self._create_volume()
        volume_id1 = _create_volume()
        volume1.create_volume(self.context, volume_id1)
        volume_id2 = self._create_volume()
        host = self.scheduler.driver.schedule_create_volume(self.context,
                                                            volume_id2)
        self.assertEqual(host, 'host2')
        volume_id2 = _create_volume()
        self.scheduler.driver.schedule_create_volume(self.context,
                                                     volume_id2)
        self.assertEqual(_picked_host, 'host2')
        volume1.delete_volume(self.context, volume_id1)
        db.volume_destroy(self.context, volume_id2)

@ -514,17 +616,30 @@ class SimpleDriverTestCase(test.TestCase):
        compute2.kill()

    def test_least_busy_host_gets_instance(self):
        """Ensures the host with less cores gets the next one"""
        """Ensures the host with less cores gets the next one w/ Simple"""
        compute1 = self.start_service('compute', host='host1')
        compute2 = self.start_service('compute', host='host2')
        instance_id1 = self._create_instance()
        compute1.run_instance(self.context, instance_id1)
        instance_id2 = self._create_instance()
        host = self.scheduler.driver.schedule_run_instance(self.context,
                                                           instance_id2)
        self.assertEqual(host, 'host2')
        compute1.terminate_instance(self.context, instance_id1)
        db.instance_destroy(self.context, instance_id2)

        global instance_ids
        instance_ids = []
        instance_ids.append(_create_instance()['id'])
        compute1.run_instance(self.context, instance_ids[0])

        self.stubs.Set(SimpleScheduler,
                'create_instance_db_entry', _fake_create_instance_db_entry)
        global _picked_host
        _picked_host = None
        self.stubs.Set(driver,
                'cast_to_compute_host', _fake_cast_to_compute_host)

        request_spec = _create_request_spec()
        instances = self.scheduler.driver.schedule_run_instance(
                self.context, request_spec)
        self.assertEqual(_picked_host, 'host2')
        self.assertEqual(len(instance_ids), 2)

        compute1.terminate_instance(self.context, instance_ids[0])
        compute2.terminate_instance(self.context, instance_ids[1])
        compute1.kill()
        compute2.kill()
@ -532,41 +647,64 @@ class SimpleDriverTestCase(test.TestCase):
        """Ensures if you set availability_zone it launches on that zone"""
        compute1 = self.start_service('compute', host='host1')
        compute2 = self.start_service('compute', host='host2')
        instance_id1 = self._create_instance()
        compute1.run_instance(self.context, instance_id1)
        instance_id2 = self._create_instance(availability_zone='nova:host1')
        host = self.scheduler.driver.schedule_run_instance(self.context,
                                                           instance_id2)
        self.assertEqual('host1', host)
        compute1.terminate_instance(self.context, instance_id1)
        db.instance_destroy(self.context, instance_id2)

        global instance_ids
        instance_ids = []
        instance_ids.append(_create_instance()['id'])
        compute1.run_instance(self.context, instance_ids[0])

        self.stubs.Set(SimpleScheduler,
                'create_instance_db_entry', _fake_create_instance_db_entry)
        global _picked_host
        _picked_host = None
        self.stubs.Set(driver,
                'cast_to_compute_host', _fake_cast_to_compute_host)

        request_spec = _create_request_spec(availability_zone='nova:host1')
        instances = self.scheduler.driver.schedule_run_instance(
                self.context, request_spec)
        self.assertEqual(_picked_host, 'host1')
        self.assertEqual(len(instance_ids), 2)

        compute1.terminate_instance(self.context, instance_ids[0])
        compute1.terminate_instance(self.context, instance_ids[1])
        compute1.kill()
        compute2.kill()

    def test_wont_sechedule_if_specified_host_is_down(self):
    def test_wont_schedule_if_specified_host_is_down(self):
        compute1 = self.start_service('compute', host='host1')
        s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
        now = utils.utcnow()
        delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2)
        past = now - delta
        db.service_update(self.context, s1['id'], {'updated_at': past})
        instance_id2 = self._create_instance(availability_zone='nova:host1')
        request_spec = _create_request_spec(availability_zone='nova:host1')
        self.assertRaises(driver.WillNotSchedule,
                          self.scheduler.driver.schedule_run_instance,
                          self.context,
                          instance_id2)
        db.instance_destroy(self.context, instance_id2)
                          request_spec)
        compute1.kill()

    def test_will_schedule_on_disabled_host_if_specified(self):
        compute1 = self.start_service('compute', host='host1')
        s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
        db.service_update(self.context, s1['id'], {'disabled': True})
        instance_id2 = self._create_instance(availability_zone='nova:host1')
        host = self.scheduler.driver.schedule_run_instance(self.context,
                                                           instance_id2)
        self.assertEqual('host1', host)
        db.instance_destroy(self.context, instance_id2)

        global instance_ids
        instance_ids = []
        self.stubs.Set(SimpleScheduler,
                'create_instance_db_entry', _fake_create_instance_db_entry)
        global _picked_host
        _picked_host = None
        self.stubs.Set(driver,
                'cast_to_compute_host', _fake_cast_to_compute_host)

        request_spec = _create_request_spec(availability_zone='nova:host1')
        instances = self.scheduler.driver.schedule_run_instance(
                self.context, request_spec)
        self.assertEqual(_picked_host, 'host1')
        self.assertEqual(len(instance_ids), 1)
        compute1.terminate_instance(self.context, instance_ids[0])
        compute1.kill()

    def test_too_many_cores(self):
@ -576,18 +714,30 @@ class SimpleDriverTestCase(test.TestCase):
        instance_ids1 = []
        instance_ids2 = []
        for index in xrange(FLAGS.max_cores):
            instance_id = self._create_instance()
            instance_id = _create_instance()['id']
            compute1.run_instance(self.context, instance_id)
            instance_ids1.append(instance_id)
            instance_id = self._create_instance()
            instance_id = _create_instance()['id']
            compute2.run_instance(self.context, instance_id)
            instance_ids2.append(instance_id)
        instance_id = self._create_instance()

        def _create_instance_db_entry(simple_self, context, request_spec):
            self.fail(_("Shouldn't try to create DB entry when at "
                        "max cores"))
        self.stubs.Set(SimpleScheduler,
                'create_instance_db_entry', _create_instance_db_entry)

        global _picked_host
        _picked_host = None
        self.stubs.Set(driver,
                'cast_to_compute_host', _fake_cast_to_compute_host)

        request_spec = _create_request_spec()

        self.assertRaises(driver.NoValidHost,
                          self.scheduler.driver.schedule_run_instance,
                          self.context,
                          instance_id)
        db.instance_destroy(self.context, instance_id)
                          request_spec)
        for instance_id in instance_ids1:
            compute1.terminate_instance(self.context, instance_id)
        for instance_id in instance_ids2:
@ -599,12 +749,18 @@ class SimpleDriverTestCase(test.TestCase):
        """Ensures the host with less gigabytes gets the next one"""
        volume1 = self.start_service('volume', host='host1')
        volume2 = self.start_service('volume', host='host2')
        volume_id1 = self._create_volume()

        global _picked_host
        _picked_host = None
        self.stubs.Set(driver,
                'cast_to_volume_host', _fake_cast_to_volume_host)

        volume_id1 = _create_volume()
        volume1.create_volume(self.context, volume_id1)
        volume_id2 = self._create_volume()
        host = self.scheduler.driver.schedule_create_volume(self.context,
                                                            volume_id2)
        self.assertEqual(host, 'host2')
        volume_id2 = _create_volume()
        self.scheduler.driver.schedule_create_volume(self.context,
                                                     volume_id2)
        self.assertEqual(_picked_host, 'host2')
        volume1.delete_volume(self.context, volume_id1)
        db.volume_destroy(self.context, volume_id2)
        volume1.kill()
@ -617,13 +773,13 @@ class SimpleDriverTestCase(test.TestCase):
        volume_ids1 = []
        volume_ids2 = []
        for index in xrange(FLAGS.max_gigabytes):
            volume_id = self._create_volume()
            volume_id = _create_volume()
            volume1.create_volume(self.context, volume_id)
            volume_ids1.append(volume_id)
            volume_id = self._create_volume()
            volume_id = _create_volume()
            volume2.create_volume(self.context, volume_id)
            volume_ids2.append(volume_id)
        volume_id = self._create_volume()
        volume_id = _create_volume()
        self.assertRaises(driver.NoValidHost,
                          self.scheduler.driver.schedule_create_volume,
                          self.context,
@ -636,13 +792,13 @@ class SimpleDriverTestCase(test.TestCase):
        volume2.kill()

    def test_scheduler_live_migration_with_volume(self):
        """scheduler_live_migration() works correctly as expected.
        """schedule_live_migration() works correctly as expected.

        Also, checks instance state is changed from 'running' -> 'migrating'.

        """

        instance_id = self._create_instance()
        instance_id = _create_instance(host='dummy')['id']
        i_ref = db.instance_get(self.context, instance_id)
        dic = {'instance_id': instance_id, 'size': 1}
        v_ref = db.volume_create(self.context, dic)
@ -680,7 +836,8 @@ class SimpleDriverTestCase(test.TestCase):
    def test_live_migration_src_check_instance_not_running(self):
        """The instance given by instance_id is not running."""

        instance_id = self._create_instance(power_state=power_state.NOSTATE)
        instance_id = _create_instance(
                power_state=power_state.NOSTATE)['id']
        i_ref = db.instance_get(self.context, instance_id)

        try:
@ -695,7 +852,7 @@ class SimpleDriverTestCase(test.TestCase):
    def test_live_migration_src_check_volume_node_not_alive(self):
        """Raise exception when volume node is not alive."""

        instance_id = self._create_instance()
        instance_id = _create_instance()['id']
        i_ref = db.instance_get(self.context, instance_id)
        dic = {'instance_id': instance_id, 'size': 1}
        v_ref = db.volume_create(self.context, {'instance_id': instance_id,
@ -715,7 +872,7 @@ class SimpleDriverTestCase(test.TestCase):

    def test_live_migration_src_check_compute_node_not_alive(self):
        """Confirms src-compute node is alive."""
        instance_id = self._create_instance()
        instance_id = _create_instance()['id']
        i_ref = db.instance_get(self.context, instance_id)
        t = utils.utcnow() - datetime.timedelta(10)
        s_ref = self._create_compute_service(created_at=t, updated_at=t,
@ -730,7 +887,7 @@ class SimpleDriverTestCase(test.TestCase):

    def test_live_migration_src_check_works_correctly(self):
        """Confirms this method finishes with no error."""
        instance_id = self._create_instance()
        instance_id = _create_instance()['id']
        i_ref = db.instance_get(self.context, instance_id)
        s_ref = self._create_compute_service(host=i_ref['host'])

@ -743,7 +900,7 @@ class SimpleDriverTestCase(test.TestCase):

    def test_live_migration_dest_check_not_alive(self):
        """Confirms exception raises in case dest host does not exist."""
        instance_id = self._create_instance()
        instance_id = _create_instance()['id']
        i_ref = db.instance_get(self.context, instance_id)
        t = utils.utcnow() - datetime.timedelta(10)
        s_ref = self._create_compute_service(created_at=t, updated_at=t,
@ -758,7 +915,7 @@ class SimpleDriverTestCase(test.TestCase):

    def test_live_migration_dest_check_service_same_host(self):
        """Confirms exception raises in case dest and src is same host."""
        instance_id = self._create_instance()
        instance_id = _create_instance()['id']
        i_ref = db.instance_get(self.context, instance_id)
        s_ref = self._create_compute_service(host=i_ref['host'])

@ -771,9 +928,9 @@ class SimpleDriverTestCase(test.TestCase):

    def test_live_migration_dest_check_service_lack_memory(self):
        """Confirms exception raises when dest doesn't have enough memory."""
        instance_id = self._create_instance()
        instance_id2 = self._create_instance(host='somewhere',
                                             memory_mb=12)
        instance_id = _create_instance()['id']
        instance_id2 = _create_instance(host='somewhere',
                                        memory_mb=12)['id']
        i_ref = db.instance_get(self.context, instance_id)
        s_ref = self._create_compute_service(host='somewhere')

@ -787,9 +944,9 @@ class SimpleDriverTestCase(test.TestCase):

    def test_block_migration_dest_check_service_lack_disk(self):
        """Confirms exception raises when dest doesn't have enough disk."""
        instance_id = self._create_instance()
        instance_id2 = self._create_instance(host='somewhere',
                                             local_gb=70)
        instance_id = _create_instance()['id']
        instance_id2 = _create_instance(host='somewhere',
                                        local_gb=70)['id']
        i_ref = db.instance_get(self.context, instance_id)
        s_ref = self._create_compute_service(host='somewhere')

@ -803,7 +960,7 @@ class SimpleDriverTestCase(test.TestCase):

    def test_live_migration_dest_check_service_works_correctly(self):
        """Confirms method finishes with no error."""
        instance_id = self._create_instance()
        instance_id = _create_instance()['id']
        i_ref = db.instance_get(self.context, instance_id)
        s_ref = self._create_compute_service(host='somewhere',
                                             memory_mb_used=5)
@ -821,7 +978,7 @@ class SimpleDriverTestCase(test.TestCase):

        dest = 'dummydest'
        # mocks for live_migration_common_check()
        instance_id = self._create_instance()
        instance_id = _create_instance()['id']
        i_ref = db.instance_get(self.context, instance_id)
        t1 = utils.utcnow() - datetime.timedelta(10)
        s_ref = self._create_compute_service(created_at=t1, updated_at=t1,
@ -855,7 +1012,7 @@ class SimpleDriverTestCase(test.TestCase):
    def test_live_migration_common_check_service_different_hypervisor(self):
        """Original host and dest host has different hypervisor type."""
        dest = 'dummydest'
        instance_id = self._create_instance()
        instance_id = _create_instance(host='dummy')['id']
        i_ref = db.instance_get(self.context, instance_id)

        # compute service for destination
@ -880,7 +1037,7 @@ class SimpleDriverTestCase(test.TestCase):
    def test_live_migration_common_check_service_different_version(self):
        """Original host and dest host has different hypervisor version."""
        dest = 'dummydest'
        instance_id = self._create_instance()
        instance_id = _create_instance(host='dummy')['id']
        i_ref = db.instance_get(self.context, instance_id)

        # compute service for destination
@ -904,10 +1061,10 @@ class SimpleDriverTestCase(test.TestCase):
        db.service_destroy(self.context, s_ref2['id'])

    def test_live_migration_common_check_checking_cpuinfo_fail(self):
        """Raise excetion when original host doen't have compatible cpu."""
        """Raise exception when original host doesn't have compatible cpu."""

        dest = 'dummydest'
        instance_id = self._create_instance()
        instance_id = _create_instance(host='dummy')['id']
        i_ref = db.instance_get(self.context, instance_id)

        # compute service for destination
@ -927,7 +1084,7 @@ class SimpleDriverTestCase(test.TestCase):

        self.mox.ReplayAll()
        try:
            self.scheduler.driver._live_migration_common_check(self.context,
            driver._live_migration_common_check(self.context,
                                                               i_ref,
                                                               dest,
                                                               False)
@ -1021,7 +1178,6 @@ class FakeResource(object):
class ZoneRedirectTest(test.TestCase):
    def setUp(self):
        super(ZoneRedirectTest, self).setUp()
        self.stubs = stubout.StubOutForTesting()

        self.stubs.Set(db, 'zone_get_all', zone_get_all)
        self.stubs.Set(db, 'instance_get_by_uuid',
@ -1029,7 +1185,6 @@ class ZoneRedirectTest(test.TestCase):
        self.flags(enable_zone_routing=True)

    def tearDown(self):
        self.stubs.UnsetAll()
        super(ZoneRedirectTest, self).tearDown()

    def test_trap_found_locally(self):
@ -1257,12 +1412,10 @@ class FakeNovaClientZones(object):
|
||||
class CallZoneMethodTest(test.TestCase):
|
||||
def setUp(self):
|
||||
super(CallZoneMethodTest, self).setUp()
|
||||
self.stubs = stubout.StubOutForTesting()
|
||||
self.stubs.Set(db, 'zone_get_all', zone_get_all)
|
||||
self.stubs.Set(novaclient, 'Client', FakeNovaClientZones)
|
||||
|
||||
def tearDown(self):
|
||||
self.stubs.UnsetAll()
|
||||
super(CallZoneMethodTest, self).tearDown()
|
||||
|
||||
def test_call_zone_method(self):
|
||||
|
@ -22,6 +22,7 @@ from nova import db
|
||||
from nova import exception
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
from nova import rpc
|
||||
from nova import test
|
||||
from nova import utils
|
||||
from nova.volume import volume_types
|
||||
@ -37,6 +38,10 @@ scheduled_volume = {}
|
||||
global_volume = {}
|
||||
|
||||
|
||||
def fake_rpc_cast(*args, **kwargs):
|
||||
pass
|
||||
|
||||
|
||||
class FakeVsaLeastUsedScheduler(
|
||||
vsa_sched.VsaSchedulerLeastUsedHost):
|
||||
# No need to stub anything at the moment
@ -170,12 +175,10 @@ class VsaSchedulerTestCase(test.TestCase):
        LOG.debug(_("Test: provision vol %(name)s on host %(host)s"),
                    locals())
        LOG.debug(_("\t vol=%(vol)s"), locals())
        pass

    def _fake_vsa_update(self, context, vsa_id, values):
        LOG.debug(_("Test: VSA update request: vsa_id=%(vsa_id)s "\
                    "values=%(values)s"), locals())
        pass

    def _fake_volume_create(self, context, options):
        LOG.debug(_("Test: Volume create: %s"), options)
@ -196,7 +199,6 @@ class VsaSchedulerTestCase(test.TestCase):
                    "values=%(values)s"), locals())
        global scheduled_volume
        scheduled_volume = {'id': volume_id, 'host': values['host']}
        pass

    def _fake_service_get_by_args(self, context, host, binary):
        return "service"
@ -209,7 +211,6 @@ class VsaSchedulerTestCase(test.TestCase):

    def setUp(self, sched_class=None):
        super(VsaSchedulerTestCase, self).setUp()
        self.stubs = stubout.StubOutForTesting()
        self.context = context.get_admin_context()

        if sched_class is None:
@ -220,6 +221,7 @@ class VsaSchedulerTestCase(test.TestCase):
        self.host_num = 10
        self.drive_type_num = 5

        self.stubs.Set(rpc, 'cast', fake_rpc_cast)
        self.stubs.Set(self.sched,
                        '_get_service_states', self._fake_get_service_states)
        self.stubs.Set(self.sched,
@ -234,8 +236,6 @@ class VsaSchedulerTestCase(test.TestCase):
    def tearDown(self):
        for name in self.created_types_lst:
            volume_types.purge(self.context, name)

        self.stubs.UnsetAll()
        super(VsaSchedulerTestCase, self).tearDown()

    def test_vsa_sched_create_volumes_simple(self):
@ -333,6 +333,8 @@ class VsaSchedulerTestCase(test.TestCase):
        self.stubs.Set(self.sched,
                        '_get_service_states', self._fake_get_service_states)
        self.stubs.Set(nova.db, 'volume_create', self._fake_volume_create)
        self.stubs.Set(nova.db, 'volume_update', self._fake_volume_update)
        self.stubs.Set(rpc, 'cast', fake_rpc_cast)

        self.sched.schedule_create_volumes(self.context,
                                           request_spec,
@ -467,10 +469,9 @@ class VsaSchedulerTestCase(test.TestCase):
        self.stubs.Set(self.sched,
                        'service_is_up', self._fake_service_is_up_True)

        host = self.sched.schedule_create_volume(self.context,
                    123, availability_zone=None)
        self.sched.schedule_create_volume(self.context,
                    123, availability_zone=None)

        self.assertEqual(host, 'host_3')
        self.assertEqual(scheduled_volume['id'], 123)
        self.assertEqual(scheduled_volume['host'], 'host_3')

@ -514,10 +515,9 @@ class VsaSchedulerTestCase(test.TestCase):
        global_volume['volume_type_id'] = volume_type['id']
        global_volume['size'] = 0

        host = self.sched.schedule_create_volume(self.context,
                    123, availability_zone=None)
        self.sched.schedule_create_volume(self.context,
                    123, availability_zone=None)

        self.assertEqual(host, 'host_2')
        self.assertEqual(scheduled_volume['id'], 123)
        self.assertEqual(scheduled_volume['host'], 'host_2')

@ -529,7 +529,6 @@ class VsaSchedulerTestCaseMostAvail(VsaSchedulerTestCase):
                        FakeVsaMostAvailCapacityScheduler())

    def tearDown(self):
        self.stubs.UnsetAll()
        super(VsaSchedulerTestCaseMostAvail, self).tearDown()

    def test_vsa_sched_create_single_volume(self):
@ -558,10 +557,9 @@ class VsaSchedulerTestCaseMostAvail(VsaSchedulerTestCase):
        global_volume['volume_type_id'] = volume_type['id']
        global_volume['size'] = 0

        host = self.sched.schedule_create_volume(self.context,
                    123, availability_zone=None)
        self.sched.schedule_create_volume(self.context,
                    123, availability_zone=None)

        self.assertEqual(host, 'host_9')
        self.assertEqual(scheduled_volume['id'], 123)
        self.assertEqual(scheduled_volume['host'], 'host_9')

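(Note: in the scheduler test changes above, `schedule_create_volume()` no longer returns the chosen host; the tests instead observe the scheduler's decision through the stubbed `nova.db.volume_update`, which records it in the module-level `scheduled_volume`. A minimal sketch of that capture pattern, using only names that appear in the diff:)

    scheduled_volume = {}


    def _fake_volume_update(self, context, volume_id, values):
        # Record the host the scheduler picked; assertions read it back
        # instead of a return value from schedule_create_volume().
        global scheduled_volume
        scheduled_volume = {'id': volume_id, 'host': values['host']}
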
@ -28,6 +28,7 @@ from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova.scheduler import driver as scheduler_driver
from nova import rpc
from nova import test
from nova import utils
@ -56,6 +57,38 @@ class FakeTime(object):
        self.counter += t


orig_rpc_call = rpc.call
orig_rpc_cast = rpc.cast


def rpc_call_wrapper(context, topic, msg, do_cast=True):
    """Stub out the scheduler creating the instance entry"""
    if topic == FLAGS.scheduler_topic and \
            msg['method'] == 'run_instance':
        request_spec = msg['args']['request_spec']
        scheduler = scheduler_driver.Scheduler
        num_instances = request_spec.get('num_instances', 1)
        instances = []
        for x in xrange(num_instances):
            instance = scheduler().create_instance_db_entry(
                    context, request_spec)
            encoded = scheduler_driver.encode_instance(instance)
            instances.append(encoded)
        return instances
    else:
        if do_cast:
            orig_rpc_cast(context, topic, msg)
        else:
            return orig_rpc_call(context, topic, msg)


def rpc_cast_wrapper(context, topic, msg):
    """Stub out the scheduler creating the instance entry in
    the reservation_id case.
    """
    rpc_call_wrapper(context, topic, msg, do_cast=True)


def nop_report_driver_status(self):
    pass

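(Note: after this refactoring, `compute_api.create()` no longer inserts the instance DB rows itself; it sends a `request_spec` to the scheduler over RPC, and the scheduler calls `create_instance_db_entry()` for each instance. The wrappers above replay that in-process so the tests get real DB rows without a running scheduler. An illustrative sketch of the message shape the wrapper intercepts -- the field values here are invented; the keys mirror the wrapper's own lookups:)

    msg = {
        'method': 'run_instance',
        'args': {
            'request_spec': {
                'num_instances': 2,  # the wrapper defaults this to 1 if absent
            },
        },
    }
    # rpc_call_wrapper(context, FLAGS.scheduler_topic, msg, do_cast=False)
    # creates two instance DB entries via create_instance_db_entry() and
    # returns them in encode_instance() form, as a live scheduler would.
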
@ -80,6 +113,8 @@ class ComputeTestCase(test.TestCase):
                    'properties': {'kernel_id': 1, 'ramdisk_id': 1}}

        self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
        self.stubs.Set(rpc, 'call', rpc_call_wrapper)
        self.stubs.Set(rpc, 'cast', rpc_cast_wrapper)

    def _create_instance(self, params=None):
        """Create a test instance"""
@ -142,7 +177,7 @@ class ComputeTestCase(test.TestCase):
        """Verify that an instance cannot be created without a display_name."""
        cases = [dict(), dict(display_name=None)]
        for instance in cases:
            ref = self.compute_api.create(self.context,
            (ref, resv_id) = self.compute_api.create(self.context,
                instance_types.get_default_instance_type(), None, **instance)
            try:
                self.assertNotEqual(ref[0]['display_name'], None)
@ -152,7 +187,7 @@ class ComputeTestCase(test.TestCase):
    def test_create_instance_associates_security_groups(self):
        """Make sure create associates security groups"""
        group = self._create_group()
        ref = self.compute_api.create(
        (ref, resv_id) = self.compute_api.create(
                self.context,
                instance_type=instance_types.get_default_instance_type(),
                image_href=None,
@ -212,7 +247,7 @@ class ComputeTestCase(test.TestCase):
                 ('<}\x1fh\x10e\x08l\x02l\x05o\x12!{>', 'hello'),
                 ('hello_server', 'hello-server')]
        for display_name, hostname in cases:
            ref = self.compute_api.create(self.context,
            (ref, resv_id) = self.compute_api.create(self.context,
                instance_types.get_default_instance_type(), None,
                display_name=display_name)
            try:
@ -224,7 +259,7 @@ class ComputeTestCase(test.TestCase):
        """Make sure destroying disassociates security groups"""
        group = self._create_group()

        ref = self.compute_api.create(
        (ref, resv_id) = self.compute_api.create(
                self.context,
                instance_type=instance_types.get_default_instance_type(),
                image_href=None,
@ -240,7 +275,7 @@ class ComputeTestCase(test.TestCase):
        """Make sure destroying security groups disassociates instances"""
        group = self._create_group()

        ref = self.compute_api.create(
        (ref, resv_id) = self.compute_api.create(
                self.context,
                instance_type=instance_types.get_default_instance_type(),
                image_href=None,
@ -1398,6 +1433,84 @@ class ComputeTestCase(test.TestCase):
                                                     'swap'),
                         swap_size)

    def test_reservation_id_one_instance(self):
        """Verify building an instance has a reservation_id that
        matches return value from create"""
        (refs, resv_id) = self.compute_api.create(self.context,
                instance_types.get_default_instance_type(), None)
        try:
            self.assertEqual(len(refs), 1)
            self.assertEqual(refs[0]['reservation_id'], resv_id)
        finally:
            db.instance_destroy(self.context, refs[0]['id'])

    def test_reservation_ids_two_instances(self):
        """Verify building 2 instances at once results in a
        reservation_id being returned equal to reservation id set
        in both instances
        """
        (refs, resv_id) = self.compute_api.create(self.context,
                instance_types.get_default_instance_type(), None,
                min_count=2, max_count=2)
        try:
            self.assertEqual(len(refs), 2)
            self.assertNotEqual(resv_id, None)
        finally:
            for instance in refs:
                self.assertEqual(instance['reservation_id'], resv_id)
                db.instance_destroy(self.context, instance['id'])

    def test_reservation_ids_two_instances_no_wait(self):
        """Verify building 2 instances at once without waiting for
        instance IDs results in a reservation_id being returned equal
        to reservation id set in both instances
        """
        (refs, resv_id) = self.compute_api.create(self.context,
                instance_types.get_default_instance_type(), None,
                min_count=2, max_count=2, wait_for_instances=False)
        try:
            self.assertEqual(refs, None)
            self.assertNotEqual(resv_id, None)
        finally:
            instances = self.compute_api.get_all(self.context,
                    search_opts={'reservation_id': resv_id})
            self.assertEqual(len(instances), 2)
            for instance in instances:
                self.assertEqual(instance['reservation_id'], resv_id)
                db.instance_destroy(self.context, instance['id'])

    def test_create_with_specified_reservation_id(self):
        """Verify building instances with a specified
        reservation_id results in the correct reservation_id
        being set
        """

        # We need admin context to be able to specify our own
        # reservation_ids.
        context = self.context.elevated()
        # 1 instance
        (refs, resv_id) = self.compute_api.create(context,
                instance_types.get_default_instance_type(), None,
                min_count=1, max_count=1, reservation_id='meow')
        try:
            self.assertEqual(len(refs), 1)
            self.assertEqual(resv_id, 'meow')
        finally:
            self.assertEqual(refs[0]['reservation_id'], resv_id)
            db.instance_destroy(self.context, refs[0]['id'])

        # 2 instances
        (refs, resv_id) = self.compute_api.create(context,
                instance_types.get_default_instance_type(), None,
                min_count=2, max_count=2, reservation_id='woof')
        try:
            self.assertEqual(len(refs), 2)
            self.assertEqual(resv_id, 'woof')
        finally:
            for instance in refs:
                self.assertEqual(instance['reservation_id'], resv_id)
                db.instance_destroy(self.context, instance['id'])


class ComputeTestMinRamMinDisk(test.TestCase):
    def setUp(self):
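(Note: the reservation_id tests above pin down the refactored contract of `compute_api.create()`: it now returns an `(instances, reservation_id)` tuple, and with `wait_for_instances=False` it only casts to the scheduler and returns `None` for the instance list. A condensed restatement of the calls the tests make -- this mirrors the diff, it is not new API surface:)

    (refs, resv_id) = self.compute_api.create(
            self.context,
            instance_types.get_default_instance_type(),
            None,  # image_href
            min_count=2, max_count=2,
            wait_for_instances=False)
    # refs is None here; the reservation id is the only handle on the new
    # instances until the scheduler has created their DB entries:
    instances = self.compute_api.get_all(
            self.context, search_opts={'reservation_id': resv_id})
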
@ -1405,6 +1518,8 @@ class ComputeTestMinRamMinDisk(test.TestCase):
        self.compute = utils.import_object(FLAGS.compute_manager)
        self.compute_api = compute.API()
        self.context = context.RequestContext('fake', 'fake')
        self.stubs.Set(rpc, 'call', rpc_call_wrapper)
        self.stubs.Set(rpc, 'cast', rpc_cast_wrapper)
        self.fake_image = {
            'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}

@ -1425,10 +1540,9 @@ class ComputeTestMinRamMinDisk(test.TestCase):

        # Now increase the inst_type memory and make sure all is fine.
        inst_type['memory_mb'] = 2
        ref = self.compute_api.create(self.context, inst_type, None)
        self.assertTrue(ref)

        db.instance_destroy(self.context, ref[0]['id'])
        (refs, resv_id) = self.compute_api.create(self.context,
                inst_type, None)
        db.instance_destroy(self.context, refs[0]['id'])

    def test_create_with_too_little_disk(self):
        """Test an instance type with too little disk space"""
@ -1447,10 +1561,9 @@ class ComputeTestMinRamMinDisk(test.TestCase):

        # Now increase the inst_type disk space and make sure all is fine.
        inst_type['local_gb'] = 2
        ref = self.compute_api.create(self.context, inst_type, None)
        self.assertTrue(ref)

        db.instance_destroy(self.context, ref[0]['id'])
        (refs, resv_id) = self.compute_api.create(self.context,
                inst_type, None)
        db.instance_destroy(self.context, refs[0]['id'])

    def test_create_just_enough_ram_and_disk(self):
        """Test an instance type with just enough ram and disk space"""
@ -1466,10 +1579,9 @@ class ComputeTestMinRamMinDisk(test.TestCase):
            return img
        self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)

        ref = self.compute_api.create(self.context, inst_type, None)
        self.assertTrue(ref)

        db.instance_destroy(self.context, ref[0]['id'])
        (refs, resv_id) = self.compute_api.create(self.context,
                inst_type, None)
        db.instance_destroy(self.context, refs[0]['id'])

    def test_create_with_no_ram_and_disk_reqs(self):
        """Test an instance type with no min_ram or min_disk"""
@ -1482,7 +1594,6 @@ class ComputeTestMinRamMinDisk(test.TestCase):
            return copy(self.fake_image)
        self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)

        ref = self.compute_api.create(self.context, inst_type, None)
        self.assertTrue(ref)

        db.instance_destroy(self.context, ref[0]['id'])
        (refs, resv_id) = self.compute_api.create(self.context,
                inst_type, None)
        db.instance_destroy(self.context, refs[0]['id'])

@ -21,9 +21,11 @@ from nova import context
from nova import db
from nova import flags
from nova import quota
from nova import rpc
from nova import test
from nova import volume
from nova.compute import instance_types
from nova.scheduler import driver as scheduler_driver


FLAGS = flags.FLAGS
@ -51,6 +53,21 @@ class QuotaTestCase(test.TestCase):
        self.context = context.RequestContext(self.user_id,
                                              self.project_id,
                                              True)
        orig_rpc_call = rpc.call

        def rpc_call_wrapper(context, topic, msg):
            """Stub out the scheduler creating the instance entry"""
            if topic == FLAGS.scheduler_topic and \
                    msg['method'] == 'run_instance':
                scheduler = scheduler_driver.Scheduler
                instance = scheduler().create_instance_db_entry(
                        context,
                        msg['args']['request_spec'])
                return [scheduler_driver.encode_instance(instance)]
            else:
                return orig_rpc_call(context, topic, msg)

        self.stubs.Set(rpc, 'call', rpc_call_wrapper)

    def _create_instance(self, cores=2):
        """Create a test instance"""
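(Note: the quota suite gets the same stub in miniature -- its tests build one instance per request, so the local `rpc_call_wrapper` skips the `num_instances` loop and returns a single-element list. Illustrative only, with `request_spec` standing in for the real spec built by `compute_api.create()`:)

    result = rpc.call(context, FLAGS.scheduler_topic,
                      {'method': 'run_instance',
                       'args': {'request_spec': request_spec}})
    # With the stub installed in setUp(), result is
    # [scheduler_driver.encode_instance(instance)] for the one freshly
    # created instance row; any other topic or method falls through to
    # the original rpc.call.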