Replace the disk config extension to match the spec.

Related to the instance-disk-management blueprint.

Change-Id: I84689583562f23356064a502577b2924bcbbc460

parent 57ad4de648
commit 84ac4d84ab

189
nova/api/openstack/contrib/disk_config.py
Normal file
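For orientation before the diff itself, here is a minimal sketch of the request shape the replacement extension handles. The attribute name, the AUTO/MANUAL values, and the example payload values are taken from the new code and tests below; the comments describe intended behaviour, not additional code in this change.

# Illustrative request body for POST /v1.1/<project_id>/servers once the
# RAX-DCF extension is loaded:
create_request = {
    'server': {
        'name': 'server_test',
        'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
        'flavorRef': '1',
        'RAX-DCF:diskConfig': 'AUTO',    # or 'MANUAL'
    }
}
# The extension's pre-handler rewrites 'RAX-DCF:diskConfig' to the internal
# boolean field 'auto_disk_config' before the core servers controller runs,
# and the response handlers add 'RAX-DCF:diskConfig' back onto server and
# image representations.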
@ -0,0 +1,189 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License

"""Disk Config extension."""

from webob import exc
from xml.dom import minidom

from nova.api.openstack import extensions
from nova.api.openstack import servers
from nova.api.openstack import xmlutil
from nova import compute
from nova import log as logging
from nova import utils

LOG = logging.getLogger('nova.api.openstack.contrib.disk_config')

ALIAS = 'RAX-DCF'
XMLNS_DCF = "http://docs.rackspacecloud.com/servers/api/ext/diskConfig/v1.0"


class ServerDiskConfigTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        root = xmlutil.TemplateElement('server')
        root.set('{%s}diskConfig' % XMLNS_DCF, '%s:diskConfig' % ALIAS)
        return xmlutil.SlaveTemplate(root, 1, nsmap={ALIAS: XMLNS_DCF})


class ServersDiskConfigTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        root = xmlutil.TemplateElement('servers')
        elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
        elem.set('{%s}diskConfig' % XMLNS_DCF, '%s:diskConfig' % ALIAS)
        return xmlutil.SlaveTemplate(root, 1, nsmap={ALIAS: XMLNS_DCF})


class ImageDiskConfigTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        root = xmlutil.TemplateElement('image')
        root.set('{%s}diskConfig' % XMLNS_DCF, '%s:diskConfig' % ALIAS)
        return xmlutil.SlaveTemplate(root, 1, nsmap={ALIAS: XMLNS_DCF})


class ImagesDiskConfigTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        root = xmlutil.TemplateElement('images')
        elem = xmlutil.SubTemplateElement(root, 'image', selector='images')
        elem.set('{%s}diskConfig' % XMLNS_DCF, '%s:diskConfig' % ALIAS)
        return xmlutil.SlaveTemplate(root, 1, nsmap={ALIAS: XMLNS_DCF})


def disk_config_to_api(value):
    return 'AUTO' if value else 'MANUAL'


def disk_config_from_api(value):
    if value == 'AUTO':
        return True
    elif value == 'MANUAL':
        return False
    else:
        msg = _("RAX-DCF:diskConfig must be either 'MANUAL' or 'AUTO'.")
        raise exc.HTTPBadRequest(explanation=msg)


class Disk_config(extensions.ExtensionDescriptor):
    """Disk Management Extension"""

    name = "DiskConfig"
    alias = ALIAS
    namespace = XMLNS_DCF
updated = "2011-09-27:00:00+00:00"

    API_DISK_CONFIG = "%s:diskConfig" % ALIAS
    INTERNAL_DISK_CONFIG = "auto_disk_config"

    def __init__(self, ext_mgr):
        super(Disk_config, self).__init__(ext_mgr)
        self.compute_api = compute.API()

    def _extract_resource_from_body(self, res, body,
            singular, singular_template, plural, plural_template):
        """Returns a list of the given resources from the request body.

        The templates passed in are used for XML serialization.
        """
        template = res.environ.get('nova.template')
        if plural in body:
            resources = body[plural]
            if template:
                template.attach(plural_template)
        elif singular in body:
            resources = [body[singular]]
            if template:
                template.attach(singular_template)
        else:
            resources = []

        return resources

    def _GET_servers(self, req, res, body):
        context = req.environ['nova.context']

        servers = self._extract_resource_from_body(res, body,
            singular='server', singular_template=ServerDiskConfigTemplate(),
            plural='servers', plural_template=ServersDiskConfigTemplate())

        for server in servers:
            db_server = self.compute_api.routing_get(context, server['id'])
            value = db_server[self.INTERNAL_DISK_CONFIG]
            server[self.API_DISK_CONFIG] = disk_config_to_api(value)

        return res

    def _GET_images(self, req, res, body):
        images = self._extract_resource_from_body(res, body,
            singular='image', singular_template=ImageDiskConfigTemplate(),
            plural='images', plural_template=ImagesDiskConfigTemplate())

        for image in images:
            metadata = image['metadata']

            if self.INTERNAL_DISK_CONFIG in metadata:
                raw_value = metadata[self.INTERNAL_DISK_CONFIG]
                value = utils.bool_from_str(raw_value)
                image[self.API_DISK_CONFIG] = disk_config_to_api(value)

        return res

    def _POST_servers(self, req, res, body):
        return self._GET_servers(req, res, body)

    def _pre_POST_servers(self, req):
        # NOTE(sirp): deserialization currently occurs *after* pre-processing
        # extensions are called. Until extensions are refactored so that
        # deserialization occurs earlier, we have to perform the
        # deserialization ourselves.
        content_type = req.content_type

        if 'xml' in content_type:
            node = minidom.parseString(req.body)
            server = node.getElementsByTagName('server')[0]
            api_value = server.getAttribute(self.API_DISK_CONFIG)
            if api_value:
                value = disk_config_from_api(api_value)
                server.setAttribute(self.INTERNAL_DISK_CONFIG, str(value))
            req.body = str(node.toxml())
        else:
            body = utils.loads(req.body)
            server = body['server']
            api_value = server.get(self.API_DISK_CONFIG)
            if api_value:
                value = disk_config_from_api(api_value)
                server[self.INTERNAL_DISK_CONFIG] = value
            req.body = utils.dumps(body)

    def _pre_PUT_servers(self, req):
        return self._pre_POST_servers(req)

    def get_request_extensions(self):
        ReqExt = extensions.RequestExtension
        return [
            ReqExt(method='GET',
                   url_route='/:(project_id)/servers/:(id)',
                   handler=self._GET_servers),
            ReqExt(method='POST',
                   url_route='/:(project_id)/servers',
                   handler=self._POST_servers,
                   pre_handler=self._pre_POST_servers),
            ReqExt(method='PUT',
                   url_route='/:(project_id)/servers/:(id)',
                   pre_handler=self._pre_PUT_servers),
            ReqExt(method='GET',
                   url_route='/:(project_id)/images/:(id)',
                   handler=self._GET_images)
        ]
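For quick reference, the value mapping the two helper functions above implement (a restatement of the code as expected inputs and outputs, not additional code in this change):

# disk_config_to_api(True)        -> 'AUTO'
# disk_config_to_api(False)       -> 'MANUAL'
# disk_config_from_api('AUTO')    -> True
# disk_config_from_api('MANUAL')  -> False
# disk_config_from_api(anything_else) raises webob.exc.HTTPBadRequest (400)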
@ -1,135 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2011 OpenStack LLC.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License
|
||||
|
||||
|
||||
from webob import exc
|
||||
import webob
|
||||
|
||||
from nova import compute
|
||||
from nova import exception
|
||||
import nova.image
|
||||
from nova import log as logging
|
||||
from nova.api.openstack import extensions
|
||||
from nova.api.openstack import wsgi
|
||||
|
||||
LOG = logging.getLogger('nova.api.openstack.contrib.disk_config')
|
||||
|
||||
|
||||
class DiskConfigController(object):
|
||||
def __init__(self):
|
||||
self.compute_api = compute.API()
|
||||
|
||||
def _return_dict(self, server_id, managed_disk):
|
||||
return {'server': {'id': server_id,
|
||||
'managed_disk': managed_disk}}
|
||||
|
||||
def index(self, req, server_id):
|
||||
context = req.environ['nova.context']
|
||||
try:
|
||||
server = self.compute_api.routing_get(context, server_id)
|
||||
except exception.NotFound:
|
||||
explanation = _("Server not found.")
|
||||
raise exc.HTTPNotFound(explanation=explanation)
|
||||
managed_disk = server['managed_disk'] or False
|
||||
return self._return_dict(server_id, managed_disk)
|
||||
|
||||
def update(self, req, server_id, body=None):
|
||||
if not body:
|
||||
raise exc.HTTPUnprocessableEntity()
|
||||
context = req.environ['nova.context']
|
||||
try:
|
||||
server = self.compute_api.routing_get(context, server_id)
|
||||
except exception.NotFound:
|
||||
explanation = _("Server not found.")
|
||||
raise exc.HTTPNotFound(explanation=explanation)
|
||||
|
||||
managed_disk = str(body['server'].get('managed_disk', False)).lower()
|
||||
managed_disk = managed_disk == 'true' or False
|
||||
self.compute_api.update(context, server_id, managed_disk=managed_disk)
|
||||
|
||||
return self._return_dict(server_id, managed_disk)
|
||||
|
||||
|
||||
class ImageDiskConfigController(object):
|
||||
def __init__(self, image_service=None):
|
||||
self.compute_api = compute.API()
|
||||
self._image_service = image_service or \
|
||||
nova.image.get_default_image_service()
|
||||
|
||||
def _return_dict(self, image_id, managed_disk):
|
||||
return {'image': {'id': image_id,
|
||||
'managed_disk': managed_disk}}
|
||||
|
||||
def index(self, req, image_id):
|
||||
context = req.environ['nova.context']
|
||||
try:
|
||||
image = self._image_service.show(context, image_id)
|
||||
except (exception.NotFound, exception.InvalidImageRef):
|
||||
explanation = _("Image not found.")
|
||||
raise webob.exc.HTTPNotFound(explanation=explanation)
|
||||
image_properties = image.get('properties', None)
|
||||
if image_properties:
|
||||
managed_disk = image_properties.get('managed_disk', False)
|
||||
|
||||
return self._return_dict(image_id, managed_disk)
|
||||
|
||||
|
||||
class Diskconfig(extensions.ExtensionDescriptor):
|
||||
"""Disk Configuration support"""
|
||||
|
||||
name = "DiskConfig"
|
||||
alias = "OS-DCFG"
|
||||
namespace = "http://docs.openstack.org/ext/disk_config/api/v1.1"
|
||||
updated = "2011-08-31T00:00:00+00:00"
|
||||
|
||||
def _server_extension_controller(self):
|
||||
metadata = {
|
||||
"attributes": {
|
||||
'managed_disk': ["server_id", "enabled"]}}
|
||||
|
||||
body_serializers = {
|
||||
'application/xml': wsgi.XMLDictSerializer(metadata=metadata,
|
||||
xmlns=wsgi.XMLNS_V11)}
|
||||
serializer = wsgi.ResponseSerializer(body_serializers, None)
|
||||
res = extensions.ResourceExtension(
|
||||
'os-disk-config',
|
||||
controller=DiskConfigController(),
|
||||
collection_actions={'update': 'PUT'},
|
||||
parent=dict(member_name='server', collection_name='servers'),
|
||||
serializer=serializer)
|
||||
return res
|
||||
|
||||
def _image_extension_controller(self):
|
||||
resources = []
|
||||
metadata = {
|
||||
"attributes": {
|
||||
'managed_disk': ["image_id", "enabled"]}}
|
||||
|
||||
body_serializers = {
|
||||
'application/xml': wsgi.XMLDictSerializer(metadata=metadata,
|
||||
xmlns=wsgi.XMLNS_V11)}
|
||||
serializer = wsgi.ResponseSerializer(body_serializers, None)
|
||||
res = extensions.ResourceExtension(
|
||||
'os-disk-config',
|
||||
controller=ImageDiskConfigController(),
|
||||
collection_actions={'update': 'PUT'},
|
||||
parent=dict(member_name='image', collection_name='images'),
|
||||
serializer=serializer)
|
||||
return res
|
||||
|
||||
def get_resources(self):
|
||||
return [self._server_extension_controller(),
|
||||
self._image_extension_controller()]
|
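To make the replacement concrete, a short hedged sketch contrasting the removed API with the new one. The old sub-resource path and 'managed_disk' key come from the removed code and its tests, the new inline attribute from the extension added above; the tenant placeholder is illustrative.

# Removed style: a dedicated sub-resource carrying a bare boolean.
#   PUT /v1.1/<tenant>/servers/<server_id>/os-disk-config
old_body = {'server': {'managed_disk': False}}

# Replacement style: an inline, namespaced attribute on the server itself.
#   PUT /v1.1/<tenant>/servers/<server_id>
new_body = {'server': {'RAX-DCF:diskConfig': 'MANUAL'}}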
@ -133,13 +133,26 @@ class RequestExtensionController(object):
|
||||
def __init__(self, application):
|
||||
self.application = application
|
||||
self.handlers = []
|
||||
self.pre_handlers = []
|
||||
|
||||
def add_handler(self, handler):
|
||||
self.handlers.append(handler)
|
||||
|
||||
def add_pre_handler(self, pre_handler):
|
||||
self.pre_handlers.append(pre_handler)
|
||||
|
||||
def process(self, req, *args, **kwargs):
|
||||
for pre_handler in self.pre_handlers:
|
||||
pre_handler(req)
|
||||
|
||||
res = req.get_response(self.application)
|
||||
|
||||
# Don't call extensions if the main application returned an
|
||||
# unsuccessful status
|
||||
successful = 200 <= res.status_int < 400
|
||||
if not successful:
|
||||
return res
|
||||
|
||||
# Deserialize the response body, if any
|
||||
body = None
|
||||
if res.body:
|
||||
@ -165,6 +178,9 @@ class RequestExtensionResource(wsgi.Resource):
|
||||
def add_handler(self, handler):
|
||||
self.controller.add_handler(handler)
|
||||
|
||||
def add_pre_handler(self, pre_handler):
|
||||
self.controller.add_pre_handler(pre_handler)
|
||||
|
||||
|
||||
class ExtensionsResource(wsgi.Resource):
|
||||
|
||||
@ -294,7 +310,10 @@ class ExtensionMiddleware(base_wsgi.Middleware):
|
||||
for request_ext in ext_mgr.get_request_extensions():
|
||||
LOG.debug(_('Extended request: %s'), request_ext.key)
|
||||
controller = req_controllers[request_ext.key]
|
||||
controller.add_handler(request_ext.handler)
|
||||
if request_ext.handler:
|
||||
controller.add_handler(request_ext.handler)
|
||||
if request_ext.pre_handler:
|
||||
controller.add_pre_handler(request_ext.pre_handler)
|
||||
|
||||
self._router = routes.middleware.RoutesMiddleware(self._dispatch,
|
||||
mapper)
|
||||
@ -437,11 +456,12 @@ class RequestExtension(object):
|
||||
that is sent to core nova OpenStack API controllers.
|
||||
|
||||
"""
|
||||
def __init__(self, method, url_route, handler):
|
||||
def __init__(self, method, url_route, handler=None, pre_handler=None):
|
||||
self.url_route = url_route
|
||||
self.handler = handler
|
||||
self.conditions = dict(method=[method])
|
||||
self.key = "%s-%s" % (method, url_route)
|
||||
self.pre_handler = pre_handler
|
||||
|
||||
|
||||
class ActionExtension(object):
|
||||
|
@ -319,9 +319,9 @@ class Controller(object):
|
||||
name = name.strip()
|
||||
|
||||
image_href = self._image_ref_from_req_data(body)
|
||||
|
||||
# If the image href was generated by nova api, strip image_href
|
||||
# down to an id and use the default glance connection params
|
||||
|
||||
if str(image_href).startswith(req.application_url):
|
||||
image_href = image_href.split('/').pop()
|
||||
|
||||
@ -386,6 +386,8 @@ class Controller(object):
|
||||
if min_count > max_count:
|
||||
min_count = max_count
|
||||
|
||||
auto_disk_config = server_dict.get('auto_disk_config')
|
||||
|
||||
try:
|
||||
inst_type = \
|
||||
instance_types.get_instance_type_by_flavor_id(flavor_id)
|
||||
@ -410,7 +412,8 @@ class Controller(object):
|
||||
user_data=user_data,
|
||||
availability_zone=availability_zone,
|
||||
config_drive=config_drive,
|
||||
block_device_mapping=block_device_mapping)
|
||||
block_device_mapping=block_device_mapping,
|
||||
auto_disk_config=auto_disk_config)
|
||||
except exception.QuotaError as error:
|
||||
self._handle_quota_error(error)
|
||||
except exception.InstanceTypeMemoryTooSmall as error:
|
||||
@ -483,6 +486,11 @@ class Controller(object):
|
||||
access_ipv6 = body['server']['accessIPv6']
|
||||
update_dict['access_ip_v6'] = access_ipv6.strip()
|
||||
|
||||
if 'auto_disk_config' in body['server']:
|
||||
auto_disk_config = utils.bool_from_str(
|
||||
body['server']['auto_disk_config'])
|
||||
update_dict['auto_disk_config'] = auto_disk_config
|
||||
|
||||
try:
|
||||
self.compute_api.update(ctxt, id, **update_dict)
|
||||
except exception.NotFound:
|
||||
@ -1120,6 +1128,10 @@ class ServerXMLDeserializer(wsgi.MetadataXMLDeserializer):
|
||||
if security_groups is not None:
|
||||
server["security_groups"] = security_groups
|
||||
|
||||
auto_disk_config = server_node.getAttribute('auto_disk_config')
|
||||
if auto_disk_config:
|
||||
server['auto_disk_config'] = utils.bool_from_str(auto_disk_config)
|
||||
|
||||
return server
|
||||
|
||||
def _extract_personality(self, server_node):
|
||||
|
@ -163,7 +163,8 @@ class API(base.Base):
|
||||
injected_files, admin_password, zone_blob,
|
||||
reservation_id, access_ip_v4, access_ip_v6,
|
||||
requested_networks, config_drive,
|
||||
block_device_mapping, create_instance_here=False):
|
||||
block_device_mapping, auto_disk_config,
|
||||
create_instance_here=False):
|
||||
"""Verify all the input parameters regardless of the provisioning
|
||||
strategy being performed and schedule the instance(s) for
|
||||
creation."""
|
||||
@ -227,10 +228,14 @@ class API(base.Base):
|
||||
vm_mode = None
|
||||
if 'properties' in image and 'vm_mode' in image['properties']:
|
||||
vm_mode = image['properties']['vm_mode']
|
||||
managed_disk = False
|
||||
if 'properties' in image and 'managed_disk' in image['properties']:
|
||||
managed_disk = utils.bool_from_str(
|
||||
image['properties']['managed_disk'])
|
||||
|
||||
# If instance doesn't have auto_disk_config overridden by request, use
|
||||
# whatever the image indicates
|
||||
if auto_disk_config is None:
|
||||
if ('properties' in image and
|
||||
'auto_disk_config' in image['properties']):
|
||||
auto_disk_config = utils.bool_from_str(
|
||||
image['properties']['auto_disk_config'])
|
||||
|
||||
if kernel_id is None:
|
||||
kernel_id = image['properties'].get('kernel_id', None)
|
||||
@ -294,7 +299,7 @@ class API(base.Base):
|
||||
'architecture': architecture,
|
||||
'vm_mode': vm_mode,
|
||||
'root_device_name': root_device_name,
|
||||
'managed_disk': managed_disk}
|
||||
'auto_disk_config': auto_disk_config}
|
||||
|
||||
LOG.debug(_("Going to run %s instances...") % num_instances)
|
||||
|
||||
@ -526,7 +531,8 @@ class API(base.Base):
|
||||
injected_files=None, admin_password=None, zone_blob=None,
|
||||
reservation_id=None, block_device_mapping=None,
|
||||
access_ip_v4=None, access_ip_v6=None,
|
||||
requested_networks=None, config_drive=None):
|
||||
requested_networks=None, config_drive=None,
|
||||
auto_disk_config=None):
|
||||
"""
|
||||
Provision instances, sending instance information to the
|
||||
scheduler. The scheduler will determine where the instance(s)
|
||||
@ -554,7 +560,7 @@ class API(base.Base):
|
||||
injected_files, admin_password, zone_blob,
|
||||
reservation_id, access_ip_v4, access_ip_v6,
|
||||
requested_networks, config_drive,
|
||||
block_device_mapping,
|
||||
block_device_mapping, auto_disk_config,
|
||||
create_instance_here=create_instance_here)
|
||||
|
||||
if create_instance_here or instances is None:
|
||||
|
@ -0,0 +1,40 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy import Column, Integer, MetaData, String, Table


meta = MetaData()


def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta.bind = migrate_engine
    instances = Table('instances', meta, autoload=True,
                      autoload_with=migrate_engine)

    managed_disk = instances.c.managed_disk
    managed_disk.alter(name='auto_disk_config')


def downgrade(migrate_engine):
    meta.bind = migrate_engine
    instances = Table('instances', meta, autoload=True,
                      autoload_with=migrate_engine)

    image_ref_column = instances.c.auto_disk_config
    image_ref_column.alter(name='managed_disk')
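A hedged sketch of checking the rename performed by the migration above, using the same SQLAlchemy reflection pattern the migration itself uses; the engine URL is an assumption for illustration only.

from sqlalchemy import MetaData, Table, create_engine

engine = create_engine('sqlite:///nova.sqlite')    # illustrative URL
meta = MetaData(bind=engine)
instances = Table('instances', meta, autoload=True)

# After upgrade() the column is exposed under its new name.
assert 'auto_disk_config' in instances.c
assert 'managed_disk' not in instances.c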
@ -255,7 +255,7 @@ class Instance(BASE, NovaBase):
|
||||
access_ip_v4 = Column(String(255))
|
||||
access_ip_v6 = Column(String(255))
|
||||
|
||||
managed_disk = Column(Boolean())
|
||||
auto_disk_config = Column(Boolean())
|
||||
progress = Column(Integer)
|
||||
|
||||
|
||||
|
@ -113,11 +113,45 @@ class _FakeImageService(object):
|
||||
'155d900f-4e14-4e4c-a73d-069cbf4541e6',
|
||||
'ramdisk_id': None}}
|
||||
|
||||
# NOTE(sirp): was image '6'
|
||||
image6 = {'id': 'a440c04b-79fa-479c-bed1-0b816eaec379',
|
||||
'name': 'fakeimage6',
|
||||
'created_at': timestamp,
|
||||
'updated_at': timestamp,
|
||||
'deleted_at': None,
|
||||
'deleted': False,
|
||||
'status': 'active',
|
||||
'is_public': False,
|
||||
'container_format': 'ova',
|
||||
'disk_format': 'vhd',
|
||||
'properties': {'kernel_id': FLAGS.null_kernel,
|
||||
'ramdisk_id': FLAGS.null_kernel,
|
||||
'architecture': 'x86_64',
|
||||
'auto_disk_config': 'False'}}
|
||||
|
||||
# NOTE(sirp): was image '7'
|
||||
image7 = {'id': '70a599e0-31e7-49b7-b260-868f441e862b',
|
||||
'name': 'fakeimage7',
|
||||
'created_at': timestamp,
|
||||
'updated_at': timestamp,
|
||||
'deleted_at': None,
|
||||
'deleted': False,
|
||||
'status': 'active',
|
||||
'is_public': False,
|
||||
'container_format': 'ova',
|
||||
'disk_format': 'vhd',
|
||||
'properties': {'kernel_id': FLAGS.null_kernel,
|
||||
'ramdisk_id': FLAGS.null_kernel,
|
||||
'architecture': 'x86_64',
|
||||
'auto_disk_config': 'True'}}
|
||||
|
||||
self.create(None, image1)
|
||||
self.create(None, image2)
|
||||
self.create(None, image3)
|
||||
self.create(None, image4)
|
||||
self.create(None, image5)
|
||||
self.create(None, image6)
|
||||
self.create(None, image7)
|
||||
self._imagedata = {}
|
||||
super(_FakeImageService, self).__init__()
|
||||
|
||||
|
248
nova/tests/api/openstack/contrib/test_disk_config.py
Normal file
@ -0,0 +1,248 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
#
|
||||
# Copyright 2011 OpenStack LLC.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
import datetime
|
||||
|
||||
import nova.db.api
|
||||
import nova.rpc
|
||||
|
||||
from nova import flags
|
||||
from nova import test
|
||||
from nova import utils
|
||||
from nova.api import openstack
|
||||
from nova.api.openstack import extensions
|
||||
from nova.api.openstack import servers
|
||||
from nova.api.openstack import wsgi
|
||||
from nova.tests.api.openstack import fakes
|
||||
|
||||
MANUAL_INSTANCE_UUID = fakes.FAKE_UUID
|
||||
AUTO_INSTANCE_UUID = fakes.FAKE_UUID.replace('a', 'b')
|
||||
|
||||
stub_instance = fakes.stub_instance
|
||||
FLAGS = flags.FLAGS
|
||||
|
||||
|
||||
def instance_addresses(context, instance_id):
|
||||
return None
|
||||
|
||||
|
||||
class DiskConfigTestCase(test.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(DiskConfigTestCase, self).setUp()
|
||||
self.flags(verbose=True)
|
||||
fakes.stub_out_nw_api(self.stubs)
|
||||
|
||||
FAKE_INSTANCES = [
|
||||
fakes.stub_instance(1,
|
||||
uuid=MANUAL_INSTANCE_UUID,
|
||||
auto_disk_config=False),
|
||||
fakes.stub_instance(2,
|
||||
uuid=AUTO_INSTANCE_UUID,
|
||||
auto_disk_config=True)
|
||||
]
|
||||
|
||||
def fake_instance_get(context, id_):
|
||||
for instance in FAKE_INSTANCES:
|
||||
if id_ == instance['id']:
|
||||
return instance
|
||||
|
||||
self.stubs.Set(nova.db.api, 'instance_get', fake_instance_get)
|
||||
self.stubs.Set(nova.db, 'instance_get', fake_instance_get)
|
||||
|
||||
def fake_instance_get_by_uuid(context, uuid):
|
||||
for instance in FAKE_INSTANCES:
|
||||
if uuid == instance['uuid']:
|
||||
return instance
|
||||
|
||||
self.stubs.Set(nova.db, 'instance_get_by_uuid',
|
||||
fake_instance_get_by_uuid)
|
||||
|
||||
def fake_instance_get_all(context, *args, **kwargs):
|
||||
return FAKE_INSTANCES
|
||||
|
||||
self.stubs.Set(nova.db, 'instance_get_all', fake_instance_get_all)
|
||||
self.stubs.Set(nova.db.api, 'instance_get_all_by_filters',
|
||||
fake_instance_get_all)
|
||||
|
||||
def fake_instance_create(context, inst_, session=None):
|
||||
class FakeModel(dict):
|
||||
def save(self, session=None):
|
||||
pass
|
||||
|
||||
inst = FakeModel(**inst_)
|
||||
inst['id'] = 1
|
||||
inst['uuid'] = AUTO_INSTANCE_UUID
|
||||
inst['created_at'] = datetime.datetime(2010, 10, 10, 12, 0, 0)
|
||||
inst['updated_at'] = datetime.datetime(2010, 10, 10, 12, 0, 0)
|
||||
inst['progress'] = 0
|
||||
inst['name'] = 'instance-1' # this is a property
|
||||
|
||||
def fake_instance_get_for_create(context, id_, session=None):
|
||||
return inst
|
||||
|
||||
self.stubs.Set(nova.db, 'instance_get',
|
||||
fake_instance_get_for_create)
|
||||
self.stubs.Set(nova.db.api, 'instance_get',
|
||||
fake_instance_get_for_create)
|
||||
self.stubs.Set(nova.db.sqlalchemy.api, 'instance_get',
|
||||
fake_instance_get_for_create)
|
||||
|
||||
def fake_instance_add_security_group(context, instance_id,
|
||||
security_group_id):
|
||||
pass
|
||||
|
||||
self.stubs.Set(nova.db.sqlalchemy.api,
|
||||
'instance_add_security_group',
|
||||
fake_instance_add_security_group)
|
||||
|
||||
return inst
|
||||
|
||||
self.stubs.Set(nova.db, 'instance_create', fake_instance_create)
|
||||
|
||||
app = openstack.APIRouter()
|
||||
app = extensions.ExtensionMiddleware(app)
|
||||
app = wsgi.LazySerializationMiddleware(app)
|
||||
self.app = app
|
||||
|
||||
def assertDiskConfig(self, dict_, value):
|
||||
self.assert_('RAX-DCF:diskConfig' in dict_)
|
||||
self.assertEqual(dict_['RAX-DCF:diskConfig'], value)
|
||||
|
||||
def test_show_server(self):
|
||||
req = fakes.HTTPRequest.blank(
|
||||
'/fake/servers/%s' % MANUAL_INSTANCE_UUID)
|
||||
res = req.get_response(self.app)
|
||||
server_dict = utils.loads(res.body)['server']
|
||||
self.assertDiskConfig(server_dict, 'MANUAL')
|
||||
|
||||
req = fakes.HTTPRequest.blank(
|
||||
'/fake/servers/%s' % AUTO_INSTANCE_UUID)
|
||||
res = req.get_response(self.app)
|
||||
server_dict = utils.loads(res.body)['server']
|
||||
self.assertDiskConfig(server_dict, 'AUTO')
|
||||
|
||||
def test_detail_servers(self):
|
||||
req = fakes.HTTPRequest.blank('/fake/servers/detail')
|
||||
res = req.get_response(self.app)
|
||||
server_dicts = utils.loads(res.body)['servers']
|
||||
|
||||
expectations = ['MANUAL', 'AUTO']
|
||||
for server_dict, expected in zip(server_dicts, expectations):
|
||||
self.assertDiskConfig(server_dict, expected)
|
||||
|
||||
def test_show_image(self):
|
||||
req = fakes.HTTPRequest.blank(
|
||||
'/fake/images/a440c04b-79fa-479c-bed1-0b816eaec379')
|
||||
res = req.get_response(self.app)
|
||||
image_dict = utils.loads(res.body)['image']
|
||||
self.assertDiskConfig(image_dict, 'MANUAL')
|
||||
|
||||
req = fakes.HTTPRequest.blank(
|
||||
'/fake/images/70a599e0-31e7-49b7-b260-868f441e862b')
|
||||
res = req.get_response(self.app)
|
||||
image_dict = utils.loads(res.body)['image']
|
||||
self.assertDiskConfig(image_dict, 'AUTO')
|
||||
|
||||
def test_detail_image(self):
|
||||
req = fakes.HTTPRequest.blank('/fake/images/detail')
|
||||
res = req.get_response(self.app)
|
||||
image_dicts = utils.loads(res.body)['images']
|
||||
|
||||
# NOTE(sirp): image fixtures 6 and 7 now use the UUIDs defined in the fake
# image service above; those are the only images carrying auto_disk_config
expectations = {'a440c04b-79fa-479c-bed1-0b816eaec379': 'MANUAL',
'70a599e0-31e7-49b7-b260-868f441e862b': 'AUTO'}
for image_dict in image_dicts:
if image_dict['id'] in expectations:
self.assertDiskConfig(image_dict, expectations[image_dict['id']])
|
||||
|
||||
def test_create_server_override_auto(self):
|
||||
req = fakes.HTTPRequest.blank('/fake/servers')
|
||||
req.method = 'POST'
|
||||
req.content_type = 'application/json'
|
||||
body = {'server': {
|
||||
'name': 'server_test',
|
||||
'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
|
||||
'flavorRef': '1',
|
||||
'RAX-DCF:diskConfig': 'AUTO'
|
||||
}}
|
||||
|
||||
req.body = utils.dumps(body)
|
||||
res = req.get_response(self.app)
|
||||
server_dict = utils.loads(res.body)['server']
|
||||
self.assertDiskConfig(server_dict, 'AUTO')
|
||||
|
||||
def test_create_server_override_manual(self):
|
||||
req = fakes.HTTPRequest.blank('/fake/servers')
|
||||
req.method = 'POST'
|
||||
req.content_type = 'application/json'
|
||||
body = {'server': {
|
||||
'name': 'server_test',
|
||||
'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
|
||||
'flavorRef': '1',
|
||||
'RAX-DCF:diskConfig': 'MANUAL'
|
||||
}}
|
||||
|
||||
req.body = utils.dumps(body)
|
||||
res = req.get_response(self.app)
|
||||
server_dict = utils.loads(res.body)['server']
|
||||
self.assertDiskConfig(server_dict, 'MANUAL')
|
||||
|
||||
def test_create_server_detect_from_image(self):
|
||||
"""If user doesn't pass in diskConfig for server, use image metadata
|
||||
to specify AUTO or MANUAL.
|
||||
"""
|
||||
req = fakes.HTTPRequest.blank('/fake/servers')
|
||||
req.method = 'POST'
|
||||
req.content_type = 'application/json'
|
||||
body = {'server': {
|
||||
'name': 'server_test',
|
||||
'imageRef': 'a440c04b-79fa-479c-bed1-0b816eaec379',
|
||||
'flavorRef': '1',
|
||||
}}
|
||||
|
||||
req.body = utils.dumps(body)
|
||||
res = req.get_response(self.app)
|
||||
server_dict = utils.loads(res.body)['server']
|
||||
self.assertDiskConfig(server_dict, 'MANUAL')
|
||||
|
||||
req = fakes.HTTPRequest.blank('/fake/servers')
|
||||
req.method = 'POST'
|
||||
req.content_type = 'application/json'
|
||||
body = {'server': {
|
||||
'name': 'server_test',
|
||||
'imageRef': '70a599e0-31e7-49b7-b260-868f441e862b',
|
||||
'flavorRef': '1',
|
||||
}}
|
||||
|
||||
req.body = utils.dumps(body)
|
||||
res = req.get_response(self.app)
|
||||
server_dict = utils.loads(res.body)['server']
|
||||
self.assertDiskConfig(server_dict, 'AUTO')
|
||||
|
||||
def test_update_server_invalid_disk_config(self):
|
||||
"""Return BadRequest if user passes an invalid diskConfig value."""
|
||||
req = fakes.HTTPRequest.blank(
|
||||
'/fake/servers/%s' % MANUAL_INSTANCE_UUID)
|
||||
req.method = 'PUT'
|
||||
req.content_type = 'application/json'
|
||||
body = {'server': {'RAX-DCF:diskConfig': 'server_test'}}
|
||||
req.body = utils.dumps(body)
|
||||
res = req.get_response(self.app)
|
||||
self.assertEqual(res.status_int, 400)
|
||||
expected_msg = '{"badRequest": {"message": "RAX-DCF:diskConfig must'\
|
||||
' be either \'MANUAL\' or \'AUTO\'.", "code": 400}}'
|
||||
self.assertEqual(res.body, expected_msg)
|
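The new tests above exercise only the JSON path; below is a hedged sketch of the XML body that _pre_POST_servers would parse with minidom. The RAX-DCF attribute name and its namespace URI come from the extension; the default xmlns and the other attribute values are illustrative assumptions.

xml_body = """<server xmlns="http://docs.openstack.org/compute/api/v1.1"
    xmlns:RAX-DCF="http://docs.rackspacecloud.com/servers/api/ext/diskConfig/v1.0"
    name="server_test" imageRef="cedef40a-ed67-4d10-800e-17455edce175"
    flavorRef="1" RAX-DCF:diskConfig="AUTO"/>"""

# minidom exposes the prefixed attribute literally, so
# server.getAttribute('RAX-DCF:diskConfig') returns 'AUTO'; the pre-handler
# then sets auto_disk_config="True" on the element, which the core
# ServerXMLDeserializer later reads via bool_from_str.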
@ -1,161 +0,0 @@
|
||||
# Copyright 2011 OpenStack LLC.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import json
|
||||
import webob
|
||||
|
||||
from nova import compute
|
||||
from nova import exception
|
||||
from nova import image
|
||||
from nova import test
|
||||
from nova.api.openstack.contrib.diskconfig import DiskConfigController
|
||||
from nova.api.openstack.contrib.diskconfig import ImageDiskConfigController
|
||||
from nova.tests.api.openstack import fakes
|
||||
|
||||
|
||||
class DiskConfigTest(test.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(DiskConfigTest, self).setUp()
|
||||
self.uuid = '70f6db34-de8d-4fbd-aafb-4065bdfa6114'
|
||||
self.url = '/v1.1/openstack/servers/%s/os-disk-config' % self.uuid
|
||||
|
||||
def test_retrieve_disk_config(self):
|
||||
def fake_compute_get(*args, **kwargs):
|
||||
return {'managed_disk': True}
|
||||
|
||||
self.stubs.Set(compute.api.API, 'routing_get', fake_compute_get)
|
||||
req = webob.Request.blank(self.url)
|
||||
req.headers['Accept'] = 'application/json'
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
self.assertEqual(res.status_int, 200)
|
||||
body = json.loads(res.body)
|
||||
self.assertEqual(body['server']['managed_disk'], True)
|
||||
self.assertEqual(body['server']['id'], self.uuid)
|
||||
|
||||
def test_set_disk_config(self):
|
||||
def fake_compute_get(*args, **kwargs):
|
||||
return {'managed_disk': 'True'}
|
||||
|
||||
def fake_compute_update(*args, **kwargs):
|
||||
return {'managed_disk': 'False'}
|
||||
|
||||
self.stubs.Set(compute.api.API, 'update', fake_compute_update)
|
||||
self.stubs.Set(compute.api.API, 'routing_get', fake_compute_get)
|
||||
|
||||
req = webob.Request.blank(self.url)
|
||||
req.method = 'PUT'
|
||||
req.headers['Accept'] = 'application/json'
|
||||
req.headers['Content-Type'] = 'application/json'
|
||||
req.body = json.dumps({'server': {'managed_disk': False}})
|
||||
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
self.assertEqual(res.status_int, 200)
|
||||
body = json.loads(res.body)
|
||||
self.assertEqual(body['server']['managed_disk'], False)
|
||||
self.assertEqual(body['server']['id'], self.uuid)
|
||||
|
||||
def test_retrieve_disk_config_bad_server_fails(self):
|
||||
def fake_compute_get(*args, **kwargs):
|
||||
raise exception.NotFound()
|
||||
|
||||
self.stubs.Set(compute.api.API, 'routing_get', fake_compute_get)
|
||||
req = webob.Request.blank(self.url)
|
||||
req.headers['Accept'] = 'application/json'
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
self.assertEqual(res.status_int, 404)
|
||||
|
||||
def test_set_disk_config_bad_server_fails(self):
|
||||
self.called = False
|
||||
|
||||
def fake_compute_get(*args, **kwargs):
|
||||
raise exception.NotFound()
|
||||
|
||||
def fake_compute_update(*args, **kwargs):
|
||||
self.called = True
|
||||
|
||||
self.stubs.Set(compute.api.API, 'update', fake_compute_update)
|
||||
self.stubs.Set(compute.api.API, 'routing_get', fake_compute_get)
|
||||
|
||||
req = webob.Request.blank(self.url)
|
||||
req.method = 'PUT'
|
||||
req.headers['Accept'] = 'application/json'
|
||||
req.headers['Content-Type'] = 'application/json'
|
||||
req.body = json.dumps({'server': {'managed_disk': False}})
|
||||
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
self.assertEqual(res.status_int, 404)
|
||||
self.assertEqual(self.called, False)
|
||||
|
||||
|
||||
class ImageDiskConfigTest(test.TestCase):
|
||||
|
||||
NOW_GLANCE_FORMAT = "2010-10-11T10:30:22"
|
||||
NOW_API_FORMAT = "2010-10-11T10:30:22Z"
|
||||
|
||||
def test_image_get_disk_config(self):
|
||||
self.flags(image_service='nova.image.glance.GlanceImageService')
|
||||
fakes.stub_out_glance(self.stubs)
|
||||
|
||||
def fake_image_service_show(*args, **kwargs):
|
||||
return {'properties': {'managed_disk': True}}
|
||||
|
||||
self.stubs.Set(image.glance.GlanceImageService, 'show',
|
||||
fake_image_service_show)
|
||||
|
||||
req = webob.Request.blank('/v1.1/openstack/images/10/os-disk-config')
|
||||
req.headers['Accept'] = 'application/json'
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
self.assertEqual(res.status_int, 200)
|
||||
|
||||
body = json.loads(res.body)
|
||||
|
||||
self.assertEqual(body['image']['managed_disk'], True)
|
||||
self.assertEqual(int(body['image']['id']), 10)
|
||||
|
||||
def test_image_get_disk_config_no_image_fails(self):
|
||||
self.flags(image_service='nova.image.glance.GlanceImageService')
|
||||
fakes.stub_out_glance(self.stubs)
|
||||
|
||||
def fake_image_service_show(*args, **kwargs):
|
||||
raise exception.NotFound()
|
||||
|
||||
self.stubs.Set(image.glance.GlanceImageService, 'show',
|
||||
fake_image_service_show)
|
||||
|
||||
req = webob.Request.blank('/v1.1/openstack/images/10/os-disk-config')
|
||||
req.headers['Accept'] = 'application/json'
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
self.assertEqual(res.status_int, 404)
|
||||
|
||||
@classmethod
|
||||
def _make_image_fixtures(cls):
|
||||
image_id = 123
|
||||
base_attrs = {'created_at': cls.NOW_GLANCE_FORMAT,
|
||||
'updated_at': cls.NOW_GLANCE_FORMAT,
|
||||
'deleted_at': None,
|
||||
'deleted': False}
|
||||
|
||||
fixtures = []
|
||||
|
||||
def add_fixture(**kwargs):
|
||||
kwargs.update(base_attrs)
|
||||
fixtures.append(kwargs)
|
||||
|
||||
# Public image
|
||||
add_fixture(id=1, name='snapshot', is_public=False,
|
||||
status='active', properties={})
|
||||
|
||||
return fixtures
|
@ -13,14 +13,12 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import datetime
|
||||
import json
|
||||
import webob
|
||||
|
||||
from nova import compute
|
||||
from nova import exception
|
||||
from nova import flags
|
||||
from nova import image
|
||||
from nova import test
|
||||
from nova.tests.api.openstack import fakes
|
||||
|
||||
@ -28,34 +26,11 @@ from nova.tests.api.openstack import fakes
|
||||
FLAGS = flags.FLAGS
|
||||
FLAGS.verbose = True
|
||||
|
||||
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
|
||||
|
||||
FAKE_NETWORKS = [('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '10.0.1.12'),
|
||||
('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', '10.0.2.12')]
|
||||
|
||||
DUPLICATE_NETWORKS = [('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '10.0.1.12'),
|
||||
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '10.0.1.12')]
|
||||
|
||||
INVALID_NETWORKS = [('invalid', 'invalid-ip-address')]
|
||||
|
||||
INSTANCE = {
|
||||
"id": 1,
|
||||
"name": "fake",
|
||||
"display_name": "test_server",
|
||||
"uuid": FAKE_UUID,
|
||||
"user_id": 'fake_user_id',
|
||||
"task_state": "kayaking",
|
||||
"vm_state": "slightly crunchy",
|
||||
"power_state": "empowered",
|
||||
"tenant_id": 'fake_tenant_id',
|
||||
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
|
||||
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
|
||||
"security_groups": [{"id": 1, "name": "test"}],
|
||||
"progress": 0,
|
||||
"image_ref": 'http://foo.com/123',
|
||||
"fixed_ips": [],
|
||||
"instance_type": {"flavorid": '124'},
|
||||
}
|
||||
def fake_compute_get(*args, **kwargs):
|
||||
return fakes.stub_instance(1, task_state="kayaking",
|
||||
vm_state="slightly crunchy",
|
||||
power_state="empowered")
|
||||
|
||||
|
||||
class ExtendedStatusTest(test.TestCase):
|
||||
@ -65,45 +40,48 @@ class ExtendedStatusTest(test.TestCase):
|
||||
self.uuid = '70f6db34-de8d-4fbd-aafb-4065bdfa6114'
|
||||
self.url = '/v1.1/openstack/servers/%s' % self.uuid
|
||||
fakes.stub_out_nw_api(self.stubs)
|
||||
self.stubs.Set(compute.api.API, 'routing_get', fake_compute_get)
|
||||
|
||||
def _make_request(self):
|
||||
req = webob.Request.blank(self.url)
|
||||
req.headers['Accept'] = 'application/json'
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
return res
|
||||
|
||||
def assertServerStates(self, server, vm_state, power_state, task_state):
|
||||
self.assertEqual(server.get('OS-EXT-STS:vm_state'), vm_state)
|
||||
self.assertEqual(server.get('OS-EXT-STS:power_state'), power_state)
|
||||
self.assertEqual(server.get('OS-EXT-STS:task_state'), task_state)
|
||||
|
||||
def test_extended_status_with_admin(self):
|
||||
def fake_compute_get(*args, **kwargs):
|
||||
return INSTANCE
|
||||
|
||||
self.flags(allow_admin_api=True)
|
||||
self.stubs.Set(compute.api.API, 'routing_get', fake_compute_get)
|
||||
req = webob.Request.blank(self.url)
|
||||
req.headers['Accept'] = 'application/json'
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
self.assertEqual(res.status_int, 200)
|
||||
res = self._make_request()
|
||||
body = json.loads(res.body)
|
||||
self.assertEqual(body['server']['OS-EXT-STS:vm_state'],
|
||||
'slightly crunchy')
|
||||
self.assertEqual(body['server']['OS-EXT-STS:power_state'], 'empowered')
|
||||
self.assertEqual(body['server']['OS-EXT-STS:task_state'], 'kayaking')
|
||||
|
||||
self.assertEqual(res.status_int, 200)
|
||||
self.assertServerStates(body['server'],
|
||||
vm_state='slightly crunchy',
|
||||
power_state='empowered',
|
||||
task_state='kayaking')
|
||||
|
||||
def test_extended_status_no_admin(self):
|
||||
def fake_compute_get(*args, **kwargs):
|
||||
return INSTANCE
|
||||
|
||||
self.flags(allow_admin_api=False)
|
||||
self.stubs.Set(compute.api.API, 'routing_get', fake_compute_get)
|
||||
req = webob.Request.blank(self.url)
|
||||
req.headers['Accept'] = 'application/json'
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
self.assertEqual(res.status_int, 200)
|
||||
res = self._make_request()
|
||||
body = json.loads(res.body)
|
||||
self.assertEqual(body['server'].get('OS-EXT-STS:vm_state'), None)
|
||||
self.assertEqual(body['server'].get('OS-EXT-STS:power_state'), None)
|
||||
self.assertEqual(body['server'].get('OS-EXT-STS:task_state'), None)
|
||||
|
||||
self.assertEqual(res.status_int, 200)
|
||||
self.assertServerStates(body['server'],
|
||||
vm_state=None,
|
||||
power_state=None,
|
||||
task_state=None)
|
||||
|
||||
def test_extended_status_no_instance_fails(self):
|
||||
self.flags(allow_admin_api=True)
|
||||
|
||||
def fake_compute_get(*args, **kwargs):
|
||||
raise exception.InstanceNotFound()
|
||||
|
||||
self.flags(allow_admin_api=True)
|
||||
self.stubs.Set(compute.api.API, 'routing_get', fake_compute_get)
|
||||
req = webob.Request.blank(self.url)
|
||||
req.headers['Accept'] = 'application/json'
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
res = self._make_request()
|
||||
|
||||
self.assertEqual(res.status_int, 404)
|
||||
|
@ -15,6 +15,8 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import datetime
|
||||
|
||||
import webob
|
||||
import webob.dec
|
||||
import webob.request
|
||||
@ -35,6 +37,9 @@ from nova.api.openstack import urlmap
|
||||
from nova.api.openstack import versions
|
||||
from nova.api.openstack import wsgi as os_wsgi
|
||||
from nova.auth.manager import User, Project
|
||||
from nova.compute import instance_types
|
||||
from nova.compute import vm_states
|
||||
from nova.db.sqlalchemy import models
|
||||
import nova.image.fake
|
||||
from nova.tests.glance import stubs as glance_stubs
|
||||
|
||||
@ -447,3 +452,114 @@ class FakeRateLimiter(object):
|
||||
@webob.dec.wsgify
|
||||
def __call__(self, req):
|
||||
return self.application
|
||||
|
||||
|
||||
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
|
||||
|
||||
|
||||
def create_fixed_ips(project_id, publics, privates, publics_are_floating):
|
||||
if publics is None:
|
||||
publics = []
|
||||
if privates is None:
|
||||
privates = []
|
||||
|
||||
fixed_ips = []
|
||||
private_vif = dict(address='aa:bb:cc:dd:ee:ff')
|
||||
private_net = dict(label='private', project_id=project_id, cidr_v6=None)
|
||||
|
||||
for private in privates:
|
||||
entry = dict(address=private, network=private_net,
|
||||
virtual_interface=private_vif, floating_ips=[])
|
||||
if publics_are_floating:
|
||||
for public in publics:
|
||||
entry['floating_ips'].append(dict(address=public))
|
||||
# Only add them once
|
||||
publics = []
|
||||
fixed_ips.append(entry)
|
||||
|
||||
if not publics_are_floating:
|
||||
public_vif = dict(address='ff:ee:dd:cc:bb:aa')
|
||||
public_net = dict(label='public', project_id=project_id,
|
||||
cidr_v6='b33f::/64')
|
||||
for public in publics:
|
||||
entry = dict(address=public, network=public_net,
|
||||
virtual_interface=public_vif, floating_ips=[])
|
||||
fixed_ips.append(entry)
|
||||
return fixed_ips
|
||||
|
||||
|
||||
def stub_instance(id, user_id='fake', project_id='fake', host=None,
|
||||
vm_state=None, task_state=None,
|
||||
reservation_id="", uuid=FAKE_UUID, image_ref="10",
|
||||
flavor_id="1", name=None, key_name='',
|
||||
access_ipv4=None, access_ipv6=None, progress=0,
|
||||
auto_disk_config=False, public_ips=None, private_ips=None,
|
||||
public_ips_are_floating=False, display_name=None,
|
||||
include_fake_metadata=True,
|
||||
power_state=None):
|
||||
|
||||
if include_fake_metadata:
|
||||
metadata = [models.InstanceMetadata(key='seq', value=id)]
|
||||
else:
|
||||
metadata = []
|
||||
|
||||
inst_type = instance_types.get_instance_type_by_flavor_id(int(flavor_id))
|
||||
|
||||
if host is not None:
|
||||
host = str(host)
|
||||
|
||||
if key_name:
|
||||
key_data = 'FAKE'
|
||||
else:
|
||||
key_data = ''
|
||||
|
||||
fixed_ips = create_fixed_ips(project_id, public_ips, private_ips,
|
||||
public_ips_are_floating)
|
||||
|
||||
# ReservationID isn't sent back, hack it in there.
|
||||
server_name = name or "server%s" % id
|
||||
if reservation_id != "":
|
||||
server_name = "reservation_%s" % (reservation_id, )
|
||||
|
||||
instance = {
|
||||
"id": int(id),
|
||||
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
|
||||
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
|
||||
"admin_pass": "",
|
||||
"user_id": user_id,
|
||||
"project_id": project_id,
|
||||
"image_ref": image_ref,
|
||||
"kernel_id": "",
|
||||
"ramdisk_id": "",
|
||||
"launch_index": 0,
|
||||
"key_name": key_name,
|
||||
"key_data": key_data,
|
||||
"vm_state": vm_state or vm_states.BUILDING,
|
||||
"task_state": task_state,
|
||||
"power_state": power_state,
|
||||
"memory_mb": 0,
|
||||
"vcpus": 0,
|
||||
"local_gb": 0,
|
||||
"hostname": "",
|
||||
"host": host,
|
||||
"instance_type": dict(inst_type),
|
||||
"user_data": "",
|
||||
"reservation_id": reservation_id,
|
||||
"mac_address": "",
|
||||
"scheduled_at": utils.utcnow(),
|
||||
"launched_at": utils.utcnow(),
|
||||
"terminated_at": utils.utcnow(),
|
||||
"availability_zone": "",
|
||||
"display_name": display_name or server_name,
|
||||
"display_description": "",
|
||||
"locked": False,
|
||||
"metadata": metadata,
|
||||
"access_ip_v4": access_ipv4,
|
||||
"access_ip_v6": access_ipv6,
|
||||
"uuid": uuid,
|
||||
"progress": progress,
|
||||
"auto_disk_config": auto_disk_config,
|
||||
"name": "instance-%s" % id,
|
||||
"fixed_ips": fixed_ips}
|
||||
|
||||
return instance
|
||||
|
@ -50,8 +50,8 @@ from nova import utils
|
||||
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
FAKE_UUIDS = {0: 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'}
|
||||
FAKE_UUID = FAKE_UUIDS[0]
|
||||
FAKE_UUID = fakes.FAKE_UUID
|
||||
FAKE_UUIDS = {0: FAKE_UUID}
|
||||
NS = "{http://docs.openstack.org/compute/api/v1.1}"
|
||||
ATOMNS = "{http://www.w3.org/2005/Atom}"
|
||||
XPATH_NS = {
|
||||
@ -71,36 +71,36 @@ def fake_gen_uuid():
|
||||
|
||||
|
||||
def return_server_by_id(context, id):
|
||||
return stub_instance(id)
|
||||
return fakes.stub_instance(id)
|
||||
|
||||
|
||||
def return_server_by_uuid(context, uuid):
|
||||
id = 1
|
||||
return stub_instance(id, uuid=uuid)
|
||||
return fakes.stub_instance(id, uuid=uuid)
|
||||
|
||||
|
||||
def return_server_with_attributes(**kwargs):
|
||||
def _return_server(context, instance_id):
|
||||
return stub_instance(instance_id, **kwargs)
|
||||
return fakes.stub_instance(instance_id, **kwargs)
|
||||
return _return_server
|
||||
|
||||
|
||||
def return_server_with_attributes_by_uuid(**kwargs):
|
||||
def _return_server(context, uuid):
|
||||
return stub_instance(1, uuid=uuid, **kwargs)
|
||||
return fakes.stub_instance(1, uuid=uuid, **kwargs)
|
||||
return _return_server
|
||||
|
||||
|
||||
def return_server_with_state(vm_state, task_state=None):
|
||||
def _return_server(context, uuid):
|
||||
return stub_instance(1, uuid=uuid, vm_state=vm_state,
|
||||
return fakes.stub_instance(1, uuid=uuid, vm_state=vm_state,
|
||||
task_state=task_state)
|
||||
return _return_server
|
||||
|
||||
|
||||
def return_server_with_uuid_and_state(vm_state, task_state):
|
||||
def _return_server(context, id):
|
||||
return stub_instance(id,
|
||||
return fakes.stub_instance(id,
|
||||
uuid=FAKE_UUID,
|
||||
vm_state=vm_state,
|
||||
task_state=task_state)
|
||||
@ -110,13 +110,13 @@ def return_server_with_uuid_and_state(vm_state, task_state):
|
||||
def return_servers(context, *args, **kwargs):
|
||||
servers = []
|
||||
for i in xrange(5):
|
||||
server = stub_instance(i, 'fake', 'fake', uuid=get_fake_uuid(i))
|
||||
server = fakes.stub_instance(i, 'fake', 'fake', uuid=get_fake_uuid(i))
|
||||
servers.append(server)
|
||||
return servers
|
||||
|
||||
|
||||
def return_servers_by_reservation(context, reservation_id=""):
|
||||
return [stub_instance(i, reservation_id) for i in xrange(5)]
|
||||
return [fakes.stub_instance(i, reservation_id) for i in xrange(5)]
|
||||
|
||||
|
||||
def return_servers_by_reservation_empty(context, reservation_id=""):
|
||||
@ -136,7 +136,8 @@ def return_servers_from_child_zones(*args, **kwargs):
|
||||
servers = []
|
||||
for server_id in xrange(5):
|
||||
server = Server()
|
||||
server._info = stub_instance(server_id, reservation_id="child")
|
||||
server._info = fakes.stub_instance(
|
||||
server_id, reservation_id="child")
|
||||
servers.append(server)
|
||||
|
||||
zones.append(("Zone%d" % zone, servers))
|
||||
@ -148,115 +149,13 @@ def return_security_group(context, instance_id, security_group_id):
|
||||
|
||||
|
||||
def instance_update(context, instance_id, values):
|
||||
return stub_instance(instance_id, name=values.get('display_name'))
|
||||
return fakes.stub_instance(instance_id, name=values.get('display_name'))
|
||||
|
||||
|
||||
def instance_addresses(context, instance_id):
|
||||
return None
|
||||
|
||||
|
||||
def create_fixed_ips(project_id, publics, privates, publics_are_floating):
|
||||
if publics is None:
|
||||
publics = []
|
||||
if privates is None:
|
||||
privates = []
|
||||
|
||||
fixed_ips = []
|
||||
private_vif = dict(address='aa:bb:cc:dd:ee:ff')
|
||||
private_net = dict(label='private', project_id=project_id, cidr_v6=None)
|
||||
|
||||
for private in privates:
|
||||
entry = dict(address=private, network=private_net,
|
||||
virtual_interface=private_vif, floating_ips=[])
|
||||
if publics_are_floating:
|
||||
for public in publics:
|
||||
entry['floating_ips'].append(dict(address=public))
|
||||
# Only add them once
|
||||
publics = []
|
||||
fixed_ips.append(entry)
|
||||
|
||||
if not publics_are_floating:
|
||||
public_vif = dict(address='ff:ee:dd:cc:bb:aa')
|
||||
public_net = dict(label='public', project_id=project_id,
|
||||
cidr_v6='b33f::/64')
|
||||
for public in publics:
|
||||
entry = dict(address=public, network=public_net,
|
||||
virtual_interface=public_vif, floating_ips=[])
|
||||
fixed_ips.append(entry)
|
||||
return fixed_ips
|
||||
|
||||
|
||||
def stub_instance(id, user_id='fake', project_id='fake', host=None,
|
||||
vm_state=None, task_state=None,
|
||||
reservation_id="", uuid=FAKE_UUID, image_ref="10",
|
||||
flavor_id="1", name=None, key_name='',
|
||||
access_ipv4=None, access_ipv6=None, progress=0,
|
||||
public_ips=None, private_ips=None,
|
||||
public_ips_are_floating=False):
|
||||
|
||||
metadata = []
|
||||
metadata.append(InstanceMetadata(key='seq', value=id))
|
||||
|
||||
inst_type = instance_types.get_instance_type_by_flavor_id(int(flavor_id))
|
||||
|
||||
if host is not None:
|
||||
host = str(host)
|
||||
|
||||
if key_name:
|
||||
key_data = 'FAKE'
|
||||
else:
|
||||
key_data = ''
|
||||
|
||||
fixed_ips = create_fixed_ips(project_id, public_ips, private_ips,
|
||||
public_ips_are_floating)
|
||||
|
||||
# ReservationID isn't sent back, hack it in there.
|
||||
server_name = name or "server%s" % id
|
||||
if reservation_id != "":
|
||||
server_name = "reservation_%s" % (reservation_id, )
|
||||
|
||||
instance = {
|
||||
"name": str(id),
|
||||
"id": int(id),
|
||||
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
|
||||
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
|
||||
"admin_pass": "",
|
||||
"user_id": user_id,
|
||||
"project_id": project_id,
|
||||
"image_ref": image_ref,
|
||||
"kernel_id": "",
|
||||
"ramdisk_id": "",
|
||||
"launch_index": 0,
|
||||
"key_name": key_name,
|
||||
"key_data": key_data,
|
||||
"vm_state": vm_state or vm_states.BUILDING,
|
||||
"task_state": task_state,
|
||||
"memory_mb": 0,
|
||||
"vcpus": 0,
|
||||
"local_gb": 0,
|
||||
"hostname": "",
|
||||
"host": host,
|
||||
"instance_type": dict(inst_type),
|
||||
"user_data": "",
|
||||
"reservation_id": reservation_id,
|
||||
"mac_address": "",
|
||||
"scheduled_at": utils.utcnow(),
|
||||
"launched_at": utils.utcnow(),
|
||||
"terminated_at": utils.utcnow(),
|
||||
"availability_zone": "",
|
||||
"display_name": server_name,
|
||||
"display_description": "",
|
||||
"locked": False,
|
||||
"metadata": metadata,
|
||||
"access_ip_v4": access_ipv4,
|
||||
"access_ip_v6": access_ipv6,
|
||||
"uuid": uuid,
|
||||
"progress": progress,
|
||||
"fixed_ips": fixed_ips}
|
||||
|
||||
return instance
|
||||
|
||||
|
||||
def fake_compute_api(cls, req, id):
|
||||
return True
|
||||
|
||||
@@ -845,7 +744,7 @@ class ServersControllerTest(test.TestCase):
server_uuid = str(utils.gen_uuid())

def fake_get_all(compute_self, context, search_opts=None):
return [stub_instance(100, uuid=server_uuid)]
return [fakes.stub_instance(100, uuid=server_uuid)]

self.stubs.Set(nova.compute.API, 'get_all', fake_get_all)

@@ -862,7 +761,7 @@ class ServersControllerTest(test.TestCase):
self.assertNotEqual(search_opts, None)
self.assertTrue('image' in search_opts)
self.assertEqual(search_opts['image'], '12345')
return [stub_instance(100, uuid=server_uuid)]
return [fakes.stub_instance(100, uuid=server_uuid)]

self.stubs.Set(nova.compute.API, 'get_all', fake_get_all)
self.flags(allow_admin_api=False)
@@ -878,7 +777,7 @@ class ServersControllerTest(test.TestCase):
self.assertNotEqual(filters, None)
self.assertEqual(filters['project_id'], 'fake')
self.assertFalse(filters.get('tenant_id'))
return [stub_instance(100)]
return [fakes.stub_instance(100)]

self.stubs.Set(nova.db, 'instance_get_all_by_filters',
fake_get_all)
@@ -898,7 +797,7 @@ class ServersControllerTest(test.TestCase):
self.assertTrue('flavor' in search_opts)
# flavor is an integer ID
self.assertEqual(search_opts['flavor'], '12345')
return [stub_instance(100, uuid=server_uuid)]
return [fakes.stub_instance(100, uuid=server_uuid)]

self.stubs.Set(nova.compute.API, 'get_all', fake_get_all)
self.flags(allow_admin_api=False)
@@ -916,7 +815,7 @@ class ServersControllerTest(test.TestCase):
self.assertNotEqual(search_opts, None)
self.assertTrue('vm_state' in search_opts)
self.assertEqual(search_opts['vm_state'], vm_states.ACTIVE)
return [stub_instance(100, uuid=server_uuid)]
return [fakes.stub_instance(100, uuid=server_uuid)]

self.stubs.Set(nova.compute.API, 'get_all', fake_get_all)
self.flags(allow_admin_api=False)
@@ -940,7 +839,7 @@ class ServersControllerTest(test.TestCase):
self.assertNotEqual(search_opts, None)
self.assertTrue('name' in search_opts)
self.assertEqual(search_opts['name'], 'whee.*')
return [stub_instance(100, uuid=server_uuid)]
return [fakes.stub_instance(100, uuid=server_uuid)]

self.stubs.Set(nova.compute.API, 'get_all', fake_get_all)
self.flags(allow_admin_api=False)
@@ -960,7 +859,7 @@ class ServersControllerTest(test.TestCase):
changes_since = datetime.datetime(2011, 1, 24, 17, 8, 1)
self.assertEqual(search_opts['changes-since'], changes_since)
self.assertTrue('deleted' not in search_opts)
return [stub_instance(100, uuid=server_uuid)]
return [fakes.stub_instance(100, uuid=server_uuid)]

self.stubs.Set(nova.compute.API, 'get_all', fake_get_all)

@@ -995,7 +894,7 @@ class ServersControllerTest(test.TestCase):
# Allowed only by admins with admin API on
self.assertFalse('ip' in search_opts)
self.assertFalse('unknown_option' in search_opts)
return [stub_instance(100, uuid=server_uuid)]
return [fakes.stub_instance(100, uuid=server_uuid)]

self.stubs.Set(nova.compute.API, 'get_all', fake_get_all)

@@ -1027,7 +926,7 @@ class ServersControllerTest(test.TestCase):
# Allowed only by admins with admin API on
self.assertFalse('ip' in search_opts)
self.assertFalse('unknown_option' in search_opts)
return [stub_instance(100, uuid=server_uuid)]
return [fakes.stub_instance(100, uuid=server_uuid)]

self.stubs.Set(nova.compute.API, 'get_all', fake_get_all)

@@ -1057,7 +956,7 @@ class ServersControllerTest(test.TestCase):
# Allowed only by admins with admin API on
self.assertTrue('ip' in search_opts)
self.assertTrue('unknown_option' in search_opts)
return [stub_instance(100, uuid=server_uuid)]
return [fakes.stub_instance(100, uuid=server_uuid)]

self.stubs.Set(nova.compute.API, 'get_all', fake_get_all)

@@ -1081,7 +980,7 @@ class ServersControllerTest(test.TestCase):
self.assertNotEqual(search_opts, None)
self.assertTrue('ip' in search_opts)
self.assertEqual(search_opts['ip'], '10\..*')
return [stub_instance(100, uuid=server_uuid)]
return [fakes.stub_instance(100, uuid=server_uuid)]

self.stubs.Set(nova.compute.API, 'get_all', fake_get_all)

@@ -1104,7 +1003,7 @@ class ServersControllerTest(test.TestCase):
self.assertNotEqual(search_opts, None)
self.assertTrue('ip6' in search_opts)
self.assertEqual(search_opts['ip6'], 'ffff.*')
return [stub_instance(100, uuid=server_uuid)]
return [fakes.stub_instance(100, uuid=server_uuid)]

self.stubs.Set(nova.compute.API, 'get_all', fake_get_all)

@@ -1246,7 +1145,7 @@ class ServersControllerTest(test.TestCase):
'''

def return_servers_with_host(context, *args, **kwargs):
return [stub_instance(i, 'fake', 'fake', i % 2,
return [fakes.stub_instance(i, 'fake', 'fake', i % 2,
uuid=get_fake_uuid(i))
for i in xrange(5)]

@@ -2386,59 +2285,18 @@ class ServersViewBuilderTest(test.TestCase):
def setUp(self):
super(ServersViewBuilderTest, self).setUp()
self.flags(use_ipv6=True)
self.instance = self._get_instance()
self.instance = fakes.stub_instance(
id=1,
image_ref="5",
uuid="deadbeef-feed-edee-beef-d0ea7beefedd",
display_name="test_server",
public_ips=["192.168.0.3"],
private_ips=["172.19.0.1"],
include_fake_metadata=False)

self.uuid = self.instance['uuid']
self.view_builder = self._get_view_builder()

def _get_instance(self):
created_at = datetime.datetime(2010, 10, 10, 12, 0, 0)
updated_at = datetime.datetime(2010, 11, 11, 11, 0, 0)

public_ips = ['192.168.0.3']
private_ips = ['172.19.0.1']
fixed_ips = create_fixed_ips("fake", public_ips, private_ips, False)

instance = {
"id": 1,
"created_at": created_at,
"updated_at": updated_at,
"admin_pass": "",
"user_id": "fake",
"project_id": "fake",
"image_ref": "5",
"kernel_id": "",
"ramdisk_id": "",
"launch_index": 0,
"key_name": "",
"key_data": "",
"vm_state": vm_states.BUILDING,
"task_state": None,
"memory_mb": 0,
"vcpus": 0,
"local_gb": 0,
"hostname": "",
"host": "",
"instance_type": {
"flavorid": '1',
},
"user_data": "",
"reservation_id": "",
"mac_address": "",
"scheduled_at": utils.utcnow(),
"launched_at": utils.utcnow(),
"terminated_at": utils.utcnow(),
"availability_zone": "",
"display_name": "test_server",
"locked": False,
"metadata": [],
"accessIPv4": "1.2.3.4",
"accessIPv6": "fead::1234",
"uuid": "deadbeef-feed-edee-beef-d0ea7beefedd",
"progress": 0,
"fixed_ips": fixed_ips}

return instance

def _get_view_builder(self, project_id=""):
base_url = "http://localhost/v1.1"
views = nova.api.openstack.views

@@ -1124,9 +1124,9 @@ class HostStateTestCase(test.TestCase):
self.assertEquals(stats['host_memory_free_computed'], 40)


class XenAPIManagedDiskTestCase(test.TestCase):
class XenAPIAutoDiskConfigTestCase(test.TestCase):
def setUp(self):
super(XenAPIManagedDiskTestCase, self).setUp()
super(XenAPIAutoDiskConfigTestCase, self).setUp()
self.stubs = stubout.StubOutForTesting()
self.flags(target_host='127.0.0.1',
xenapi_connection_url='test_url',
@@ -1181,15 +1181,17 @@ class XenAPIManagedDiskTestCase(test.TestCase):

self.assertEqual(marker["partition_called"], called)

def test_instance_not_managed(self):
"""Should not partition unless instance is marked as managed_disk"""
self.instance_values['managed_disk'] = False
def test_instance_not_auto_disk_config(self):
"""Should not partition unless instance is marked as
auto_disk_config.
"""
self.instance_values['auto_disk_config'] = False
self.assertIsPartitionCalled(False)

@stub_vm_utils_with_vdi_attached_here
def test_instance_managed_doesnt_pass_fail_safes(self):
def test_instance_auto_disk_config_doesnt_pass_fail_safes(self):
"""Should not partition unless fail safes pass"""
self.instance_values['managed_disk'] = True
self.instance_values['auto_disk_config'] = True

@classmethod
def fake_resize_partition_allowed(cls, dev_path, partition_path):
@@ -1201,11 +1203,11 @@ class XenAPIManagedDiskTestCase(test.TestCase):
self.assertIsPartitionCalled(False)

@stub_vm_utils_with_vdi_attached_here
def test_instance_managed_passes_fail_safes(self):
"""Should partition if instance is marked as managed_disk=True and
def test_instance_auto_disk_config_passes_fail_safes(self):
"""Should partition if instance is marked as auto_disk_config=True and
virt-layer specific fail-safe checks pass.
"""
self.instance_values['managed_disk'] = True
self.instance_values['auto_disk_config'] = True

@classmethod
def fake_resize_partition_allowed(cls, dev_path, partition_path):

@@ -359,8 +359,7 @@ class VMHelper(HelperBase):
return os.path.join(FLAGS.xenapi_sr_base_path, sr_uuid)

@classmethod
def upload_image(cls, context, session, instance, vdi_uuids, image_id,
options=None):
def upload_image(cls, context, session, instance, vdi_uuids, image_id):
""" Requests that the Glance plugin bundle the specified VDIs and
push them into Glance using the specified human-friendly name.
"""
@@ -369,26 +368,28 @@ class VMHelper(HelperBase):
logging.debug(_("Asking xapi to upload %(vdi_uuids)s as"
" ID %(image_id)s") % locals())

os_type = instance.os_type or FLAGS.default_os_type

glance_host, glance_port = glance.pick_glance_api_server()

properties = {}
properties['auto_disk_config'] = instance.auto_disk_config
properties['os_type'] = instance.os_type or FLAGS.default_os_type

params = {'vdi_uuids': vdi_uuids,
'image_id': image_id,
'glance_host': glance_host,
'glance_port': glance_port,
'sr_path': cls.get_sr_path(session),
'os_type': os_type,
'auth_token': getattr(context, 'auth_token', None),
'options': options}
'properties': properties}

kwargs = {'params': pickle.dumps(params)}
task = session.async_call_plugin('glance', 'upload_vhd', kwargs)
session.wait_for_task(task, instance.id)

@classmethod
def create_managed_disk(cls, session, vdi_ref):
"""A 'managed disk' means that we'll resize the partition and fs to
match the size specified by instance_types.local_gb.
def auto_configure_disk(cls, session, vdi_ref):
"""Partition and resize FS to match the size specified by
instance_types.local_gb.
"""
with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
dev_path = utils.make_dev_path(dev)

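The hunk above switches the upload path from the old 'options'/'os_type' fields to a generic 'properties' dict carrying auto_disk_config and os_type. A minimal, self-contained sketch of that round trip follows; the literal values (UUIDs, host, port) are placeholders, but the dictionary keys mirror the params built by upload_image and read back by the plugin's upload_vhd handler shown further down.

import pickle

# Illustrative values only; in the driver these come from the instance
# record, the XenAPI session and the Glance API server picker.
properties = {'auto_disk_config': True,
              'os_type': 'linux'}

params = {'vdi_uuids': ['fake-vdi-uuid'],
          'image_id': 'fake-image-id',
          'glance_host': '127.0.0.1',
          'glance_port': 9292,
          'sr_path': '/var/run/sr-mount/fake-sr-uuid',
          'auth_token': None,
          'properties': properties}

# The driver pickles the payload and ships it to the 'glance' plugin...
kwargs = {'params': pickle.dumps(params)}

# ...and the plugin side unpickles it and reads the same keys back.
unpacked = pickle.loads(kwargs['params'])
assert unpacked['properties']['auto_disk_config'] is True
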
@@ -401,12 +402,12 @@ class VMHelper(HelperBase):
"""Determine whether we should resize the partition and the fs.

This is a fail-safe to prevent accidentally destroying data on a disk
erroneously marked as managed_disk=True.
erroneously marked as auto_disk_config=True.

The criteria for allowing resize are:

1. 'managed_disk' must be true for the instance (and image). (If
we've made it here, then managed_disk=True.)
1. 'auto_disk_config' must be true for the instance (and image).
(If we've made it here, then auto_disk_config=True.)

2. The disk must have only one partition.

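To make the two criteria concrete, here is an illustrative sketch of such a fail-safe check. It is not the nova implementation: the helper names, the use of parted, and the way partitions are counted are assumptions made purely for illustration.

import subprocess

def count_partitions(dev_path):
    # Hypothetical helper: ask parted for the partition table and count
    # the numbered rows it prints (one row per partition).
    output = subprocess.check_output(
        ['parted', '--script', dev_path, 'print']).decode()
    return sum(1 for line in output.splitlines()
               if line.strip()[:1].isdigit())

def resize_allowed(auto_disk_config, dev_path):
    # Criterion 1: the instance (and image) must have opted in.
    if not auto_disk_config:
        return False
    # Criterion 2: refuse to touch anything but a single-partition disk,
    # so a mislabeled image cannot have its data clobbered.
    return count_partitions(dev_path) == 1
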
@@ -504,7 +505,7 @@ w

# 4. Create VBD between instance VM and swap VDI
volume_utils.VolumeHelper.create_vbd(
session, vm_ref, vdi_ref, userdevice, bootable=False)
session, vm_ref, vdi_ref, userdevice, bootable=False)
except:
with utils.save_and_reraise_exception():
cls.destroy_vdi(session, vdi_ref)
@@ -1211,7 +1212,7 @@ def _wait_for_device(dev):
time.sleep(1)

raise volume_utils.StorageError(
_('Timeout waiting for device %s to be created') % dev)
_('Timeout waiting for device %s to be created') % dev)


@contextlib.contextmanager

@@ -342,15 +342,12 @@ class VMOps(object):
# set user device to next free value
userdevice += 1
else:
if instance.managed_disk:
LOG.debug(_("Managed disk set for instance %(instance_id)s,"
" attempting to resize partition") % locals())
VMHelper.create_managed_disk(session=self._session,
if instance.auto_disk_config:
LOG.debug(_("Auto configuring disk for instance"
" %(instance_id)s, attempting to"
" resize partition...") % locals())
VMHelper.auto_configure_disk(session=self._session,
vdi_ref=first_vdi_ref)
else:
LOG.debug(_("Managed disk NOT set for instance"
" %(instance_id)s, skipping resize partition")
% locals())

VolumeHelper.create_vbd(session=self._session, vm_ref=vm_ref,
vdi_ref=first_vdi_ref,
@@ -590,16 +587,12 @@ class VMOps(object):

"""
template_vm_ref = None
options = None
if instance['managed_disk']:
options = {'managed_disk': instance['managed_disk']}
try:
template_vm_ref, template_vdi_uuids =\
self._create_snapshot(instance)
# call plugin to ship snapshot off to glance
VMHelper.upload_image(context,
self._session, instance, template_vdi_uuids, image_id,
options)
self._session, instance, template_vdi_uuids, image_id)
finally:
if template_vm_ref:
self._destroy(instance, template_vm_ref,

@@ -307,8 +307,8 @@ def _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids):
os.link(source, link_name)


def _upload_tarball(staging_path, image_id, glance_host, glance_port, os_type,
auth_token, options):
def _upload_tarball(staging_path, image_id, glance_host, glance_port,
auth_token, properties):
"""
Create a tarball of the image and then stream that into Glance
using chunked-transfer-encoded HTTP.
@@ -352,11 +352,11 @@ def _upload_tarball(staging_path, image_id, glance_host, glance_port, os_type,
'x-image-meta-is-public': 'False',
'x-image-meta-status': 'queued',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'ovf',
'x-image-meta-property-os-type': os_type}
'x-image-meta-container-format': 'ovf'}

if options and options.get('managed_disk'):
headers['x-image-meta-property-managed-disk'] = options['managed_disk']
for key, value in properties.items():
header_key = "x-image-meta-property-%s" % key.replace('_', '-')
headers[header_key] = str(value)

# If we have an auth_token, set an x-auth-token header
if auth_token:

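The generic property loop above replaces the one-off managed-disk header. A quick sketch of what it produces, using an assumed properties dict with the two keys the driver currently sends:

# Assumed input; mirrors the properties built by VMHelper.upload_image.
properties = {'auto_disk_config': True, 'os_type': 'linux'}

headers = {}
for key, value in properties.items():
    # Underscores become hyphens in the Glance property header names,
    # and every value is stringified before it goes on the wire.
    header_key = "x-image-meta-property-%s" % key.replace('_', '-')
    headers[header_key] = str(value)

# headers now contains:
#   'x-image-meta-property-auto-disk-config': 'True'
#   'x-image-meta-property-os-type': 'linux'
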
@@ -491,15 +491,14 @@ def upload_vhd(session, args):
glance_host = params["glance_host"]
glance_port = params["glance_port"]
sr_path = params["sr_path"]
os_type = params["os_type"]
auth_token = params["auth_token"]
options = params["options"]
properties = params["properties"]

staging_path = _make_staging_area(sr_path)
try:
_prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids)
_upload_tarball(staging_path, image_id, glance_host, glance_port,
os_type, auth_token, options)
auth_token, properties)
finally:
_cleanup_staging_area(staging_path)