Remove API v1

The v1 API has been deprecated for many releases now. We have not been able to
remove it due to SDKs and tooling being slow to update. This is the latest
attempt to see if it has been long enough.

Change-Id: I03bf2db5bd7e2fdfb4f6032758ccaf2b348a82ba

commit 3e91de956e
parent 55e2934722
@@ -1,8 +1,11 @@
:tocdepth: 2

=================================
Block Storage API V1 (DEPRECATED)
=================================
=============================
Block Storage API V1 (LEGACY)
=============================

The v1 API was removed in the Queens release. API reference is included here
for historical reference.

.. rest_expand_all::
@@ -8,7 +8,7 @@
"id": "2ef47aee-8844-490c-804d-2a8efe561c65",
"links": [
{
"href": "http://localhost:8776/v1/c95fc3e4afe248a49a28828f286a7b38/backups/2ef47aee-8844-490c-804d-2a8efe561c65",
"href": "http://localhost:8776/v2/c95fc3e4afe248a49a28828f286a7b38/backups/2ef47aee-8844-490c-804d-2a8efe561c65",
"rel": "self"
},
{
@@ -9,7 +9,7 @@
"id": "2ef47aee-8844-490c-804d-2a8efe561c65",
"links": [
{
"href": "http://localhost:8776/v1/c95fc3e4afe248a49a28828f286a7b38/backups/2ef47aee-8844-490c-804d-2a8efe561c65",
"href": "http://localhost:8776/v2/c95fc3e4afe248a49a28828f286a7b38/backups/2ef47aee-8844-490c-804d-2a8efe561c65",
"rel": "self"
},
{
@@ -34,7 +34,7 @@
"id": "4dbf0ec2-0b57-4669-9823-9f7c76f2b4f8",
"links": [
{
"href": "http://localhost:8776/v1/c95fc3e4afe248a49a28828f286a7b38/backups/4dbf0ec2-0b57-4669-9823-9f7c76f2b4f8",
"href": "http://localhost:8776/v2/c95fc3e4afe248a49a28828f286a7b38/backups/4dbf0ec2-0b57-4669-9823-9f7c76f2b4f8",
"rel": "self"
},
{
@@ -4,7 +4,7 @@
"id": "2ef47aee-8844-490c-804d-2a8efe561c65",
"links": [
{
"href": "http://localhost:8776/v1/c95fc3e4afe248a49a28828f286a7b38/backups/2ef47aee-8844-490c-804d-2a8efe561c65",
"href": "http://localhost:8776/v2/c95fc3e4afe248a49a28828f286a7b38/backups/2ef47aee-8844-490c-804d-2a8efe561c65",
"rel": "self"
},
{
@@ -18,7 +18,7 @@
"id": "4dbf0ec2-0b57-4669-9823-9f7c76f2b4f8",
"links": [
{
"href": "http://localhost:8776/v1/c95fc3e4afe248a49a28828f286a7b38/backups/4dbf0ec2-0b57-4669-9823-9f7c76f2b4f8",
"href": "http://localhost:8776/v2/c95fc3e4afe248a49a28828f286a7b38/backups/4dbf0ec2-0b57-4669-9823-9f7c76f2b4f8",
"rel": "self"
},
{
@@ -142,7 +142,7 @@
"links": [],
"namespace": "http://docs.openstack.org/volume/ext/image-create/api/v1",
"alias": "os-image-create",
"description": "Allow creating a volume from an image in the Create Volume v1 API."
"description": "Allow creating a volume from an image in the Create Volume API."
},
{
"updated": "2014-01-10T00:00:00-00:00",
@@ -8,22 +8,6 @@
"type": "application/vnd.openstack.volume+json;version=1"
}
],
"id": "v1.0",
"links": [
{
"href": "http://23.253.248.171:8776/v1/v2.json",
"rel": "self"
}
]
},
{
"status": "CURRENT",
"media-types": [
{
"base": "application/json",
"type": "application/vnd.openstack.volume+json;version=1"
}
],
"id": "v2.0",
"links": [
{
@@ -1,29 +1,5 @@
{
"versions": [
{
"status": "DEPRECATED",
"updated": "2014-06-28T12:20:21Z",
"links": [
{
"href": "http://docs.openstack.org/",
"type": "text/html",
"rel": "describedby"
},
{
"href": "http://10.0.2.15:8776/v1/",
"rel": "self"
}
],
"min_version": "",
"version": "",
"media-types": [
{
"base": "application/json",
"type": "application/vnd.openstack.volume+json;version=1"
}
],
"id": "v1.0"
},
{
"status": "SUPPORTED",
"updated": "2014-06-28T12:20:21Z",
@@ -8,7 +8,7 @@
"id": "2ef47aee-8844-490c-804d-2a8efe561c65",
"links": [
{
"href": "http://localhost:8776/v1/c95fc3e4afe248a49a28828f286a7b38/backups/2ef47aee-8844-490c-804d-2a8efe561c65",
"href": "http://localhost:8776/v3/c95fc3e4afe248a49a28828f286a7b38/backups/2ef47aee-8844-490c-804d-2a8efe561c65",
"rel": "self"
},
{
@@ -9,7 +9,7 @@
"id": "2ef47aee-8844-490c-804d-2a8efe561c65",
"links": [
{
"href": "http://localhost:8776/v1/c95fc3e4afe248a49a28828f286a7b38/backups/2ef47aee-8844-490c-804d-2a8efe561c65",
"href": "http://localhost:8776/v3/c95fc3e4afe248a49a28828f286a7b38/backups/2ef47aee-8844-490c-804d-2a8efe561c65",
"rel": "self"
},
{
@@ -36,7 +36,7 @@
"id": "4dbf0ec2-0b57-4669-9823-9f7c76f2b4f8",
"links": [
{
"href": "http://localhost:8776/v1/c95fc3e4afe248a49a28828f286a7b38/backups/4dbf0ec2-0b57-4669-9823-9f7c76f2b4f8",
"href": "http://localhost:8776/v3/c95fc3e4afe248a49a28828f286a7b38/backups/4dbf0ec2-0b57-4669-9823-9f7c76f2b4f8",
"rel": "self"
},
{
@@ -4,7 +4,7 @@
"id": "2ef47aee-8844-490c-804d-2a8efe561c65",
"links": [
{
"href": "http://localhost:8776/v1/c95fc3e4afe248a49a28828f286a7b38/backups/2ef47aee-8844-490c-804d-2a8efe561c65",
"href": "http://localhost:8776/v3/c95fc3e4afe248a49a28828f286a7b38/backups/2ef47aee-8844-490c-804d-2a8efe561c65",
"rel": "self"
},
{
@@ -18,7 +18,7 @@
"id": "4dbf0ec2-0b57-4669-9823-9f7c76f2b4f8",
"links": [
{
"href": "http://localhost:8776/v1/c95fc3e4afe248a49a28828f286a7b38/backups/4dbf0ec2-0b57-4669-9823-9f7c76f2b4f8",
"href": "http://localhost:8776/v3/c95fc3e4afe248a49a28828f286a7b38/backups/4dbf0ec2-0b57-4669-9823-9f7c76f2b4f8",
"rel": "self"
},
{
@@ -142,7 +142,7 @@
"links": [],
"namespace": "http://docs.openstack.org/volume/ext/image-create/api/v1",
"alias": "os-image-create",
"description": "Allow creating a volume from an image in the Create Volume v1 API."
"description": "Allow creating a volume from an image in the Create Volume API."
},
{
"updated": "2014-01-10T00:00:00-00:00",
@@ -1,29 +1,5 @@
{
"versions": [
{
"id": "v1.0",
"links": [
{
"href": "http://docs.openstack.org/",
"rel": "describedby",
"type": "text/html"
},
{
"href": "http://23.253.248.171:8776/v1/",
"rel": "self"
}
],
"media-types": [
{
"base": "application/json",
"type": "application/vnd.openstack.volume+json;version=1"
}
],
"min_version": "",
"status": "DEPRECATED",
"updated": "2014-06-28T12:20:21Z",
"version": ""
},
{
"id": "v2.0",
"links": [
@@ -1,29 +1,5 @@
{
"versions": [
{
"status": "DEPRECATED",
"updated": "2014-06-28T12:20:21Z",
"links": [
{
"href": "http://docs.openstack.org/",
"type": "text/html",
"rel": "describedby"
},
{
"href": "http://10.0.2.15:8776/v1/",
"rel": "self"
}
],
"min_version": "",
"version": "",
"media-types": [
{
"base": "application/json",
"type": "application/vnd.openstack.volume+json;version=1"
}
],
"id": "v1.0"
},
{
"status": "SUPPORTED",
"updated": "2014-06-28T12:20:21Z",
@@ -25,12 +25,19 @@ LOG = logging.getLogger(__name__)


def root_app_factory(loader, global_conf, **local_conf):
    if CONF.enable_v1_api:
        LOG.warning('The v1 API is deprecated and is not under active '
                    'development. You should set enable_v1_api=false '
    # To support upgrades from previous api-paste config files, we need
    # to check for and remove any legacy references to the v1 API
    if '/v1' in local_conf:
        LOG.warning('The v1 API has been removed and is no longer '
                    'available. Client applications should now be '
                    'moving to v3. Ensure enable_v3_api=true in your '
                    'cinder.conf file.')
        del local_conf['/v1']

    if CONF.enable_v2_api:
        LOG.warning('The v2 API is deprecated and is not under active '
                    'development. You should set enable_v2_api=false '
                    'and enable_v3_api=true in your cinder.conf file.')
    else:
        del local_conf['/v1']
    if not CONF.enable_v2_api:
        del local_conf['/v2']
    return paste.urlmap.urlmap_factory(loader, global_conf, **local_conf)
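Note: the behaviour of the new root_app_factory above can be pictured with a short, self-contained sketch. The dictionary keys mirror the composite section of an api-paste.ini file, but the application names and helper function are illustrative placeholders, not cinder code.

# Standalone sketch of the filtering done by root_app_factory above.
def filter_legacy_paste_entries(local_conf, enable_v2_api=True):
    """Drop mappings for endpoints that were removed or are disabled."""
    conf = dict(local_conf)
    # A leftover /v1 entry from a pre-Queens config is simply discarded.
    conf.pop('/v1', None)
    # /v2 survives only while the deprecated v2 API is still enabled.
    if not enable_v2_api:
        conf.pop('/v2', None)
    return conf


old_style_conf = {'/': 'apiversions',
                  '/v1': 'openstack_volume_api_v1',
                  '/v2': 'openstack_volume_api_v2',
                  '/v3': 'openstack_volume_api_v3'}
print(sorted(filter_legacy_paste_entries(old_style_conf)))
# ['/', '/v2', '/v3']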
@@ -112,7 +112,7 @@ REST_API_VERSION_HISTORY = """
# The minimum and maximum versions of the API supported
# The default api version request is defined to be the
# minimum version of the API supported.
# Explicitly using /v1 or /v2 endpoints will still work
# Explicitly using /v2 endpoints will still work
_MIN_API_VERSION = "3.0"
_MAX_API_VERSION = "3.44"
_LEGACY_API_VERSION1 = "1.0"
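For reference, the bounds above mean a requested microversion must fall between 3.0 and 3.44. A simplified illustration of that check follows; it is not cinder's APIVersionRequest class, just the comparison it implies.

# Simplified bounds check implied by _MIN_API_VERSION / _MAX_API_VERSION.
MIN_VERSION = (3, 0)
MAX_VERSION = (3, 44)


def is_supported(requested):
    major, minor = (int(part) for part in requested.split('.'))
    return MIN_VERSION <= (major, minor) <= MAX_VERSION


print(is_supported('3.27'))   # True
print(is_supported('2.0'))    # False - legacy /v2 is routed separately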
@@ -283,10 +283,10 @@ class Request(webob.Request):

        Microversions starts with /v3, so if a client sends a request for
        version 1.0 or 2.0 with the /v3 endpoint, throw an exception.
        Sending a header with any microversion to a /v1 or /v2 endpoint will
        Sending a header with any microversion to a /v2 endpoint will
        be ignored.
        Note that a microversion must be set for the legacy endpoints. This
        will appear as 1.0 and 2.0 for /v1 and /v2.
        Note that a microversion must be set for the legacy endpoint. This
        will appear as 2.0 for /v2.
        """
        if API_VERSION_REQUEST_HEADER in self.headers and 'v3' in url:
            hdr_string = self.headers[API_VERSION_REQUEST_HEADER]
@@ -318,9 +318,7 @@ class Request(webob.Request):
                max_ver=api_version.max_api_version().get_string())

        else:
            if 'v1' in url:
                self.api_version_request = api_version.legacy_api_version1()
            elif 'v2' in url:
            if 'v2' in url:
                self.api_version_request = api_version.legacy_api_version2()
            else:
                self.api_version_request = api_version.APIVersionRequest(
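To make the routing above concrete: only the /v3 endpoint honours the OpenStack-API-Version header, while /v2 is pinned to the legacy 2.0 value. A client-side example follows; the endpoint URL, project ID and token are placeholders for a real deployment.

# Requesting a specific microversion; only meaningful against /v3.
import requests

headers = {
    'X-Auth-Token': '<token>',
    # Ignored on a /v2 URL, honoured on /v3.
    'OpenStack-API-Version': 'volume 3.44',
}
resp = requests.get('http://localhost:8776/v3/<project_id>/volumes',
                    headers=headers)
# Cinder echoes the negotiated version back (see the Vary/version header
# assertions in the version controller tests further down).
print(resp.status_code, resp.headers.get('OpenStack-API-Version'))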
@ -1,92 +0,0 @@
|
||||
# Copyright 2011 OpenStack Foundation
|
||||
# Copyright 2011 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
WSGI middleware for OpenStack Volume API.
|
||||
"""
|
||||
|
||||
from cinder.api import extensions
|
||||
import cinder.api.openstack
|
||||
from cinder.api.v1 import snapshots
|
||||
from cinder.api.v1 import volumes
|
||||
from cinder.api.v2 import limits
|
||||
from cinder.api.v2 import snapshot_metadata
|
||||
from cinder.api.v2 import types
|
||||
from cinder.api.v2 import volume_metadata
|
||||
from cinder.api import versions
|
||||
|
||||
|
||||
class APIRouter(cinder.api.openstack.APIRouter):
|
||||
"""Routes requests on the API to the appropriate controller and method."""
|
||||
ExtensionManager = extensions.ExtensionManager
|
||||
|
||||
def _setup_routes(self, mapper, ext_mgr):
|
||||
self.resources['versions'] = versions.create_resource()
|
||||
mapper.connect("versions", "/",
|
||||
controller=self.resources['versions'],
|
||||
action='index')
|
||||
|
||||
mapper.redirect("", "/")
|
||||
|
||||
self.resources['volumes'] = volumes.create_resource(ext_mgr)
|
||||
mapper.resource("volume", "volumes",
|
||||
controller=self.resources['volumes'],
|
||||
collection={'detail': 'GET'},
|
||||
member={'action': 'POST'})
|
||||
|
||||
self.resources['types'] = types.create_resource()
|
||||
mapper.resource("type", "types",
|
||||
controller=self.resources['types'])
|
||||
|
||||
self.resources['snapshots'] = snapshots.create_resource(ext_mgr)
|
||||
mapper.resource("snapshot", "snapshots",
|
||||
controller=self.resources['snapshots'],
|
||||
collection={'detail': 'GET'},
|
||||
member={'action': 'POST'})
|
||||
|
||||
self.resources['snapshot_metadata'] = \
|
||||
snapshot_metadata.create_resource()
|
||||
snapshot_metadata_controller = self.resources['snapshot_metadata']
|
||||
|
||||
mapper.resource("snapshot_metadata", "metadata",
|
||||
controller=snapshot_metadata_controller,
|
||||
parent_resource=dict(member_name='snapshot',
|
||||
collection_name='snapshots'))
|
||||
|
||||
mapper.connect("metadata",
|
||||
"/{project_id}/snapshots/{snapshot_id}/metadata",
|
||||
controller=snapshot_metadata_controller,
|
||||
action='update_all',
|
||||
conditions={"method": ['PUT']})
|
||||
|
||||
self.resources['limits'] = limits.create_resource()
|
||||
mapper.resource("limit", "limits",
|
||||
controller=self.resources['limits'])
|
||||
self.resources['volume_metadata'] = \
|
||||
volume_metadata.create_resource()
|
||||
volume_metadata_controller = self.resources['volume_metadata']
|
||||
|
||||
mapper.resource("volume_metadata", "metadata",
|
||||
controller=volume_metadata_controller,
|
||||
parent_resource=dict(member_name='volume',
|
||||
collection_name='volumes'))
|
||||
|
||||
mapper.connect("metadata",
|
||||
"/{project_id}/volumes/{volume_id}/metadata",
|
||||
controller=volume_metadata_controller,
|
||||
action='update_all',
|
||||
conditions={"method": ['PUT']})
|
@ -1,105 +0,0 @@
|
||||
# Copyright 2011 Justin Santa Barbara
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""The volumes snapshots api."""
|
||||
|
||||
|
||||
from six.moves import http_client
|
||||
from webob import exc
|
||||
|
||||
from cinder.api.openstack import wsgi
|
||||
from cinder.api.v2 import snapshots as snapshots_v2
|
||||
|
||||
|
||||
def _snapshot_v2_to_v1(snapv2_result):
|
||||
"""Transform a v2 snapshot dict to v1."""
|
||||
snapshots = snapv2_result.get('snapshots')
|
||||
if snapshots is None:
|
||||
snapshots = [snapv2_result['snapshot']]
|
||||
|
||||
for snapv1 in snapshots:
|
||||
# The updated_at property was added in v2
|
||||
snapv1.pop('updated_at', None)
|
||||
|
||||
# Name and description were renamed
|
||||
snapv1['display_name'] = snapv1.pop('name', '')
|
||||
snapv1['display_description'] = snapv1.pop('description', '')
|
||||
|
||||
return snapv2_result
|
||||
|
||||
|
||||
def _update_search_opts(req):
|
||||
"""Update the requested search options.
|
||||
|
||||
This is a little silly, as ``display_name`` needs to be switched
|
||||
to just ``name``, which internally to v2 gets switched to be
|
||||
``display_name``. Oh well.
|
||||
"""
|
||||
if 'display_name' in req.GET:
|
||||
req.GET['name'] = req.GET.pop('display_name')
|
||||
return req
|
||||
|
||||
|
||||
class SnapshotsController(snapshots_v2.SnapshotsController):
|
||||
"""The Snapshots API controller for the OpenStack API."""
|
||||
|
||||
def show(self, req, id):
|
||||
"""Return data about the given snapshot."""
|
||||
result = super(SnapshotsController, self).show(req, id)
|
||||
return _snapshot_v2_to_v1(result)
|
||||
|
||||
def index(self, req):
|
||||
"""Returns a summary list of snapshots."""
|
||||
return _snapshot_v2_to_v1(
|
||||
super(SnapshotsController, self).index(
|
||||
_update_search_opts(req)))
|
||||
|
||||
def detail(self, req):
|
||||
"""Returns a detailed list of snapshots."""
|
||||
return _snapshot_v2_to_v1(
|
||||
super(SnapshotsController, self).detail(
|
||||
_update_search_opts(req)))
|
||||
|
||||
@wsgi.response(http_client.OK)
|
||||
def create(self, req, body):
|
||||
"""Creates a new snapshot."""
|
||||
if (body is None or not body.get('snapshot') or
|
||||
not isinstance(body['snapshot'], dict)):
|
||||
raise exc.HTTPUnprocessableEntity()
|
||||
|
||||
if 'display_name' in body['snapshot']:
|
||||
body['snapshot']['name'] = body['snapshot'].pop('display_name')
|
||||
|
||||
if 'display_description' in body['snapshot']:
|
||||
body['snapshot']['description'] = body['snapshot'].pop(
|
||||
'display_description')
|
||||
|
||||
if 'metadata' not in body['snapshot']:
|
||||
body['snapshot']['metadata'] = {}
|
||||
|
||||
return _snapshot_v2_to_v1(
|
||||
super(SnapshotsController, self).create(req, body))
|
||||
|
||||
def update(self, req, id, body):
|
||||
"""Update a snapshot."""
|
||||
try:
|
||||
return _snapshot_v2_to_v1(
|
||||
super(SnapshotsController, self).update(req, id, body))
|
||||
except exc.HTTPBadRequest:
|
||||
raise exc.HTTPUnprocessableEntity()
|
||||
|
||||
|
||||
def create_resource(ext_mgr):
|
||||
return wsgi.Resource(SnapshotsController(ext_mgr))
|
@ -1,143 +0,0 @@
|
||||
# Copyright 2011 Justin Santa Barbara
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""The volumes api."""
|
||||
|
||||
from oslo_log import log as logging
|
||||
from six.moves import http_client
|
||||
from webob import exc
|
||||
|
||||
from cinder.api.openstack import wsgi
|
||||
from cinder.api.v2 import volumes as volumes_v2
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _attachment_v2_to_v1(vol):
|
||||
"""Converts v2 attachment details to v1 format."""
|
||||
d = []
|
||||
attachments = vol.pop('attachments', [])
|
||||
for attachment in attachments:
|
||||
a = {'id': attachment.get('id'),
|
||||
'attachment_id': attachment.get('attachment_id'),
|
||||
'volume_id': attachment.get('volume_id'),
|
||||
'server_id': attachment.get('server_id'),
|
||||
'host_name': attachment.get('host_name'),
|
||||
'device': attachment.get('device'),
|
||||
}
|
||||
d.append(a)
|
||||
|
||||
return d
|
||||
|
||||
|
||||
def _volume_v2_to_v1(volv2_results, image_id=None):
|
||||
"""Converts v2 volume details to v1 format."""
|
||||
volumes = volv2_results.get('volumes')
|
||||
if volumes is None:
|
||||
volumes = [volv2_results['volume']]
|
||||
|
||||
for vol in volumes:
|
||||
# Need to form the string true/false explicitly here to
|
||||
# maintain our API contract
|
||||
if vol.get('multiattach'):
|
||||
vol['multiattach'] = 'true'
|
||||
else:
|
||||
vol['multiattach'] = 'false'
|
||||
|
||||
if not vol.get('image_id') and image_id:
|
||||
vol['image_id'] = image_id
|
||||
|
||||
vol['attachments'] = _attachment_v2_to_v1(vol)
|
||||
|
||||
if not vol.get('metadata'):
|
||||
vol['metadata'] = {}
|
||||
|
||||
# Convert the name changes
|
||||
vol['display_name'] = vol.pop('name')
|
||||
vol['display_description'] = vol.pop('description', '')
|
||||
|
||||
# Remove the properties not present for v1
|
||||
vol.pop('consistencygroup_id', None)
|
||||
vol.pop('encryption_key_id', None)
|
||||
vol.pop('links', None)
|
||||
vol.pop('migration_status', None)
|
||||
vol.pop('replication_status', None)
|
||||
vol.pop('updated_at', None)
|
||||
vol.pop('user_id', None)
|
||||
|
||||
LOG.debug("vol=%s", vol)
|
||||
|
||||
return volv2_results
|
||||
|
||||
|
||||
class VolumeController(volumes_v2.VolumeController):
|
||||
"""The Volumes API controller for the OpenStack API."""
|
||||
|
||||
def show(self, req, id):
|
||||
"""Return data about the given volume."""
|
||||
return _volume_v2_to_v1(super(VolumeController, self).show(
|
||||
req, id))
|
||||
|
||||
def index(self, req):
|
||||
"""Returns a summary list of volumes."""
|
||||
|
||||
# The v1 info was much more detailed than the v2 non-detailed result
|
||||
return _volume_v2_to_v1(
|
||||
super(VolumeController, self).detail(req))
|
||||
|
||||
def detail(self, req):
|
||||
"""Returns a detailed list of volumes."""
|
||||
return _volume_v2_to_v1(
|
||||
super(VolumeController, self).detail(req))
|
||||
|
||||
@wsgi.response(http_client.OK)
|
||||
def create(self, req, body):
|
||||
"""Creates a new volume."""
|
||||
if (body is None or not body.get('volume') or
|
||||
not isinstance(body['volume'], dict)):
|
||||
raise exc.HTTPUnprocessableEntity()
|
||||
|
||||
image_id = None
|
||||
if body.get('volume'):
|
||||
image_id = body['volume'].get('imageRef')
|
||||
|
||||
try:
|
||||
return _volume_v2_to_v1(
|
||||
super(VolumeController, self).create(req, body),
|
||||
image_id=image_id)
|
||||
except exc.HTTPBadRequest as e:
|
||||
# Image failures are the only ones that actually used
|
||||
# HTTPBadRequest
|
||||
error_msg = '%s' % e
|
||||
if 'Invalid image' in error_msg:
|
||||
raise
|
||||
raise exc.HTTPUnprocessableEntity()
|
||||
|
||||
def update(self, req, id, body):
|
||||
"""Update a volume."""
|
||||
if (body is None or not body.get('volume') or
|
||||
not isinstance(body['volume'], dict)):
|
||||
raise exc.HTTPUnprocessableEntity()
|
||||
|
||||
try:
|
||||
return _volume_v2_to_v1(super(VolumeController, self).update(
|
||||
req, id, body))
|
||||
except exc.HTTPBadRequest:
|
||||
raise exc.HTTPUnprocessableEntity()
|
||||
|
||||
|
||||
def create_resource(ext_mgr):
|
||||
return wsgi.Resource(VolumeController(ext_mgr))
|
@@ -34,18 +34,6 @@ _LINKS = [{


_KNOWN_VERSIONS = {
    "v1.0": {
        "id": "v1.0",
        "status": "DEPRECATED",
        "version": "",
        "min_version": "",
        "updated": "2016-05-02T20:25:19Z",
        "links": _LINKS,
        "media-types": [{
            "base": "application/json",
            "type": "application/vnd.openstack.volume+json;version=1",
        }]
    },
    "v2.0": {
        "id": "v2.0",
        "status": "DEPRECATED",
@@ -97,21 +85,11 @@ class VersionsController(wsgi.Controller):
    def __init__(self):
        super(VersionsController, self).__init__(None)

    @wsgi.Controller.api_version('1.0')
    @wsgi.Controller.api_version('2.0')
    def index(self, req):  # pylint: disable=E0102
        """Return versions supported prior to the microversions epoch."""
        builder = views_versions.get_view_builder(req)
        known_versions = copy.deepcopy(_KNOWN_VERSIONS)
        known_versions.pop('v2.0')
        known_versions.pop('v3.0')
        return builder.build_versions(known_versions)

    @index.api_version('2.0')
    def index(self, req):  # pylint: disable=E0102
        """Return versions supported prior to the microversions epoch."""
        builder = views_versions.get_view_builder(req)
        known_versions = copy.deepcopy(_KNOWN_VERSIONS)
        known_versions.pop('v1.0')
        known_versions.pop('v3.0')
        return builder.build_versions(known_versions)
@@ -120,12 +98,11 @@ class VersionsController(wsgi.Controller):
        """Return versions supported after the start of microversions."""
        builder = views_versions.get_view_builder(req)
        known_versions = copy.deepcopy(_KNOWN_VERSIONS)
        known_versions.pop('v1.0')
        known_versions.pop('v2.0')
        return builder.build_versions(known_versions)

    # NOTE (cknight): Calling the versions API without
    # /v1, /v2, or /v3 in the URL will lead to this unversioned
    # /v2 or /v3 in the URL will lead to this unversioned
    # method, which should always return info about all
    # available versions.
    @wsgi.response(http_client.MULTIPLE_CHOICES)
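The unversioned endpoint keeps answering with 300 Multiple Choices, and after this change it only advertises v2 and v3. A quick illustration follows; the endpoint URL is a placeholder.

# Listing available API versions from the unversioned endpoint.
import requests

resp = requests.get('http://localhost:8776/')
print(resp.status_code)  # 300 (Multiple Choices), per the decorator above
for version in resp.json()['versions']:
    print(version['id'], version['status'])
# After this change the list no longer contains a v1.0 entry.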
@@ -75,5 +75,5 @@ class ViewBuilder(object):
        return href

    def _get_base_url_without_version(self):
        """Get the base URL with out the /v1 suffix."""
        """Get the base URL with out the /v3 suffix."""
        return re.sub('v[1-9]+/?$', '', self.base_url)
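A small illustration of the version-stripping regex above (the example URLs are made up):

import re

for url in ('http://localhost:8776/v3/',
            'http://localhost:8776/v2',
            'http://localhost:8776/'):
    print(re.sub('v[1-9]+/?$', '', url))
# All three print http://localhost:8776/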
@@ -82,10 +82,6 @@ global_opts = [
               help='http/https timeout value for glance operations. If no '
                    'value (None) is supplied here, the glanceclient default '
                    'value is used.'),
    cfg.BoolOpt('enable_v1_api',
                default=False,
                deprecated_for_removal=True,
                help="DEPRECATED: Deploy v1 of the Cinder API."),
    cfg.BoolOpt('enable_v2_api',
                default=True,
                deprecated_for_removal=True,
@@ -21,7 +21,7 @@ from six.moves import http_client
import webob

from cinder.api import extensions
from cinder.api.v1 import router
from cinder.api.v3 import router
from cinder.tests.functional import functional_helpers
@@ -22,7 +22,7 @@ import webob

from cinder.api.openstack import api_version_request
from cinder.api.openstack import wsgi
from cinder.api.v1 import router
from cinder.api.v3 import router
from cinder.api import versions
from cinder import exception
from cinder import test
@@ -59,7 +59,7 @@ class VersionsControllerTestCase(test.TestCase):
                         response.headers[VERSION_HEADER_NAME])
        self.assertEqual(VERSION_HEADER_NAME, response.headers['Vary'])

    @ddt.data('1.0', '2.0', '3.0')
    @ddt.data('2.0', '3.0')
    def test_versions_root(self, version):
        req = self.build_request(base_url='http://localhost')

@@ -69,11 +69,7 @@
        version_list = body['versions']

        ids = [v['id'] for v in version_list]
        self.assertEqual({'v1.0', 'v2.0', 'v3.0'}, set(ids))

        v1 = [v for v in version_list if v['id'] == 'v1.0'][0]
        self.assertEqual('', v1.get('min_version'))
        self.assertEqual('', v1.get('version'))
        self.assertEqual({'v2.0', 'v3.0'}, set(ids))

        v2 = [v for v in version_list if v['id'] == 'v2.0'][0]
        self.assertEqual('', v2.get('min_version'))
@@ -85,19 +81,13 @@
        self.assertEqual(api_version_request._MIN_API_VERSION,
                         v3.get('min_version'))

    def test_versions_v1_no_header(self):
        req = self.build_request(base_url='http://localhost/v1')

        response = req.get_response(router.APIRouter())
        self.assertEqual(http_client.OK, response.status_int)

    def test_versions_v2_no_header(self):
        req = self.build_request(base_url='http://localhost/v2')

        response = req.get_response(router.APIRouter())
        self.assertEqual(http_client.OK, response.status_int)

    @ddt.data('1.0', '2.0', '3.0')
    @ddt.data('2.0', '3.0')
    def test_versions(self, version):
        req = self.build_request(
            base_url='http://localhost/v{}'.format(version[0]),
@@ -140,7 +130,7 @@

        self.assertEqual(http_client.BAD_REQUEST, response.status_int)

    @ddt.data('1.0', '2.0', '3.0')
    @ddt.data('2.0', '3.0')
    def test_versions_response_fault(self, version):
        req = self.build_request(header_version=version)
        req.api_version_request = (
@ -1,618 +0,0 @@
|
||||
# Copyright 2011 Denali Systems, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import ddt
|
||||
import mock
|
||||
from oslo_config import cfg
|
||||
from six.moves import http_client
|
||||
from six.moves.urllib import parse as urllib
|
||||
import webob
|
||||
|
||||
from cinder.api import common
|
||||
from cinder.api.v1 import snapshots
|
||||
from cinder import context
|
||||
from cinder import db
|
||||
from cinder import exception
|
||||
from cinder import objects
|
||||
from cinder.objects import fields
|
||||
from cinder import test
|
||||
from cinder.tests.unit.api import fakes
|
||||
from cinder.tests.unit.api.v2 import fakes as v2_fakes
|
||||
from cinder.tests.unit import fake_constants as fake
|
||||
from cinder.tests.unit import fake_snapshot
|
||||
from cinder.tests.unit import fake_volume
|
||||
from cinder.tests.unit import utils
|
||||
from cinder import volume
|
||||
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
UUID = '00000000-0000-0000-0000-000000000003'
|
||||
INVALID_UUID = '00000000-0000-0000-0000-000000000004'
|
||||
|
||||
|
||||
def _get_default_snapshot_param():
|
||||
return {
|
||||
'id': UUID,
|
||||
'volume_id': fake.VOLUME_ID,
|
||||
'status': fields.SnapshotStatus.AVAILABLE,
|
||||
'volume_size': 100,
|
||||
'created_at': None,
|
||||
'updated_at': None,
|
||||
'user_id': 'bcb7746c7a41472d88a1ffac89ba6a9b',
|
||||
'project_id': '7ffe17a15c724e2aa79fc839540aec15',
|
||||
'display_name': 'Default name',
|
||||
'display_description': 'Default description',
|
||||
'deleted': None,
|
||||
'volume': {'availability_zone': 'test_zone'}
|
||||
}
|
||||
|
||||
|
||||
def fake_snapshot_delete(self, context, snapshot):
|
||||
if snapshot['id'] != UUID:
|
||||
raise exception.SnapshotNotFound(snapshot['id'])
|
||||
|
||||
|
||||
def fake_snapshot_get(self, context, snapshot_id):
|
||||
if snapshot_id != UUID:
|
||||
raise exception.SnapshotNotFound(snapshot_id)
|
||||
|
||||
param = _get_default_snapshot_param()
|
||||
return param
|
||||
|
||||
|
||||
def fake_snapshot_get_all(self, context, search_opts=None):
|
||||
param = _get_default_snapshot_param()
|
||||
return [param]
|
||||
|
||||
|
||||
@ddt.ddt
|
||||
class SnapshotApiTest(test.TestCase):
|
||||
def setUp(self):
|
||||
super(SnapshotApiTest, self).setUp()
|
||||
self.controller = snapshots.SnapshotsController()
|
||||
self.ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True)
|
||||
|
||||
@mock.patch(
|
||||
'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
|
||||
def test_snapshot_create(self, mock_validate):
|
||||
volume = utils.create_volume(self.ctx)
|
||||
snapshot_name = 'Snapshot Test Name'
|
||||
snapshot_description = 'Snapshot Test Desc'
|
||||
snapshot = {
|
||||
"volume_id": volume.id,
|
||||
"force": False,
|
||||
"name": snapshot_name,
|
||||
"description": snapshot_description
|
||||
}
|
||||
|
||||
body = dict(snapshot=snapshot)
|
||||
req = fakes.HTTPRequest.blank('/v1/snapshots')
|
||||
resp_dict = self.controller.create(req, body)
|
||||
|
||||
self.assertIn('snapshot', resp_dict)
|
||||
self.assertEqual(snapshot_name, resp_dict['snapshot']['display_name'])
|
||||
self.assertEqual(snapshot_description,
|
||||
resp_dict['snapshot']['display_description'])
|
||||
self.assertTrue(mock_validate.called)
|
||||
self.assertNotIn('updated_at', resp_dict['snapshot'])
|
||||
db.volume_destroy(self.ctx, volume.id)
|
||||
|
||||
@ddt.data(True, 'y', 'true', 'trUE', 'yes', '1', 'on', 1, "1 ")
|
||||
def test_snapshot_create_force(self, force_param):
|
||||
volume = utils.create_volume(self.ctx, status='in-use')
|
||||
snapshot_name = 'Snapshot Test Name'
|
||||
snapshot_description = 'Snapshot Test Desc'
|
||||
snapshot = {
|
||||
"volume_id": volume.id,
|
||||
"force": force_param,
|
||||
"name": snapshot_name,
|
||||
"description": snapshot_description
|
||||
}
|
||||
body = dict(snapshot=snapshot)
|
||||
req = fakes.HTTPRequest.blank('/v1/snapshots')
|
||||
resp_dict = self.controller.create(req, body)
|
||||
|
||||
self.assertIn('snapshot', resp_dict)
|
||||
self.assertEqual(snapshot_name,
|
||||
resp_dict['snapshot']['display_name'])
|
||||
self.assertEqual(snapshot_description,
|
||||
resp_dict['snapshot']['display_description'])
|
||||
self.assertNotIn('updated_at', resp_dict['snapshot'])
|
||||
|
||||
db.volume_destroy(self.ctx, volume.id)
|
||||
|
||||
@ddt.data(False, 'n', 'false', 'falSE', 'No', '0', 'off', 0)
|
||||
def test_snapshot_create_force_failure(self, force_param):
|
||||
volume = utils.create_volume(self.ctx, status='in-use')
|
||||
snapshot_name = 'Snapshot Test Name'
|
||||
snapshot_description = 'Snapshot Test Desc'
|
||||
snapshot = {
|
||||
"volume_id": volume.id,
|
||||
"force": force_param,
|
||||
"name": snapshot_name,
|
||||
"description": snapshot_description
|
||||
}
|
||||
body = dict(snapshot=snapshot)
|
||||
req = fakes.HTTPRequest.blank('/v1/snapshots')
|
||||
self.assertRaises(exception.InvalidVolume,
|
||||
self.controller.create,
|
||||
req,
|
||||
body)
|
||||
|
||||
db.volume_destroy(self.ctx, volume.id)
|
||||
|
||||
@ddt.data("**&&^^%%$$##@@", '-1', 2, '01')
|
||||
def test_snapshot_create_invalid_force_param(self, force_param):
|
||||
volume = utils.create_volume(self.ctx, status='in-use')
|
||||
snapshot_name = 'Snapshot Test Name'
|
||||
snapshot_description = 'Snapshot Test Desc'
|
||||
|
||||
snapshot = {
|
||||
"volume_id": volume.id,
|
||||
"force": force_param,
|
||||
"name": snapshot_name,
|
||||
"description": snapshot_description
|
||||
}
|
||||
body = dict(snapshot=snapshot)
|
||||
req = fakes.HTTPRequest.blank('/v1/snapshots')
|
||||
self.assertRaises(exception.InvalidParameterValue,
|
||||
self.controller.create,
|
||||
req,
|
||||
body)
|
||||
|
||||
db.volume_destroy(self.ctx, volume.id)
|
||||
|
||||
def test_snapshot_create_without_volume_id(self):
|
||||
snapshot_name = 'Snapshot Test Name'
|
||||
snapshot_description = 'Snapshot Test Desc'
|
||||
body = {
|
||||
"snapshot": {
|
||||
"force": True,
|
||||
"name": snapshot_name,
|
||||
"description": snapshot_description
|
||||
}
|
||||
}
|
||||
req = fakes.HTTPRequest.blank('/v1/snapshots')
|
||||
self.assertRaises(webob.exc.HTTPBadRequest,
|
||||
self.controller.create, req, body)
|
||||
|
||||
@mock.patch.object(volume.api.API, "update_snapshot",
|
||||
side_effect=v2_fakes.fake_snapshot_update)
|
||||
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
|
||||
@mock.patch('cinder.db.volume_get')
|
||||
@mock.patch('cinder.objects.Snapshot.get_by_id')
|
||||
@mock.patch(
|
||||
'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
|
||||
def test_snapshot_update(
|
||||
self, mock_validate, snapshot_get_by_id, volume_get_by_id,
|
||||
snapshot_metadata_get, update_snapshot):
|
||||
snapshot = {
|
||||
'id': UUID,
|
||||
'volume_id': fake.VOLUME_ID,
|
||||
'status': fields.SnapshotStatus.AVAILABLE,
|
||||
'volume_size': 100,
|
||||
'display_name': 'Default name',
|
||||
'display_description': 'Default description',
|
||||
'expected_attrs': ['metadata'],
|
||||
}
|
||||
ctx = context.RequestContext(fake.PROJECT_ID, fake.USER_ID, True)
|
||||
snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
|
||||
fake_volume_obj = fake_volume.fake_volume_obj(ctx)
|
||||
snapshot_get_by_id.return_value = snapshot_obj
|
||||
volume_get_by_id.return_value = fake_volume_obj
|
||||
|
||||
updates = {
|
||||
"display_name": "Updated Test Name",
|
||||
}
|
||||
body = {"snapshot": updates}
|
||||
req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % UUID)
|
||||
res_dict = self.controller.update(req, UUID, body)
|
||||
expected = {
|
||||
'snapshot': {
|
||||
'id': UUID,
|
||||
'volume_id': fake.VOLUME_ID,
|
||||
'status': fields.SnapshotStatus.AVAILABLE,
|
||||
'size': 100,
|
||||
'created_at': None,
|
||||
'display_name': u'Updated Test Name',
|
||||
'display_description': u'Default description',
|
||||
'metadata': {},
|
||||
}
|
||||
}
|
||||
self.assertEqual(expected, res_dict)
|
||||
|
||||
def test_snapshot_update_missing_body(self):
|
||||
body = {}
|
||||
req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % UUID)
|
||||
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
|
||||
self.controller.update, req, UUID, body)
|
||||
|
||||
def test_snapshot_update_invalid_body(self):
|
||||
body = {'name': 'missing top level snapshot key'}
|
||||
req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % UUID)
|
||||
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
|
||||
self.controller.update, req, UUID, body)
|
||||
|
||||
def test_snapshot_update_not_found(self):
|
||||
self.mock_object(volume.api.API, "get_snapshot", fake_snapshot_get)
|
||||
updates = {
|
||||
"display_name": "Updated Test Name",
|
||||
}
|
||||
body = {"snapshot": updates}
|
||||
req = fakes.HTTPRequest.blank('/v1/snapshots/not-the-uuid')
|
||||
self.assertRaises(exception.SnapshotNotFound, self.controller.update,
|
||||
req, 'not-the-uuid', body)
|
||||
|
||||
@mock.patch.object(volume.api.API, "delete_snapshot",
|
||||
side_effect=v2_fakes.fake_snapshot_update)
|
||||
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
|
||||
@mock.patch('cinder.objects.Volume.get_by_id')
|
||||
@mock.patch('cinder.objects.Snapshot.get_by_id')
|
||||
def test_snapshot_delete(self, snapshot_get_by_id, volume_get_by_id,
|
||||
snapshot_metadata_get, delete_snapshot):
|
||||
snapshot = {
|
||||
'id': UUID,
|
||||
'volume_id': fake.VOLUME_ID,
|
||||
'status': fields.SnapshotStatus.AVAILABLE,
|
||||
'volume_size': 100,
|
||||
'display_name': 'Default name',
|
||||
'display_description': 'Default description',
|
||||
'expected_attrs': ['metadata'],
|
||||
}
|
||||
ctx = context.RequestContext(fake.PROJECT_ID, fake.USER_ID, True)
|
||||
snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
|
||||
fake_volume_obj = fake_volume.fake_volume_obj(ctx)
|
||||
snapshot_get_by_id.return_value = snapshot_obj
|
||||
volume_get_by_id.return_value = fake_volume_obj
|
||||
|
||||
snapshot_id = UUID
|
||||
req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % snapshot_id)
|
||||
resp = self.controller.delete(req, snapshot_id)
|
||||
self.assertEqual(http_client.ACCEPTED, resp.status_int)
|
||||
|
||||
def test_snapshot_delete_invalid_id(self):
|
||||
self.mock_object(volume.api.API, "delete_snapshot",
|
||||
fake_snapshot_delete)
|
||||
snapshot_id = INVALID_UUID
|
||||
req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % snapshot_id)
|
||||
self.assertRaises(exception.SnapshotNotFound, self.controller.delete,
|
||||
req, snapshot_id)
|
||||
|
||||
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
|
||||
@mock.patch('cinder.objects.Volume.get_by_id')
|
||||
@mock.patch('cinder.objects.Snapshot.get_by_id')
|
||||
def test_snapshot_show(self, snapshot_get_by_id, volume_get_by_id,
|
||||
snapshot_metadata_get):
|
||||
snapshot = {
|
||||
'id': UUID,
|
||||
'volume_id': fake.VOLUME_ID,
|
||||
'status': fields.SnapshotStatus.AVAILABLE,
|
||||
'volume_size': 100,
|
||||
'display_name': 'Default name',
|
||||
'display_description': 'Default description',
|
||||
'expected_attrs': ['metadata'],
|
||||
}
|
||||
ctx = context.RequestContext(fake.PROJECT_ID, fake.USER_ID, True)
|
||||
snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
|
||||
fake_volume_obj = fake_volume.fake_volume_obj(ctx)
|
||||
snapshot_get_by_id.return_value = snapshot_obj
|
||||
volume_get_by_id.return_value = fake_volume_obj
|
||||
req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % UUID)
|
||||
resp_dict = self.controller.show(req, UUID)
|
||||
|
||||
self.assertIn('snapshot', resp_dict)
|
||||
self.assertEqual(UUID, resp_dict['snapshot']['id'])
|
||||
self.assertNotIn('updated_at', resp_dict['snapshot'])
|
||||
|
||||
def test_snapshot_show_invalid_id(self):
|
||||
snapshot_id = INVALID_UUID
|
||||
req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % snapshot_id)
|
||||
self.assertRaises(exception.SnapshotNotFound,
|
||||
self.controller.show, req, snapshot_id)
|
||||
|
||||
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
|
||||
@mock.patch('cinder.objects.Volume.get_by_id')
|
||||
@mock.patch('cinder.objects.Snapshot.get_by_id')
|
||||
@mock.patch('cinder.volume.api.API.get_all_snapshots')
|
||||
def test_snapshot_detail(self, get_all_snapshots, snapshot_get_by_id,
|
||||
volume_get_by_id, snapshot_metadata_get):
|
||||
snapshot = {
|
||||
'id': UUID,
|
||||
'volume_id': fake.VOLUME_ID,
|
||||
'status': fields.SnapshotStatus.AVAILABLE,
|
||||
'volume_size': 100,
|
||||
'display_name': 'Default name',
|
||||
'display_description': 'Default description',
|
||||
'expected_attrs': ['metadata']
|
||||
}
|
||||
ctx = context.RequestContext(fake.PROJECT_ID, fake.USER_ID, True)
|
||||
snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
|
||||
fake_volume_obj = fake_volume.fake_volume_obj(ctx)
|
||||
snapshot_get_by_id.return_value = snapshot_obj
|
||||
volume_get_by_id.return_value = fake_volume_obj
|
||||
snapshots = objects.SnapshotList(objects=[snapshot_obj])
|
||||
get_all_snapshots.return_value = snapshots
|
||||
|
||||
req = fakes.HTTPRequest.blank('/v1/snapshots/detail')
|
||||
resp_dict = self.controller.detail(req)
|
||||
|
||||
self.assertIn('snapshots', resp_dict)
|
||||
resp_snapshots = resp_dict['snapshots']
|
||||
self.assertEqual(1, len(resp_snapshots))
|
||||
self.assertNotIn('updated_at', resp_snapshots[0])
|
||||
|
||||
resp_snapshot = resp_snapshots.pop()
|
||||
self.assertEqual(UUID, resp_snapshot['id'])
|
||||
|
||||
@mock.patch.object(db, 'snapshot_get_all_by_project',
|
||||
v2_fakes.fake_snapshot_get_all_by_project)
|
||||
@mock.patch.object(db, 'snapshot_get_all',
|
||||
v2_fakes.fake_snapshot_get_all)
|
||||
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
|
||||
def test_admin_list_snapshots_limited_to_project(self,
|
||||
snapshot_metadata_get):
|
||||
req = fakes.HTTPRequest.blank('/v1/%s/snapshots' % fake.PROJECT_ID,
|
||||
use_admin_context=True)
|
||||
res = self.controller.index(req)
|
||||
|
||||
self.assertIn('snapshots', res)
|
||||
self.assertEqual(1, len(res['snapshots']))
|
||||
|
||||
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
|
||||
def test_list_snapshots_with_limit_and_offset(self,
|
||||
snapshot_metadata_get):
|
||||
def list_snapshots_with_limit_and_offset(snaps, is_admin):
|
||||
req = fakes.HTTPRequest.blank('/v1/%s/snapshots?limit=1'
|
||||
'&offset=1' % fake.PROJECT_ID,
|
||||
use_admin_context=is_admin)
|
||||
res = self.controller.index(req)
|
||||
|
||||
self.assertIn('snapshots', res)
|
||||
self.assertEqual(1, len(res['snapshots']))
|
||||
self.assertEqual(snaps[1].id, res['snapshots'][0]['id'])
|
||||
self.assertNotIn('updated_at', res['snapshots'][0])
|
||||
|
||||
# Test that we get an empty list with an offset greater than the
|
||||
# number of items
|
||||
req = fakes.HTTPRequest.blank('/v1/snapshots?limit=1&offset=3')
|
||||
self.assertEqual({'snapshots': []}, self.controller.index(req))
|
||||
|
||||
volume, snaps = self._create_db_snapshots(3)
|
||||
# admin case
|
||||
list_snapshots_with_limit_and_offset(snaps, is_admin=True)
|
||||
# non-admin case
|
||||
list_snapshots_with_limit_and_offset(snaps, is_admin=False)
|
||||
|
||||
@mock.patch.object(db, 'snapshot_get_all_by_project')
|
||||
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
|
||||
def test_list_snpashots_with_wrong_limit_and_offset(self,
|
||||
mock_metadata_get,
|
||||
mock_snapshot_get_all):
|
||||
"""Test list with negative and non numeric limit and offset."""
|
||||
mock_snapshot_get_all.return_value = []
|
||||
|
||||
# Negative limit
|
||||
req = fakes.HTTPRequest.blank('/v1/snapshots?limit=-1&offset=1')
|
||||
self.assertRaises(webob.exc.HTTPBadRequest,
|
||||
self.controller.index,
|
||||
req)
|
||||
|
||||
# Non numeric limit
|
||||
req = fakes.HTTPRequest.blank('/v1/snapshots?limit=a&offset=1')
|
||||
self.assertRaises(webob.exc.HTTPBadRequest,
|
||||
self.controller.index,
|
||||
req)
|
||||
|
||||
# Negative offset
|
||||
req = fakes.HTTPRequest.blank('/v1/snapshots?limit=1&offset=-1')
|
||||
self.assertRaises(webob.exc.HTTPBadRequest,
|
||||
self.controller.index,
|
||||
req)
|
||||
|
||||
# Non numeric offset
|
||||
req = fakes.HTTPRequest.blank('/v1/snapshots?limit=1&offset=a')
|
||||
self.assertRaises(webob.exc.HTTPBadRequest,
|
||||
self.controller.index,
|
||||
req)
|
||||
|
||||
# Test that we get an exception HTTPBadRequest(400) with an offset
|
||||
# greater than the maximum offset value.
|
||||
url = '/v1/snapshots?limit=1&offset=323245324356534235'
|
||||
req = fakes.HTTPRequest.blank(url)
|
||||
self.assertRaises(webob.exc.HTTPBadRequest,
|
||||
self.controller.index, req)
|
||||
|
||||
def _assert_list_next(self, expected_query=None, project=fake.PROJECT_ID,
|
||||
**kwargs):
|
||||
"""Check a page of snapshots list."""
|
||||
# Since we are accessing v2 api directly we don't need to specify
|
||||
# v2 in the request path, if we did, we'd get /v2/v2 links back
|
||||
request_path = '/v2/%s/snapshots' % project
|
||||
expected_path = request_path
|
||||
|
||||
# Construct the query if there are kwargs
|
||||
if kwargs:
|
||||
request_str = request_path + '?' + urllib.urlencode(kwargs)
|
||||
else:
|
||||
request_str = request_path
|
||||
|
||||
# Make the request
|
||||
req = fakes.HTTPRequest.blank(request_str)
|
||||
res = self.controller.index(req)
|
||||
|
||||
# We only expect to have a next link if there is an actual expected
|
||||
# query.
|
||||
if expected_query:
|
||||
# We must have the links
|
||||
self.assertIn('snapshots_links', res)
|
||||
links = res['snapshots_links']
|
||||
|
||||
# Must be a list of links, even if we only get 1 back
|
||||
self.assertTrue(list, type(links))
|
||||
next_link = links[0]
|
||||
|
||||
# rel entry must be next
|
||||
self.assertIn('rel', next_link)
|
||||
self.assertIn('next', next_link['rel'])
|
||||
|
||||
# href entry must have the right path
|
||||
self.assertIn('href', next_link)
|
||||
href_parts = urllib.urlparse(next_link['href'])
|
||||
self.assertEqual(expected_path, href_parts.path)
|
||||
|
||||
# And the query from the next link must match what we were
|
||||
# expecting
|
||||
params = urllib.parse_qs(href_parts.query)
|
||||
self.assertDictEqual(expected_query, params)
|
||||
|
||||
# Make sure we don't have links if we were not expecting them
|
||||
else:
|
||||
self.assertNotIn('snapshots_links', res)
|
||||
|
||||
def _create_db_snapshots(self, num_snaps):
|
||||
volume = utils.create_volume(self.ctx)
|
||||
snaps = [utils.create_snapshot(self.ctx,
|
||||
volume.id,
|
||||
display_name='snap' + str(i))
|
||||
for i in range(num_snaps)]
|
||||
|
||||
self.addCleanup(db.volume_destroy, self.ctx, volume.id)
|
||||
for snap in snaps:
|
||||
self.addCleanup(db.snapshot_destroy, self.ctx, snap.id)
|
||||
|
||||
snaps.reverse()
|
||||
return volume, snaps
|
||||
|
||||
def test_list_snapshots_next_link_default_limit(self):
|
||||
"""Test that snapshot list pagination is limited by osapi_max_limit."""
|
||||
volume, snaps = self._create_db_snapshots(3)
|
||||
|
||||
# NOTE(geguileo): Since cinder.api.common.limited has already been
|
||||
# imported his argument max_limit already has a default value of 1000
|
||||
# so it doesn't matter that we change it to 2. That's why we need to
|
||||
# mock it and send it current value. We still need to set the default
|
||||
# value because other sections of the code use it, for example
|
||||
# _get_collection_links
|
||||
CONF.set_default('osapi_max_limit', 2)
|
||||
|
||||
def get_pagination_params(params, max_limit=CONF.osapi_max_limit,
|
||||
original_call=common.get_pagination_params):
|
||||
return original_call(params, max_limit)
|
||||
|
||||
def _get_limit_param(params, max_limit=CONF.osapi_max_limit,
|
||||
original_call=common._get_limit_param):
|
||||
return original_call(params, max_limit)
|
||||
|
||||
with mock.patch.object(common, 'get_pagination_params',
|
||||
get_pagination_params), \
|
||||
mock.patch.object(common, '_get_limit_param',
|
||||
_get_limit_param):
|
||||
# The link from the first page should link to the second
|
||||
self._assert_list_next({'marker': [snaps[1].id]})
|
||||
|
||||
# Second page should have no next link
|
||||
self._assert_list_next(marker=snaps[1].id)
|
||||
|
||||
def test_list_snapshots_next_link_with_limit(self):
|
||||
"""Test snapshot list pagination with specific limit."""
|
||||
volume, snaps = self._create_db_snapshots(2)
|
||||
|
||||
# The link from the first page should link to the second
|
||||
self._assert_list_next({'limit': ['1'], 'marker': [snaps[0].id]},
|
||||
limit=1)
|
||||
|
||||
# Even though there are no more elements, we should get a next element
|
||||
# per specification.
|
||||
expected = {'limit': ['1'], 'marker': [snaps[1].id]}
|
||||
self._assert_list_next(expected, limit=1, marker=snaps[0].id)
|
||||
|
||||
# When we go beyond the number of elements there should be no more
|
||||
# next links
|
||||
self._assert_list_next(limit=1, marker=snaps[1].id)
|
||||
|
||||
@mock.patch.object(db, 'snapshot_get_all_by_project',
|
||||
v2_fakes.fake_snapshot_get_all_by_project)
|
||||
@mock.patch.object(db, 'snapshot_get_all',
|
||||
v2_fakes.fake_snapshot_get_all)
|
||||
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
|
||||
def test_admin_list_snapshots_all_tenants(self, snapshot_metadata_get):
|
||||
req = fakes.HTTPRequest.blank('/v1/%s/snapshots?all_tenants=1' %
|
||||
fake.PROJECT_ID,
|
||||
use_admin_context=True)
|
||||
res = self.controller.index(req)
|
||||
self.assertIn('snapshots', res)
|
||||
self.assertEqual(3, len(res['snapshots']))
|
||||
|
||||
@mock.patch.object(db, 'snapshot_get_all')
|
||||
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
|
||||
def test_admin_list_snapshots_by_tenant_id(self, snapshot_metadata_get,
|
||||
snapshot_get_all):
|
||||
def get_all(context, filters=None, marker=None, limit=None,
|
||||
sort_keys=None, sort_dirs=None, offset=None):
|
||||
if 'project_id' in filters and 'tenant1' in filters['project_id']:
|
||||
return [v2_fakes.fake_snapshot(fake.VOLUME_ID,
|
||||
tenant_id='tenant1')]
|
||||
else:
|
||||
return []
|
||||
|
||||
snapshot_get_all.side_effect = get_all
|
||||
|
||||
req = fakes.HTTPRequest.blank('/v1/%s/snapshots?all_tenants=1'
|
||||
'&project_id=tenant1' % fake.PROJECT_ID,
|
||||
use_admin_context=True)
|
||||
res = self.controller.index(req)
|
||||
self.assertIn('snapshots', res)
|
||||
self.assertEqual(1, len(res['snapshots']))
|
||||
|
||||
@mock.patch.object(db, 'snapshot_get_all_by_project',
|
||||
v2_fakes.fake_snapshot_get_all_by_project)
|
||||
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
|
||||
def test_all_tenants_non_admin_gets_all_tenants(self,
|
||||
snapshot_metadata_get):
|
||||
req = fakes.HTTPRequest.blank('/v1/%s/snapshots?all_tenants=1' %
|
||||
fake.PROJECT_ID)
|
||||
res = self.controller.index(req)
|
||||
self.assertIn('snapshots', res)
|
||||
self.assertEqual(1, len(res['snapshots']))
|
||||
|
||||
@mock.patch.object(db, 'snapshot_get_all_by_project',
|
||||
v2_fakes.fake_snapshot_get_all_by_project)
|
||||
@mock.patch.object(db, 'snapshot_get_all',
|
||||
v2_fakes.fake_snapshot_get_all)
|
||||
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
|
||||
def test_non_admin_get_by_project(self, snapshot_metadata_get):
|
||||
req = fakes.HTTPRequest.blank('/v1/%s/snapshots' % fake.PROJECT_ID)
|
||||
res = self.controller.index(req)
|
||||
self.assertIn('snapshots', res)
|
||||
self.assertEqual(1, len(res['snapshots']))
|
||||
|
||||
def _create_snapshot_bad_body(self, body):
|
||||
req = fakes.HTTPRequest.blank('/v1/%s/snapshots' % fake.PROJECT_ID)
|
||||
req.method = 'POST'
|
||||
|
||||
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
|
||||
self.controller.create, req, body)
|
||||
|
||||
def test_create_no_body(self):
|
||||
self._create_snapshot_bad_body(body=None)
|
||||
|
||||
def test_create_missing_snapshot(self):
|
||||
body = {'foo': {'a': 'b'}}
|
||||
self._create_snapshot_bad_body(body=body)
|
||||
|
||||
def test_create_malformed_entity(self):
|
||||
body = {'snapshot': 'string'}
|
||||
self._create_snapshot_bad_body(body=body)
|
@ -1,805 +0,0 @@
|
||||
# Copyright 2013 Josh Durgin
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
import datetime
|
||||
import iso8601
|
||||
|
||||
import ddt
|
||||
import mock
|
||||
from oslo_config import cfg
|
||||
from six.moves import http_client
|
||||
from six.moves import range
|
||||
import webob
|
||||
|
||||
from cinder.api import extensions
|
||||
from cinder.api.v1 import volumes
|
||||
from cinder import context
|
||||
from cinder import db
|
||||
from cinder import exception as exc
|
||||
from cinder.objects import fields
|
||||
from cinder import test
|
||||
from cinder.tests.unit.api import fakes
|
||||
from cinder.tests.unit.api.v2 import fakes as v2_fakes
|
||||
from cinder.tests.unit import fake_constants as fake
|
||||
from cinder.tests.unit import fake_volume
|
||||
from cinder.tests.unit.image import fake as fake_image
|
||||
from cinder.tests.unit import utils
|
||||
from cinder.volume import api as volume_api
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
@ddt.ddt
class VolumeApiTest(test.TestCase):
    def setUp(self):
        super(VolumeApiTest, self).setUp()
        self.ext_mgr = extensions.ExtensionManager()
        self.ext_mgr.extensions = {}
        fake_image.mock_image_service(self)
        self.controller = volumes.VolumeController(self.ext_mgr)
        self.maxDiff = None
        self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True)

    def test_volume_create(self):
        self.mock_object(volume_api.API, 'get', v2_fakes.fake_volume_get)
        self.mock_object(volume_api.API, "create",
                         v2_fakes.fake_volume_api_create)
        self.mock_object(db.sqlalchemy.api, '_volume_type_get_full',
                         v2_fakes.fake_volume_type_get)

        vol = {"size": 100,
               "display_name": "Volume Test Name",
               "display_description": "Volume Test Desc",
               "availability_zone": "zone1:host1"}
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v1/volumes')
        res_dict = self.controller.create(req, body)
        expected = {'volume': {'status': 'fakestatus',
                               'display_description': 'Volume Test Desc',
                               'availability_zone': 'zone1:host1',
                               'display_name': 'Volume Test Name',
                               'attachments': [],
                               'multiattach': 'false',
                               'bootable': 'false',
                               'volume_type': 'vol_type_name',
                               'snapshot_id': None,
                               'source_volid': None,
                               'metadata': {},
                               'id': fake.VOLUME_ID,
                               'created_at': datetime.datetime(
                                   1900, 1, 1, 1, 1, 1,
                                   tzinfo=iso8601.iso8601.Utc()),
                               'size': 100,
                               'encrypted': False}}
        self.assertEqual(expected, res_dict)

    @mock.patch.object(db, 'service_get_all',
                       return_value=v2_fakes.fake_service_get_all_by_topic(
                           None, None),
                       autospec=True)
    def test_volume_create_with_type(self, mock_service_get):
        vol_type = db.volume_type_create(
            context.get_admin_context(),
            dict(name=CONF.default_volume_type, extra_specs={})
        )
        db_vol_type = db.volume_type_get(context.get_admin_context(),
                                         vol_type.id)

        vol = {"size": 100,
               "display_name": "Volume Test Name",
               "display_description": "Volume Test Desc",
               "availability_zone": "zone1:host1",
               "volume_type": "FakeTypeName"}
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v1/volumes')
        # Raise 404 when type name isn't valid
        self.assertRaises(exc.VolumeTypeNotFoundByName,
                          self.controller.create, req, body)

        # Use correct volume type name
        vol.update(dict(volume_type=CONF.default_volume_type))
        body.update(dict(volume=vol))
        res_dict = self.controller.create(req, body)
        self.assertIn('id', res_dict['volume'])
        self.assertEqual(1, len(res_dict))
        self.assertEqual(db_vol_type['name'],
                         res_dict['volume']['volume_type'])

        # Use correct volume type id
        vol.update(dict(volume_type=db_vol_type['id']))
        body.update(dict(volume=vol))
        res_dict = self.controller.create(req, body)
        self.assertIn('id', res_dict['volume'])
        self.assertEqual(1, len(res_dict))
        self.assertEqual(db_vol_type['name'],
                         res_dict['volume']['volume_type'])
    def test_volume_creation_fails_with_bad_size(self):
        vol = {"size": '',
               "display_name": "Volume Test Name",
               "display_description": "Volume Test Desc",
               "availability_zone": "zone1:host1"}
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v1/volumes')
        self.assertRaises(exc.InvalidInput,
                          self.controller.create,
                          req,
                          body)

    def test_volume_creation_fails_with_bad_availability_zone(self):
        vol = {"size": '1',
               "name": "Volume Test Name",
               "description": "Volume Test Desc",
               "availability_zone": "zonen:hostn"}
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v1/volumes')
        self.assertRaises(exc.InvalidAvailabilityZone,
                          self.controller.create,
                          req, body)

    def test_volume_create_with_image_id(self):
        self.mock_object(volume_api.API, "create",
                         v2_fakes.fake_volume_api_create)
        self.mock_object(db.sqlalchemy.api, '_volume_type_get_full',
                         v2_fakes.fake_volume_type_get)

        self.ext_mgr.extensions = {'os-image-create': 'fake'}
        test_id = "c905cedb-7281-47e4-8a62-f26bc5fc4c77"
        vol = {"size": '1',
               "display_name": "Volume Test Name",
               "display_description": "Volume Test Desc",
               "availability_zone": "nova",
               "imageRef": test_id}
        expected = {'volume': {'status': 'fakestatus',
                               'display_description': 'Volume Test Desc',
                               'availability_zone': 'nova',
                               'display_name': 'Volume Test Name',
                               'encrypted': False,
                               'attachments': [],
                               'multiattach': 'false',
                               'bootable': 'false',
                               'volume_type': 'vol_type_name',
                               'image_id': test_id,
                               'snapshot_id': None,
                               'source_volid': None,
                               'metadata': {},
                               'id': fake.VOLUME_ID,
                               'created_at': datetime.datetime(
                                   1900, 1, 1, 1, 1, 1,
                                   tzinfo=iso8601.iso8601.Utc()),
                               'size': 1}}
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v1/volumes')
        res_dict = self.controller.create(req, body)
        self.assertEqual(expected, res_dict)

    def test_volume_create_with_image_id_is_integer(self):
        self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_create)
        self.ext_mgr.extensions = {'os-image-create': 'fake'}

        vol = {"size": '1',
               "display_name": "Volume Test Name",
               "display_description": "Volume Test Desc",
               "availability_zone": "cinder",
               "imageRef": 1234}
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v1/volumes')
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create,
                          req,
                          body)

    def test_volume_create_with_image_id_not_uuid_format(self):
        self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_create)
        self.mock_object(fake_image._FakeImageService,
                         "detail",
                         v2_fakes.fake_image_service_detail)
        self.ext_mgr.extensions = {'os-image-create': 'fake'}
        vol = {"size": '1',
               "display_name": "Volume Test Name",
               "display_description": "Volume Test Desc",
               "availability_zone": "cinder",
               "imageRef": '12345'}
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v1/volumes')
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create,
                          req,
                          body)

    def test_volume_create_with_image_id_with_empty_string(self):
        self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_create)
        self.mock_object(fake_image._FakeImageService,
                         "detail",
                         v2_fakes.fake_image_service_detail)
        self.ext_mgr.extensions = {'os-image-create': 'fake'}
        vol = {"size": 1,
               "display_name": "Volume Test Name",
               "display_description": "Volume Test Desc",
               "availability_zone": "cinder",
               "imageRef": ''}
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v1/volumes')
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create,
                          req,
                          body)
    def test_volume_update(self):
        self.mock_object(volume_api.API, 'get', v2_fakes.fake_volume_api_get)
        self.mock_object(volume_api.API, "update", v2_fakes.fake_volume_update)
        self.mock_object(db.sqlalchemy.api, '_volume_type_get_full',
                         v2_fakes.fake_volume_type_get)
        self.mock_object(db, 'volume_admin_metadata_get',
                         return_value={'attached_mode': 'rw',
                                       'readonly': 'False'})

        updates = {
            "display_name": "Updated Test Name",
        }
        body = {"volume": updates}
        req = fakes.HTTPRequest.blank('/v1/volumes/%s' % fake.VOLUME_ID)
        self.assertEqual(0, len(self.notifier.notifications))
        res_dict = self.controller.update(req, fake.VOLUME_ID, body)
        expected = {'volume': {
            'status': 'fakestatus',
            'display_description': 'displaydesc',
            'availability_zone': 'fakeaz',
            'display_name': 'Updated Test Name',
            'encrypted': False,
            'attachments': [],
            'multiattach': 'false',
            'bootable': 'false',
            'volume_type': 'vol_type_name',
            'snapshot_id': None,
            'source_volid': None,
            'metadata': {'attached_mode': 'rw',
                         'readonly': 'False'},
            'id': fake.VOLUME_ID,
            'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1,
                                            tzinfo=iso8601.iso8601.Utc()),
            'size': 1}}
        self.assertEqual(expected, res_dict)
        self.assertEqual(2, len(self.notifier.notifications))

    def test_volume_update_metadata(self):
        self.mock_object(volume_api.API, 'get', v2_fakes.fake_volume_api_get)
        self.mock_object(volume_api.API, "update", v2_fakes.fake_volume_update)
        self.mock_object(db.sqlalchemy.api, '_volume_type_get_full',
                         v2_fakes.fake_volume_type_get)

        updates = {
            "metadata": {"qos_max_iops": '2000'}
        }
        body = {"volume": updates}
        req = fakes.HTTPRequest.blank('/v1/volumes/%s' % fake.VOLUME_ID)
        self.assertEqual(0, len(self.notifier.notifications))
        res_dict = self.controller.update(req, fake.VOLUME_ID, body)
        expected = {'volume': {
            'status': 'fakestatus',
            'display_description': 'displaydesc',
            'availability_zone': 'fakeaz',
            'display_name': 'displayname',
            'encrypted': False,
            'attachments': [],
            'multiattach': 'false',
            'bootable': 'false',
            'volume_type': 'vol_type_name',
            'snapshot_id': None,
            'source_volid': None,
            'metadata': {"qos_max_iops": '2000',
                         "readonly": "False",
                         "attached_mode": "rw"},
            'id': fake.VOLUME_ID,
            'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1,
                                            tzinfo=iso8601.iso8601.Utc()),
            'size': 1
        }}
        self.assertEqual(expected, res_dict)
        self.assertEqual(2, len(self.notifier.notifications))

    def test_volume_update_with_admin_metadata(self):
        self.mock_object(volume_api.API, "update", v2_fakes.fake_volume_update)

        volume = v2_fakes.create_fake_volume(fake.VOLUME_ID)
        del volume['name']
        del volume['volume_type']
        del volume['volume_type_id']
        volume['metadata'] = {'key': 'value'}
        db.volume_create(context.get_admin_context(), volume)
        db.volume_admin_metadata_update(context.get_admin_context(),
                                        fake.VOLUME_ID,
                                        {"readonly": "True",
                                         "invisible_key": "invisible_value"},
                                        False)
        values = {'volume_id': fake.VOLUME_ID, }
        attachment = db.volume_attach(context.get_admin_context(), values)
        db.volume_attached(context.get_admin_context(),
                           attachment['id'], fake.INSTANCE_ID,
                           None, '/')

        updates = {
            "display_name": "Updated Test Name",
        }
        body = {"volume": updates}
        req = fakes.HTTPRequest.blank('/v1/volumes/%s' % fake.VOLUME_ID)
        self.assertEqual(0, len(self.notifier.notifications))
        admin_ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True)
        req.environ['cinder.context'] = admin_ctx
        res_dict = self.controller.update(req, fake.VOLUME_ID, body)
        expected = {'volume': {
            'status': 'in-use',
            'display_description': 'displaydesc',
            'availability_zone': 'fakeaz',
            'display_name': 'Updated Test Name',
            'encrypted': False,
            'attachments': [{
                'attachment_id': attachment['id'],
                'id': fake.VOLUME_ID,
                'volume_id': fake.VOLUME_ID,
                'server_id': fake.INSTANCE_ID,
                'host_name': None,
                'device': '/'
            }],
            'multiattach': 'false',
            'bootable': 'false',
            'volume_type': None,
            'snapshot_id': None,
            'source_volid': None,
            'metadata': {'key': 'value',
                         'readonly': 'True'},
            'id': fake.VOLUME_ID,
            'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1,
                                            tzinfo=iso8601.iso8601.Utc()),
            'size': 1}}
        self.assertEqual(expected, res_dict)
        self.assertEqual(2, len(self.notifier.notifications))
    def test_update_empty_body(self):
        body = {}
        req = fakes.HTTPRequest.blank('/v1/volumes/1')
        self.assertRaises(webob.exc.HTTPUnprocessableEntity,
                          self.controller.update,
                          req, fake.VOLUME_ID, body)

    def test_update_invalid_body(self):
        body = {'display_name': 'missing top level volume key'}
        req = fakes.HTTPRequest.blank('/v1/volumes/1')
        self.assertRaises(webob.exc.HTTPUnprocessableEntity,
                          self.controller.update,
                          req, fake.VOLUME_ID, body)

    def test_update_not_found(self):
        self.mock_object(volume_api.API, "get",
                         v2_fakes.fake_volume_get_notfound)
        updates = {
            "name": "Updated Test Name",
        }

        body = {"volume": updates}
        req = fakes.HTTPRequest.blank(
            '/v1/volumes/%s' % fake.WILL_NOT_BE_FOUND_ID)
        self.assertRaises(exc.VolumeNotFound,
                          self.controller.update,
                          req, fake.WILL_NOT_BE_FOUND_ID, body)
    def test_volume_list(self):
        self.mock_object(volume_api.API, 'get_all',
                         v2_fakes.fake_volume_api_get_all_by_project)
        self.mock_object(db.sqlalchemy.api, '_volume_type_get_full',
                         v2_fakes.fake_volume_type_get)

        req = fakes.HTTPRequest.blank('/v1/volumes')
        res_dict = self.controller.index(req)
        expected = {'volumes': [{'status': 'fakestatus',
                                 'display_description': 'displaydesc',
                                 'availability_zone': 'fakeaz',
                                 'display_name': 'displayname',
                                 'encrypted': False,
                                 'attachments': [],
                                 'multiattach': 'false',
                                 'bootable': 'false',
                                 'volume_type': 'vol_type_name',
                                 'snapshot_id': None,
                                 'source_volid': None,
                                 'metadata': {'attached_mode': 'rw',
                                              'readonly': 'False'},
                                 'id': fake.VOLUME_ID,
                                 'created_at': datetime.datetime(
                                     1900, 1, 1, 1, 1, 1,
                                     tzinfo=iso8601.iso8601.Utc()),
                                 'size': 1}]}
        self.assertEqual(expected, res_dict)
        # Finally test that we cached the returned volumes
        self.assertEqual(1, len(req.cached_resource()))

    def test_volume_list_with_admin_metadata(self):
        volume = v2_fakes.create_fake_volume(fake.VOLUME_ID)
        del volume['name']
        del volume['volume_type']
        del volume['volume_type_id']
        volume['metadata'] = {'key': 'value'}
        db.volume_create(context.get_admin_context(), volume)
        db.volume_admin_metadata_update(context.get_admin_context(),
                                        fake.VOLUME_ID,
                                        {"readonly": "True",
                                         "invisible_key": "invisible_value"},
                                        False)
        values = {'volume_id': fake.VOLUME_ID, }
        attachment = db.volume_attach(context.get_admin_context(), values)
        db.volume_attached(context.get_admin_context(),
                           attachment['id'], fake.INSTANCE_ID, None, '/')

        req = fakes.HTTPRequest.blank('/v1/volumes')
        admin_ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True)
        req.environ['cinder.context'] = admin_ctx
        res_dict = self.controller.index(req)
        expected = {'volumes': [{'status': 'in-use',
                                 'display_description': 'displaydesc',
                                 'availability_zone': 'fakeaz',
                                 'display_name': 'displayname',
                                 'encrypted': False,
                                 'attachments': [
                                     {'attachment_id': attachment['id'],
                                      'device': '/',
                                      'server_id': fake.INSTANCE_ID,
                                      'host_name': None,
                                      'id': fake.VOLUME_ID,
                                      'volume_id': fake.VOLUME_ID}],
                                 'multiattach': 'false',
                                 'bootable': 'false',
                                 'volume_type': None,
                                 'snapshot_id': None,
                                 'source_volid': None,
                                 'metadata': {'key': 'value',
                                              'readonly': 'True'},
                                 'id': fake.VOLUME_ID,
                                 'created_at': datetime.datetime(
                                     1900, 1, 1, 1, 1, 1,
                                     tzinfo=iso8601.iso8601.Utc()),
                                 'size': 1}]}
        self.assertEqual(expected, res_dict)
    def test_volume_list_detail(self):
        self.mock_object(volume_api.API, 'get_all',
                         v2_fakes.fake_volume_api_get_all_by_project)
        self.mock_object(db.sqlalchemy.api, '_volume_type_get_full',
                         v2_fakes.fake_volume_type_get)

        req = fakes.HTTPRequest.blank('/v1/volumes/detail')
        res_dict = self.controller.detail(req)
        expected = {'volumes': [{'status': 'fakestatus',
                                 'display_description': 'displaydesc',
                                 'availability_zone': 'fakeaz',
                                 'display_name': 'displayname',
                                 'encrypted': False,
                                 'attachments': [],
                                 'multiattach': 'false',
                                 'bootable': 'false',
                                 'volume_type': 'vol_type_name',
                                 'snapshot_id': None,
                                 'source_volid': None,
                                 'metadata': {'attached_mode': 'rw',
                                              'readonly': 'False'},
                                 'id': fake.VOLUME_ID,
                                 'created_at': datetime.datetime(
                                     1900, 1, 1, 1, 1, 1,
                                     tzinfo=iso8601.iso8601.Utc()),
                                 'size': 1}]}
        self.assertEqual(expected, res_dict)
        # Finally test that we cached the returned volumes
        self.assertEqual(1, len(req.cached_resource()))

    def test_volume_list_detail_with_admin_metadata(self):
        volume = v2_fakes.create_fake_volume(fake.VOLUME_ID)
        del volume['name']
        del volume['volume_type']
        del volume['volume_type_id']
        volume['metadata'] = {'key': 'value'}
        db.volume_create(context.get_admin_context(), volume)
        db.volume_admin_metadata_update(context.get_admin_context(),
                                        fake.VOLUME_ID,
                                        {"readonly": "True",
                                         "invisible_key": "invisible_value"},
                                        False)
        values = {'volume_id': fake.VOLUME_ID, }
        attachment = db.volume_attach(context.get_admin_context(), values)
        db.volume_attached(context.get_admin_context(),
                           attachment['id'], fake.INSTANCE_ID, None, '/')

        req = fakes.HTTPRequest.blank('/v1/volumes/detail')
        admin_ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True)
        req.environ['cinder.context'] = admin_ctx
        res_dict = self.controller.index(req)
        expected = {'volumes': [{'status': 'in-use',
                                 'display_description': 'displaydesc',
                                 'availability_zone': 'fakeaz',
                                 'display_name': 'displayname',
                                 'encrypted': False,
                                 'attachments': [
                                     {'attachment_id': attachment['id'],
                                      'device': '/',
                                      'server_id': fake.INSTANCE_ID,
                                      'host_name': None,
                                      'id': fake.VOLUME_ID,
                                      'volume_id': fake.VOLUME_ID}],
                                 'multiattach': 'false',
                                 'bootable': 'false',
                                 'volume_type': None,
                                 'snapshot_id': None,
                                 'source_volid': None,
                                 'metadata': {'key': 'value',
                                              'readonly': 'True'},
                                 'id': fake.VOLUME_ID,
                                 'created_at': datetime.datetime(
                                     1900, 1, 1, 1, 1, 1,
                                     tzinfo=iso8601.iso8601.Utc()),
                                 'size': 1}]}
        self.assertEqual(expected, res_dict)
    def test_volume_show(self):
        self.mock_object(volume_api.API, 'get', v2_fakes.fake_volume_api_get)
        self.mock_object(db.sqlalchemy.api, '_volume_type_get_full',
                         v2_fakes.fake_volume_type_get)

        req = fakes.HTTPRequest.blank('/v1/volumes/%s' % fake.VOLUME_ID)
        res_dict = self.controller.show(req, fake.VOLUME_ID)
        expected = {'volume': {'status': 'fakestatus',
                               'display_description': 'displaydesc',
                               'availability_zone': 'fakeaz',
                               'display_name': 'displayname',
                               'encrypted': False,
                               'attachments': [],
                               'multiattach': 'false',
                               'bootable': 'false',
                               'volume_type': 'vol_type_name',
                               'snapshot_id': None,
                               'source_volid': None,
                               'metadata': {'attached_mode': 'rw',
                                            'readonly': 'False'},
                               'id': fake.VOLUME_ID,
                               'created_at': datetime.datetime(
                                   1900, 1, 1, 1, 1, 1,
                                   tzinfo=iso8601.iso8601.Utc()),
                               'size': 1}}
        self.assertEqual(expected, res_dict)
        # Finally test that we cached the returned volume
        self.assertIsNotNone(req.cached_resource_by_id(fake.VOLUME_ID))
    def test_volume_show_no_attachments(self):
        def fake_volume_get(self, context, volume_id, **kwargs):
            vol = v2_fakes.create_fake_volume(
                volume_id,
                attach_status=fields.VolumeAttachStatus.DETACHED)
            return fake_volume.fake_volume_obj(context, **vol)

        def fake_volume_admin_metadata_get(context, volume_id, **kwargs):
            return v2_fakes.fake_volume_admin_metadata_get(
                context, volume_id,
                attach_status=fields.VolumeAttachStatus.DETACHED)

        self.mock_object(volume_api.API, 'get', fake_volume_get)
        self.mock_object(db, 'volume_admin_metadata_get',
                         fake_volume_admin_metadata_get)
        self.mock_object(db.sqlalchemy.api, '_volume_type_get_full',
                         v2_fakes.fake_volume_type_get)

        req = fakes.HTTPRequest.blank('/v1/volumes/%s' % fake.VOLUME_ID)
        res_dict = self.controller.show(req, fake.VOLUME_ID)
        expected = {'volume': {'status': 'fakestatus',
                               'display_description': 'displaydesc',
                               'availability_zone': 'fakeaz',
                               'display_name': 'displayname',
                               'encrypted': False,
                               'attachments': [],
                               'multiattach': 'false',
                               'bootable': 'false',
                               'volume_type': 'vol_type_name',
                               'snapshot_id': None,
                               'source_volid': None,
                               'metadata': {'readonly': 'False'},
                               'id': fake.VOLUME_ID,
                               'created_at': datetime.datetime(
                                   1900, 1, 1, 1, 1, 1,
                                   tzinfo=iso8601.iso8601.Utc()),
                               'size': 1}}

        self.assertEqual(expected, res_dict)

    def test_volume_show_no_volume(self):
        self.mock_object(volume_api.API, "get",
                         v2_fakes.fake_volume_get_notfound)

        req = fakes.HTTPRequest.blank(
            '/v1/volumes/%s' % fake.WILL_NOT_BE_FOUND_ID)
        self.assertRaises(exc.VolumeNotFound,
                          self.controller.show,
                          req,
                          fake.WILL_NOT_BE_FOUND_ID)
        # Finally test that nothing was cached
        self.assertIsNone(req.cached_resource_by_id(fake.WILL_NOT_BE_FOUND_ID))
    def _create_db_volumes(self, num_volumes):
        volumes = [utils.create_volume(self.ctxt, display_name='vol%s' % i)
                   for i in range(num_volumes)]
        for vol in volumes:
            self.addCleanup(db.volume_destroy, self.ctxt, vol.id)
        volumes.reverse()
        return volumes

    def test_volume_detail_limit_offset(self):
        created_volumes = self._create_db_volumes(2)

        def volume_detail_limit_offset(is_admin):
            req = fakes.HTTPRequest.blank('/v1/volumes/detail?limit=2'
                                          '&offset=1',
                                          use_admin_context=is_admin)
            res_dict = self.controller.index(req)
            volumes = res_dict['volumes']
            self.assertEqual(1, len(volumes))
            self.assertEqual(created_volumes[1].id, volumes[0]['id'])

        # admin case
        volume_detail_limit_offset(is_admin=True)
        # non_admin case
        volume_detail_limit_offset(is_admin=False)
    def test_volume_show_with_admin_metadata(self):
        volume = v2_fakes.create_fake_volume(fake.VOLUME_ID)
        del volume['name']
        del volume['volume_type']
        del volume['volume_type_id']
        volume['metadata'] = {'key': 'value'}
        db.volume_create(context.get_admin_context(), volume)
        db.volume_admin_metadata_update(context.get_admin_context(),
                                        fake.VOLUME_ID,
                                        {"readonly": "True",
                                         "invisible_key": "invisible_value"},
                                        False)
        values = {'volume_id': fake.VOLUME_ID, }
        attachment = db.volume_attach(context.get_admin_context(), values)
        db.volume_attached(context.get_admin_context(),
                           attachment['id'], fake.INSTANCE_ID, None, '/')

        req = fakes.HTTPRequest.blank('/v1/volumes/%s' % fake.VOLUME_ID)
        admin_ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True)
        req.environ['cinder.context'] = admin_ctx
        res_dict = self.controller.show(req, fake.VOLUME_ID)
        expected = {'volume': {'status': 'in-use',
                               'display_description': 'displaydesc',
                               'availability_zone': 'fakeaz',
                               'display_name': 'displayname',
                               'encrypted': False,
                               'attachments': [
                                   {'attachment_id': attachment['id'],
                                    'device': '/',
                                    'server_id': fake.INSTANCE_ID,
                                    'host_name': None,
                                    'id': fake.VOLUME_ID,
                                    'volume_id': fake.VOLUME_ID}],
                               'multiattach': 'false',
                               'bootable': 'false',
                               'volume_type': None,
                               'snapshot_id': None,
                               'source_volid': None,
                               'metadata': {'key': 'value',
                                            'readonly': 'True'},
                               'id': fake.VOLUME_ID,
                               'created_at': datetime.datetime(
                                   1900, 1, 1, 1, 1, 1,
                                   tzinfo=iso8601.iso8601.Utc()),
                               'size': 1}}
        self.assertEqual(expected, res_dict)
    def test_volume_show_with_encrypted_volume(self):
        def fake_volume_get(self, context, volume_id, **kwargs):
            vol = v2_fakes.create_fake_volume(volume_id,
                                              encryption_key_id=fake.KEY_ID)
            return fake_volume.fake_volume_obj(context, **vol)

        self.mock_object(volume_api.API, 'get', fake_volume_get)
        self.mock_object(db.sqlalchemy.api, '_volume_type_get_full',
                         v2_fakes.fake_volume_type_get)

        req = fakes.HTTPRequest.blank('/v1/volumes/%s' % fake.VOLUME_ID)
        res_dict = self.controller.show(req, fake.VOLUME_ID)
        self.assertTrue(res_dict['volume']['encrypted'])

    def test_volume_show_with_unencrypted_volume(self):
        self.mock_object(volume_api.API, 'get', v2_fakes.fake_volume_api_get)
        self.mock_object(db.sqlalchemy.api, '_volume_type_get_full',
                         v2_fakes.fake_volume_type_get)

        req = fakes.HTTPRequest.blank('/v1/volumes/%s' % fake.VOLUME_ID)
        res_dict = self.controller.show(req, fake.VOLUME_ID)
        self.assertEqual(False, res_dict['volume']['encrypted'])
    @mock.patch.object(volume_api.API, 'delete', v2_fakes.fake_volume_delete)
    @mock.patch.object(volume_api.API, 'get', v2_fakes.fake_volume_get)
    def test_volume_delete(self):
        req = fakes.HTTPRequest.blank('/v1/volumes/%s' % fake.VOLUME_ID)
        resp = self.controller.delete(req, fake.VOLUME_ID)
        self.assertEqual(http_client.ACCEPTED, resp.status_int)

    def test_volume_delete_no_volume(self):
        self.mock_object(volume_api.API, "get",
                         v2_fakes.fake_volume_get_notfound)

        req = fakes.HTTPRequest.blank(
            '/v1/volumes/%s' % fake.WILL_NOT_BE_FOUND_ID)
        self.assertRaises(exc.VolumeNotFound,
                          self.controller.delete,
                          req, fake.WILL_NOT_BE_FOUND_ID)
    def test_admin_list_volumes_limited_to_project(self):
        self.mock_object(db, 'volume_get_all_by_project',
                         v2_fakes.fake_volume_get_all_by_project)

        req = fakes.HTTPRequest.blank('/v1/%s/volumes' % fake.PROJECT_ID,
                                      use_admin_context=True)
        res = self.controller.index(req)

        self.assertIn('volumes', res)
        self.assertEqual(1, len(res['volumes']))

    @mock.patch.object(db, 'volume_get_all', v2_fakes.fake_volume_get_all)
    @mock.patch.object(db, 'volume_get_all_by_project',
                       v2_fakes.fake_volume_get_all_by_project)
    def test_admin_list_volumes_all_tenants(self):
        req = fakes.HTTPRequest.blank(
            '/v1/%s/volumes?all_tenants=1' % fake.PROJECT_ID,
            use_admin_context=True)
        res = self.controller.index(req)
        self.assertIn('volumes', res)
        self.assertEqual(3, len(res['volumes']))

    @mock.patch.object(db, 'volume_get_all', v2_fakes.fake_volume_get_all)
    @mock.patch.object(db, 'volume_get_all_by_project',
                       v2_fakes.fake_volume_get_all_by_project)
    @mock.patch.object(volume_api.API, 'get', v2_fakes.fake_volume_get)
    def test_all_tenants_non_admin_gets_all_tenants(self):
        req = fakes.HTTPRequest.blank(
            '/v1/%s/volumes?all_tenants=1' % fake.PROJECT_ID)
        res = self.controller.index(req)
        self.assertIn('volumes', res)
        self.assertEqual(1, len(res['volumes']))

    @mock.patch.object(db, 'volume_get_all_by_project',
                       v2_fakes.fake_volume_get_all_by_project)
    @mock.patch.object(volume_api.API, 'get', v2_fakes.fake_volume_get)
    def test_non_admin_get_by_project(self):
        req = fakes.HTTPRequest.blank('/v1/%s/volumes' % fake.PROJECT_ID)
        res = self.controller.index(req)
        self.assertIn('volumes', res)
        self.assertEqual(1, len(res['volumes']))
    def _unprocessable_volume_create(self, body):
        req = fakes.HTTPRequest.blank('/v1/%s/volumes' % fake.PROJECT_ID)
        req.method = 'POST'

        self.assertRaises(webob.exc.HTTPUnprocessableEntity,
                          self.controller.create, req, body)

    def test_create_no_body(self):
        self._unprocessable_volume_create(body=None)

    def test_create_missing_volume(self):
        body = {'foo': {'a': 'b'}}
        self._unprocessable_volume_create(body=body)

    def test_create_malformed_entity(self):
        body = {'volume': 'string'}
        self._unprocessable_volume_create(body=body)
@ -5,16 +5,9 @@
[composite:osapi_volume]
use = call:cinder.api:root_app_factory
/: apiversions
/v1: openstack_volume_api_v1
/v2: openstack_volume_api_v2
/v3: openstack_volume_api_v3

[composite:openstack_volume_api_v1]
use = call:cinder.api.middleware.auth:pipeline_factory
noauth = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv1
keystone = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
keystone_nolimit = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1

[composite:openstack_volume_api_v2]
use = call:cinder.api.middleware.auth:pipeline_factory
noauth = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv2
@ -49,9 +42,6 @@ paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory
[filter:sizelimit]
paste.filter_factory = oslo_middleware.sizelimit:RequestBodySizeLimiter.factory

[app:apiv1]
paste.app_factory = cinder.api.v1.router:APIRouter.factory

[app:apiv2]
paste.app_factory = cinder.api.v2.router:APIRouter.factory
releasenotes/notes/removed-apiv1-616b1b76a15521cf.yaml (new file, 7 lines)
@ -0,0 +1,7 @@
---
upgrade:
  - |
    The Cinder API v1 was deprecated in the Juno release and disabled by
    default in the Ocata release. It has now been removed completely.
    If upgrading from a previous version, it is recommended that you edit
    your `/etc/cinder/api-paste.ini` file to remove all references to v1.
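
For operators following the upgrade note above, here is a minimal sketch of what the affected sections of `/etc/cinder/api-paste.ini` look like once every v1 reference has been removed. It is derived from the hunks shown earlier and assumes the stock pipeline names; a customized deployment may carry different filters, and the v2 keystone pipelines are assumed to mirror the removed v1 entries.

    [composite:osapi_volume]
    use = call:cinder.api:root_app_factory
    /: apiversions
    /v2: openstack_volume_api_v2
    /v3: openstack_volume_api_v3

    [composite:openstack_volume_api_v2]
    use = call:cinder.api.middleware.auth:pipeline_factory
    noauth = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv2
    # Assumption: the keystone pipelines mirror the deleted v1 lines, ending in apiv2.
    keystone = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
    keystone_nolimit = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2

    [app:apiv2]
    paste.app_factory = cinder.api.v2.router:APIRouter.factory

After editing the file, restart the cinder-api service so that the updated paste pipeline is loaded.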