Merge "Remove API-GW related code from the Tricricle"
commit 4b0b897805

@@ -1,63 +0,0 @@
# Copyright 2015 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Much of this module is based on the work of the Ironic team
# see http://git.openstack.org/cgit/openstack/ironic/tree/ironic/cmd/api.py

import logging as std_logging
import sys

from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import wsgi

from tricircle.common import config
from tricircle.common.i18n import _LI
from tricircle.common.i18n import _LW
from tricircle.common import restapp

from tricircle.cinder_apigw import app

CONF = cfg.CONF
LOG = logging.getLogger(__name__)


def main():
    config.init(app.common_opts, sys.argv[1:])
    application = app.setup_app()

    host = CONF.bind_host
    port = CONF.bind_port
    workers = CONF.api_workers

    if workers < 1:
        LOG.warning(_LW("Wrong worker number, worker = %(workers)s"), workers)
        workers = 1

    LOG.info(_LI("Cinder_APIGW on http://%(host)s:%(port)s with %(workers)s"),
             {'host': host, 'port': port, 'workers': workers})

    service = wsgi.Server(CONF, 'Tricircle Cinder_APIGW',
                          application, host, port)
    restapp.serve(service, CONF, workers)

    LOG.info(_LI("Configuration:"))
    CONF.log_opt_values(LOG, std_logging.INFO)

    restapp.wait()


if __name__ == '__main__':
    main()
@@ -1,68 +0,0 @@
# Copyright 2015 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Much of this module is based on the work of the Ironic team
# see http://git.openstack.org/cgit/openstack/ironic/tree/ironic/cmd/api.py

import eventlet

if __name__ == "__main__":
    eventlet.monkey_patch()

import logging as std_logging
import sys

from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import wsgi

from tricircle.common import config
from tricircle.common.i18n import _LI
from tricircle.common.i18n import _LW
from tricircle.common import restapp

from tricircle.nova_apigw import app

CONF = cfg.CONF
LOG = logging.getLogger(__name__)


def main():
    config.init(app.common_opts, sys.argv[1:])
    application = app.setup_app()

    host = CONF.bind_host
    port = CONF.bind_port
    workers = CONF.api_workers

    if workers < 1:
        LOG.warning(_LW("Wrong worker number, worker = %(workers)s"), workers)
        workers = 1

    LOG.info(_LI("Nova_APIGW on http://%(host)s:%(port)s with %(workers)s"),
             {'host': host, 'port': port, 'workers': workers})

    service = wsgi.Server(CONF, 'Tricircle Nova_APIGW',
                          application, host, port)
    restapp.serve(service, CONF, workers)

    LOG.info(_LI("Configuration:"))
    CONF.log_opt_values(LOG, std_logging.INFO)

    restapp.wait()


if __name__ == '__main__':
    main()
@@ -1,16 +0,0 @@
[DEFAULT]
output_file = etc/cinder_apigw.conf.sample
wrap_width = 79
namespace = tricircle.cinder_apigw
namespace = tricircle.common
namespace = tricircle.db
namespace = oslo.log
namespace = oslo.messaging
namespace = oslo.policy
namespace = oslo.service.periodic_task
namespace = oslo.service.service
namespace = oslo.service.sslutils
namespace = oslo.db
namespace = oslo.middleware
namespace = oslo.concurrency
namespace = keystonemiddleware.auth_token
@@ -1,16 +0,0 @@
[DEFAULT]
output_file = etc/nova_apigw.conf.sample
wrap_width = 79
namespace = tricircle.nova_apigw
namespace = tricircle.common
namespace = tricircle.db
namespace = oslo.log
namespace = oslo.messaging
namespace = oslo.policy
namespace = oslo.service.periodic_task
namespace = oslo.service.service
namespace = oslo.service.sslutils
namespace = oslo.db
namespace = oslo.middleware
namespace = oslo.concurrency
namespace = keystonemiddleware.auth_token
@@ -50,9 +50,6 @@ oslo.config.opts =
    tricircle.common = tricircle.common.opts:list_opts
    tricircle.db = tricircle.db.opts:list_opts
    tricircle.network = tricircle.network.opts:list_opts

    tricircle.nova_apigw = tricircle.nova_apigw.opts:list_opts
    tricircle.cinder_apigw = tricircle.cinder_apigw.opts:list_opts
    tricircle.xjob = tricircle.xjob.opts:list_opts

tempest.test_plugins =
@@ -61,8 +58,3 @@ tempest.test_plugins =
tricircle.network.type_drivers =
    local = tricircle.network.drivers.type_local:LocalTypeDriver
    shared_vlan = tricircle.network.drivers.type_shared_vlan:SharedVLANTypeDriver

tricircle.common.schedulers =
    pod_manager = tricircle.common.scheduler.pod_manager:PodManager
    bottom_pod_filter = tricircle.common.scheduler.filters.bottom_pod_filter:BottomPodFilter
    filter_scheduler = tricircle.common.scheduler.filter_scheduler:FilterScheduler
tox.ini
@@ -28,8 +28,6 @@ commands = python setup.py testr --coverage --testr-args='{posargs}'

[testenv:genconfig]
commands = oslo-config-generator --config-file=etc/api-cfg-gen.conf
           oslo-config-generator --config-file=etc/nova_apigw-cfg-gen.conf
           oslo-config-generator --config-file=etc/cinder_apigw-cfg-gen.conf
           oslo-config-generator --config-file=etc/xjob-cfg-gen.conf
           oslo-config-generator --config-file=etc/tricircle_plugin-cfg-gen.conf
@@ -1,76 +0,0 @@
# Copyright (c) 2015 Huawei, Tech. Co,. Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import pecan

from oslo_config import cfg

from tricircle.common.i18n import _
from tricircle.common import restapp


common_opts = [
    cfg.IPOpt('bind_host', default='0.0.0.0',
              help=_("The host IP to bind to")),
    cfg.PortOpt('bind_port', default=19997,
                help=_("The port to bind to")),
    cfg.IntOpt('api_workers', default=1,
               help=_("number of api workers")),
    cfg.StrOpt('api_extensions_path', default="",
               help=_("The path for API extensions")),
    cfg.StrOpt('auth_strategy', default='keystone',
               help=_("The type of authentication to use")),
    cfg.BoolOpt('allow_bulk', default=True,
                help=_("Allow the usage of the bulk API")),
    cfg.BoolOpt('allow_pagination', default=False,
                help=_("Allow the usage of the pagination")),
    cfg.BoolOpt('allow_sorting', default=False,
                help=_("Allow the usage of the sorting")),
    cfg.StrOpt('pagination_max_limit', default="-1",
               help=_("The maximum number of items returned in a single "
                      "response, value was 'infinite' or negative integer "
                      "means no limit")),
]


def setup_app(*args, **kwargs):
    config = {
        'server': {
            'port': cfg.CONF.bind_port,
            'host': cfg.CONF.bind_host
        },
        'app': {
            'root': 'tricircle.cinder_apigw.controllers.root.RootController',
            'modules': ['tricircle.cinder_apigw'],
            'errors': {
                400: '/error',
                '__force_dict__': True
            }
        }
    }
    pecan_config = pecan.configuration.conf_from_dict(config)

    # app_hooks = [], hook collection will be put here later

    app = pecan.make_app(
        pecan_config.app.root,
        debug=False,
        wrap_app=restapp.auth_app,
        force_canonical=False,
        hooks=[],
        guess_content_type_from_ext=True
    )

    return app
@@ -1,136 +0,0 @@
# Copyright (c) 2015 Huawei Tech. Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import pecan

import oslo_log.log as logging

from tricircle.cinder_apigw.controllers import volume
from tricircle.cinder_apigw.controllers import volume_actions
from tricircle.cinder_apigw.controllers import volume_metadata
from tricircle.cinder_apigw.controllers import volume_type


LOG = logging.getLogger(__name__)


class RootController(object):

    @pecan.expose()
    def _lookup(self, version, *remainder):
        if version == 'v2':
            return V2Controller(), remainder

    @pecan.expose(generic=True, template='json')
    def index(self):
        return {
            "versions": [
                {
                    "status": "CURRENT",
                    "updated": "2012-11-21T11:33:21Z",
                    "id": "v2.0",
                    "links": [
                        {
                            "href": pecan.request.application_url + "/v2/",
                            "rel": "self"
                        }
                    ]
                }
            ]
        }

    @index.when(method='POST')
    @index.when(method='PUT')
    @index.when(method='DELETE')
    @index.when(method='HEAD')
    @index.when(method='PATCH')
    def not_supported(self):
        pecan.abort(405)


class V2Controller(object):

    _media_type1 = "application/vnd.openstack.volume+xml;version=1"
    _media_type2 = "application/vnd.openstack.volume+json;version=1"

    def __init__(self):

        self.resource_controller = {
            'volumes': volume.VolumeController,
            'types': volume_type.VolumeTypeController
        }

        self.volumes_sub_controller = {
            'metadata': volume_metadata.VolumeMetaDataController,
            'action': volume_actions.VolumeActionController,
        }

    @pecan.expose()
    def _lookup(self, tenant_id, *remainder):
        if not remainder:
            pecan.abort(404)
            return
        resource = remainder[0]
        if resource not in self.resource_controller:
            pecan.abort(404)
            return
        if resource == 'volumes' and len(remainder) >= 3:
            volume_id = remainder[1]
            sub_resource = remainder[2]
            if sub_resource not in self.volumes_sub_controller:
                pecan.abort(404)
                return
            return self.volumes_sub_controller[sub_resource](
                tenant_id, volume_id), remainder[3:]
        return self.resource_controller[resource](tenant_id), remainder[1:]

    @pecan.expose(generic=True, template='json')
    def index(self):
        return {
            "version": {
                "status": "CURRENT",
                "updated": "2012-11-21T11:33:21Z",
                "media-types": [
                    {
                        "base": "application/xml",
                        "type": self._media_type1
                    },
                    {
                        "base": "application/json",
                        "type": self._media_type2
                    }
                ],
                "id": "v2.0",
                "links": [
                    {
                        "href": pecan.request.application_url + "/v2/",
                        "rel": "self"
                    },
                    {
                        "href": "http://docs.openstack.org/",
                        "type": "text/html",
                        "rel": "describedby"
                    }
                ]
            }
        }

    @index.when(method='POST')
    @index.when(method='PUT')
    @index.when(method='DELETE')
    @index.when(method='HEAD')
    @index.when(method='PATCH')
    def not_supported(self):
        pecan.abort(405)
@@ -1,387 +0,0 @@
# Copyright (c) 2015 Huawei Tech. Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import urlparse

from pecan import expose
from pecan import request
from pecan import response
from pecan import rest

from oslo_log import log as logging
from oslo_serialization import jsonutils

from tricircle.common import az_ag
from tricircle.common import constants as cons
import tricircle.common.context as t_context
from tricircle.common import httpclient as hclient
from tricircle.common.i18n import _
from tricircle.common.i18n import _LE
from tricircle.common.scheduler import filter_scheduler
from tricircle.common import utils

import tricircle.db.api as db_api
from tricircle.db import core
from tricircle.db import models

LOG = logging.getLogger(__name__)


class VolumeController(rest.RestController):

    def __init__(self, tenant_id):
        self.tenant_id = tenant_id
        self.filter_scheduler = filter_scheduler.FilterScheduler()

    @expose(generic=True, template='json')
    def post(self, **kw):
        context = t_context.extract_context_from_environ()

        if 'volume' not in kw:
            return utils.format_cinder_error(
                400, _("Missing required element 'volume' in request body."))

        az = kw['volume'].get('availability_zone', '')
        pod, pod_az = self.filter_scheduler.select_destination(
            context, az, self.tenant_id, pod_group='')

        if not pod:
            LOG.error(_LE("Pod not configured or scheduling failure"))
            return utils.format_cinder_error(
                500, _('Pod not configured or scheduling failure'))

        t_pod = db_api.get_top_pod(context)
        if not t_pod:
            LOG.error(_LE("Top Pod not configured"))
            return utils.format_cinder_error(500, _('Top Pod not configured'))

        # TODO(joehuang): get release from pod configuration,
        # to convert the content
        # b_release = pod['release']
        # t_release = t_pod['release']
        t_release = cons.R_MITAKA
        b_release = cons.R_MITAKA

        s_ctx = hclient.get_pod_service_ctx(
            context,
            request.url,
            pod['pod_name'],
            s_type=cons.ST_CINDER)

        if s_ctx['b_url'] == '':
            LOG.error(_LE("Bottom Pod endpoint incorrect %s") %
                      pod['pod_name'])
            return utils.format_cinder_error(
                500, _('Bottom Pod endpoint incorrect'))

        b_headers = hclient.convert_header(t_release,
                                           b_release,
                                           request.headers)

        t_vol = kw['volume']

        # add or remove key-value in the request for diff. version
        b_vol_req = hclient.convert_object(t_release, b_release, t_vol,
                                           res_type=cons.RT_VOLUME)

        # convert az to the configured one
        # remove the AZ parameter to bottom request for default one
        b_vol_req['availability_zone'] = pod['pod_az_name']
        if b_vol_req['availability_zone'] == '':
            b_vol_req.pop("availability_zone", None)

        b_body = jsonutils.dumps({'volume': b_vol_req})

        resp = hclient.forward_req(
            context,
            'POST',
            b_headers,
            s_ctx['b_url'],
            b_body)
        b_status = resp.status_code
        b_ret_body = jsonutils.loads(resp.content)

        # build routing and convert response from the bottom pod
        # for different version.
        response.status = b_status
        if b_status == 202:
            if b_ret_body.get('volume') is not None:
                b_vol_ret = b_ret_body['volume']

                try:
                    with context.session.begin():
                        core.create_resource(
                            context, models.ResourceRouting,
                            {'top_id': b_vol_ret['id'],
                             'bottom_id': b_vol_ret['id'],
                             'pod_id': pod['pod_id'],
                             'project_id': self.tenant_id,
                             'resource_type': cons.RT_VOLUME})
                except Exception as e:
                    LOG.exception(_LE('Failed to create volume '
                                      'resource routing'
                                      'top_id: %(top_id)s ,'
                                      'bottom_id: %(bottom_id)s ,'
                                      'pod_id: %(pod_id)s ,'
                                      '%(exception)s '),
                                  {'top_id': b_vol_ret['id'],
                                   'bottom_id': b_vol_ret['id'],
                                   'pod_id': pod['pod_id'],
                                   'exception': e})
                    return utils.format_cinder_error(
                        500, _('Failed to create volume resource routing'))

                ret_vol = hclient.convert_object(b_release, t_release,
                                                 b_vol_ret,
                                                 res_type=cons.RT_VOLUME)

                ret_vol['availability_zone'] = pod['az_name']

                return {'volume': ret_vol}

        return b_ret_body

    @expose(generic=True, template='json')
    def get_one(self, _id):
        context = t_context.extract_context_from_environ()

        if _id == 'detail':
            return {'volumes': self._get_all(context)}

        # TODO(joehuang): get the release of top and bottom
        t_release = cons.R_MITAKA
        b_release = cons.R_MITAKA

        b_headers = hclient.convert_header(t_release,
                                           b_release,
                                           request.headers)

        s_ctx = hclient.get_res_routing_ref(context, _id, request.url,
                                            cons.ST_CINDER)
        if not s_ctx:
            return utils.format_cinder_error(
                404, _('Volume %s could not be found.') % _id)

        if s_ctx['b_url'] == '':
            return utils.format_cinder_error(
                404, _('Bottom Pod endpoint incorrect'))

        resp = hclient.forward_req(context, 'GET',
                                   b_headers,
                                   s_ctx['b_url'],
                                   request.body)

        b_ret_body = jsonutils.loads(resp.content)

        b_status = resp.status_code
        response.status = b_status
        if b_status == 200:
            if b_ret_body.get('volume') is not None:
                b_vol_ret = b_ret_body['volume']
                ret_vol = hclient.convert_object(b_release, t_release,
                                                 b_vol_ret,
                                                 res_type=cons.RT_VOLUME)

                pod = utils.get_pod_by_top_id(context, _id)
                if pod:
                    ret_vol['availability_zone'] = pod['az_name']

                return {'volume': ret_vol}

        # resource not find but routing exist, remove the routing
        if b_status == 404:
            filters = [{'key': 'top_id', 'comparator': 'eq', 'value': _id},
                       {'key': 'resource_type',
                        'comparator': 'eq',
                        'value': cons.RT_VOLUME}]
            with context.session.begin():
                core.delete_resources(context,
                                      models.ResourceRouting,
                                      filters)
        return b_ret_body

    @expose(generic=True, template='json')
    def get_all(self):

        # TODO(joehuang): here should return link instead,
        # now combined with 'detail'

        context = t_context.extract_context_from_environ()
        return {'volumes': self._get_all(context)}

    def _get_all(self, context):

        # TODO(joehuang): query optimization for pagination, sort, etc
        ret = []
        pods = az_ag.list_pods_by_tenant(context, self.tenant_id)
        for pod in pods:
            if pod['pod_name'] == '':
                continue

            query = urlparse.urlsplit(request.url).query
            query_filters = urlparse.parse_qsl(query)
            skip_pod = False
            for k, v in query_filters:
                if k == 'availability_zone' and v != pod['az_name']:
                    skip_pod = True
                    break
            if skip_pod:
                continue

            s_ctx = hclient.get_pod_service_ctx(
                context,
                request.url,
                pod['pod_name'],
                s_type=cons.ST_CINDER)
            if s_ctx['b_url'] == '':
                LOG.error(_LE("bottom pod endpoint incorrect %s")
                          % pod['pod_name'])
                continue

            # TODO(joehuang): get the release of top and bottom
            t_release = cons.R_MITAKA
            b_release = cons.R_MITAKA
            b_headers = hclient.convert_header(t_release,
                                               b_release,
                                               request.headers)

            resp = hclient.forward_req(context, 'GET',
                                       b_headers,
                                       s_ctx['b_url'],
                                       request.body)

            if resp.status_code == 200:

                routings = db_api.get_bottom_mappings_by_tenant_pod(
                    context, self.tenant_id,
                    pod['pod_id'], cons.RT_VOLUME
                )

                b_ret_body = jsonutils.loads(resp.content)
                if b_ret_body.get('volumes'):
                    for vol in b_ret_body['volumes']:

                        if not routings.get(vol['id']):
                            b_ret_body['volumes'].remove(vol)
                            continue

                        vol['availability_zone'] = pod['az_name']

                    ret.extend(b_ret_body['volumes'])
        return ret

    @expose(generic=True, template='json')
    def put(self, _id, **kw):
        context = t_context.extract_context_from_environ()

        # TODO(joehuang): Implement API multi-version compatibility
        # currently _convert_header and _convert_object are both dummy
        # functions and API versions are hard coded. After multi-version
        # compatibility is implemented, API versions will be retrieved from
        # top and bottom API server, also, _convert_header and _convert_object
        # will do the real job to convert the request header and body
        # according to the API versions.
        t_release = cons.R_MITAKA
        b_release = cons.R_MITAKA

        s_ctx = hclient.get_res_routing_ref(context, _id, request.url,
                                            cons.ST_CINDER)
        if not s_ctx:
            return utils.format_cinder_error(
                404, _('Volume %s could not be found.') % _id)

        if s_ctx['b_url'] == '':
            return utils.format_cinder_error(
                404, _('Bottom Pod endpoint incorrect'))

        b_headers = hclient.convert_header(t_release,
                                           b_release,
                                           request.headers)

        t_vol = kw['volume']

        # add or remove key-value in the request for diff. version
        b_vol_req = hclient.convert_object(t_release, b_release, t_vol,
                                           res_type=cons.RT_VOLUME)

        b_body = jsonutils.dumps({'volume': b_vol_req})

        resp = hclient.forward_req(context, 'PUT',
                                   b_headers,
                                   s_ctx['b_url'],
                                   b_body)

        b_status = resp.status_code
        b_ret_body = jsonutils.loads(resp.content)
        response.status = b_status

        if b_status == 200:
            if b_ret_body.get('volume') is not None:
                b_vol_ret = b_ret_body['volume']
                ret_vol = hclient.convert_object(b_release, t_release,
                                                 b_vol_ret,
                                                 res_type=cons.RT_VOLUME)

                pod = utils.get_pod_by_top_id(context, _id)
                if pod:
                    ret_vol['availability_zone'] = pod['az_name']

                return {'volume': ret_vol}

        # resource not found but routing exist, remove the routing
        if b_status == 404:
            filters = [{'key': 'top_id', 'comparator': 'eq', 'value': _id},
                       {'key': 'resource_type',
                        'comparator': 'eq',
                        'value': cons.RT_VOLUME}]
            with context.session.begin():
                core.delete_resources(context,
                                      models.ResourceRouting,
                                      filters)
        return b_ret_body

    @expose(generic=True, template='json')
    def delete(self, _id):
        context = t_context.extract_context_from_environ()

        # TODO(joehuang): get the release of top and bottom
        t_release = cons.R_MITAKA
        b_release = cons.R_MITAKA

        s_ctx = hclient.get_res_routing_ref(context, _id, request.url,
                                            cons.ST_CINDER)
        if not s_ctx:
            return utils.format_cinder_error(
                404, _('Volume %s could not be found.') % _id)

        if s_ctx['b_url'] == '':
            return utils.format_cinder_error(
                404, _('Bottom Pod endpoint incorrect'))

        b_headers = hclient.convert_header(t_release,
                                           b_release,
                                           request.headers)

        resp = hclient.forward_req(context, 'DELETE',
                                   b_headers,
                                   s_ctx['b_url'],
                                   request.body)

        response.status = resp.status_code

        # don't remove the resource routing for delete is async. operation
        # remove the routing when query is executed but not find
        # No content in the resp actually
        return response
@@ -1,244 +0,0 @@
# Copyright 2016 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import pecan
from pecan import expose
from pecan import rest

from oslo_log import log as logging

import tricircle.common.client as t_client
from tricircle.common import constants
import tricircle.common.context as t_context
from tricircle.common.i18n import _
from tricircle.common.i18n import _LE
from tricircle.common import utils
import tricircle.db.api as db_api

LOG = logging.getLogger(__name__)


class VolumeActionController(rest.RestController):

    def __init__(self, project_id, volume_id):
        self.project_id = project_id
        self.volume_id = volume_id
        self.clients = {constants.TOP: t_client.Client()}
        self.handle_map = {
            'os-attach': self._attach,
            'os-extend': self._extend,
            'os-reset_status': self._reset_status,
            'os-set_image_metadata': self._set_image_metadata,
            'os-unset_image_metadata': self._unset_image_metadata,
            'os-show_image_metadata': self._show_image_metadata,
            'os-force_detach': self._force_detach
        }

    def _get_client(self, pod_name=constants.TOP):
        if pod_name not in self.clients:
            self.clients[pod_name] = t_client.Client(pod_name)
        return self.clients[pod_name]

    def _action(self, context, pod_name, action, info=None, **kwargs):
        """Perform a volume "action".

        :param pod_name: the bottom pod name.
        :param action: volume action name.
        :param info: action parameters body.
        """
        body = {action: info}
        url = '/volumes/%s/action' % self.volume_id
        api = self._get_client(pod_name).get_native_client('volume', context)
        return api.client.post(url, body=body)

    def _attach(self, context, pod_name, kw):
        """Add attachment metadata.

        :param pod_name: the bottom pod name.
        :param kw: request body.
        """
        try:
            mountpoint = None
            if 'mountpoint' in kw['os-attach']:
                mountpoint = kw['os-attach']['mountpoint']
            body = {'mountpoint': mountpoint}
            instance_uuid = None
            if 'instance_uuid' in kw['os-attach']:
                instance_uuid = kw['os-attach']['instance_uuid']
            host_name = None
            if 'host_name' in kw['os-attach']:
                host_name = kw['os-attach']['host_name']
        except (KeyError, ValueError, TypeError):
            msg = _('The server could not comply with the request since '
                    'it is either malformed or otherwise incorrect.')
            return utils.format_cinder_error(400, msg)

        if instance_uuid is not None:
            body.update({'instance_uuid': instance_uuid})
        if host_name is not None:
            body.update({'host_name': host_name})
        return self._action(context, pod_name, 'os-attach', body)

    def _extend(self, context, pod_name, kw):
        """Extend the size of the specified volume.

        :param pod_name: the bottom pod name.
        :param kw: request body.
        """
        try:
            new_size = int(kw['os-extend']['new_size'])
        except (KeyError, ValueError, TypeError):
            msg = _("New volume size must be specified as an integer.")
            return utils.format_cinder_error(400, msg)
        return self._action(context, pod_name, 'os-extend',
                            {'new_size': new_size})

    def _force_detach(self, context, pod_name, kw):
        """Forces a volume to detach

        :param pod_name: the bottom pod name.
        :param kw: request body.
        """
        body = kw['os-force_detach']
        return self._action(context, pod_name, 'os-force_detach', body)

    def _reset_status(self, context, pod_name, kw):
        """Update the provided volume with the provided state.

        :param pod_name: the bottom pod name.
        :param kw: request body.
        """
        try:
            status = None
            if 'status' in kw['os-reset_status']:
                status = kw['os-reset_status']['status']
            attach_status = None
            if 'attach_status' in kw['os-reset_status']:
                attach_status = kw['os-reset_status']['attach_status']
            migration_status = None
            if 'migration_status' in kw['os-reset_status']:
                migration_status = kw['os-reset_status']['migration_status']
        except (TypeError, KeyError, ValueError):
            msg = _('The server has either erred or is incapable of '
                    'performing the requested operation.')
            return utils.format_cinder_error(500, msg)

        body = {'status': status} if status else {}
        if attach_status:
            body.update({'attach_status': attach_status})
        if migration_status:
            body.update({'migration_status': migration_status})
        return self._action(context, pod_name, 'os-reset_status', body)

    def _set_image_metadata(self, context, pod_name, kw):
        """Set a volume's image metadata.

        :param pod_name: the bottom pod name.
        :param kw: request body.
        """
        try:
            metadata = kw['os-set_image_metadata']['metadata']
        except (KeyError, TypeError):
            msg = _("Malformed request body.")
            return utils.format_cinder_error(400, msg)
        return self._action(context, pod_name, 'os-set_image_metadata',
                            {'metadata': metadata})

    def _unset_image_metadata(self, context, pod_name, kw):
        """Unset specified keys from volume's image metadata.

        :param pod_name: the bottom pod name.
        :param kw: request body.
        """
        try:
            key = kw['os-unset_image_metadata']['key']
        except (KeyError, TypeError):
            msg = _("Malformed request body.")
            return utils.format_cinder_error(400, msg)
        return self._action(
            context, pod_name, 'os-unset_image_metadata', {'key': key})

    def _show_image_metadata(self, context, pod_name, kw):
        """Show a volume's image metadata.

        :param pod_name: the bottom pod name.
        :param kw: request body.
        """
        return self._action(context, pod_name, 'os-show_image_metadata')

    @expose(generic=True, template='json')
    def post(self, **kw):
        context = t_context.extract_context_from_environ()

        action_handle = None
        action_type = None
        for _type in self.handle_map:
            if _type in kw:
                action_handle = self.handle_map[_type]
                action_type = _type
        if not action_handle:
            return utils.format_cinder_error(
                400, _('Volume action not supported'))

        volume_mappings = db_api.get_bottom_mappings_by_top_id(
            context, self.volume_id, constants.RT_VOLUME)
        if not volume_mappings:
            return utils.format_cinder_error(
                404, _('Volume %(volume_id)s could not be found.') % {
                    'volume_id': self.volume_id
                })

        pod_name = volume_mappings[0][0]['pod_name']

        if action_type == 'os-attach':
            instance_uuid = kw['os-attach'].get('instance_uuid')
            if instance_uuid is not None:
                server_mappings = db_api.get_bottom_mappings_by_top_id(
                    context, instance_uuid, constants.RT_SERVER)
                if not server_mappings:
                    return utils.format_cinder_error(
                        404, _('Server not found'))
                server_pod_name = server_mappings[0][0]['pod_name']
                if server_pod_name != pod_name:
                    LOG.error(_LE('Server %(server)s is in pod %(server_pod)s'
                                  'and volume %(volume)s is in pod'
                                  '%(volume_pod)s, which '
                                  'are not the same.'),
                              {'server': instance_uuid,
                               'server_pod': server_pod_name,
                               'volume': self.volume_id,
                               'volume_pod': pod_name})
                    return utils.format_cinder_error(
                        400, _('Server and volume not in the same pod'))

        try:
            resp, body = action_handle(context, pod_name, kw)
            pecan.response.status = resp.status_code
            if not body:
                return pecan.response
            else:
                return body
        except Exception as e:
            code = 500
            message = _('Action %(action)s on volume %(volume_id)s fails') % {
                'action': action_type,
                'volume_id': self.volume_id}
            if hasattr(e, 'code'):
                code = e.code
            ex_message = str(e)
            if ex_message:
                message = ex_message
            LOG.error(message)
            return utils.format_cinder_error(code, message)
@@ -1,287 +0,0 @@
# Copyright 2016 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


from pecan import expose
from pecan import request
from pecan import response
from pecan import rest

from oslo_log import log as logging
from oslo_serialization import jsonutils

from tricircle.common import constants as cons
import tricircle.common.context as t_context
from tricircle.common import httpclient as hclient
from tricircle.common.i18n import _
from tricircle.common.i18n import _LE
from tricircle.common import utils
import tricircle.db.api as db_api

LOG = logging.getLogger(__name__)


class VolumeMetaDataController(rest.RestController):

    def __init__(self, tenant_id, volume_id):
        self.tenant_id = tenant_id
        self.volume_id = volume_id

    @expose(generic=True, template='json')
    def post(self, **kw):
        """Create volume metadata associated with a volume.

        :param kw: dictionary of values to be created
        :returns: created volume metadata
        """
        context = t_context.extract_context_from_environ()

        if 'metadata' not in kw:
            return utils.format_cinder_error(
                400, _("Missing required element 'metadata' in "
                       "request body."))

        try:
            pod = utils.get_pod_by_top_id(context, self.volume_id)
            if pod is None:
                return utils.format_cinder_error(
                    404, _('Volume %(volume_id)s could not be found.') % {
                        'volume_id': self.volume_id
                    })

            t_pod = db_api.get_top_pod(context)
            if not t_pod:
                LOG.error(_LE("Top Pod not configured"))
                return utils.format_cinder_error(
                    500, _('Top Pod not configured'))
        except Exception as e:
            LOG.exception(_LE('Fail to create metadata for a volume:'
                              '%(volume_id)s'
                              '%(exception)s'),
                          {'volume_id': self.volume_id,
                           'exception': e})
            return utils.format_cinder_error(500, _('Fail to create metadata'))

        t_release = cons.R_MITAKA
        b_release = cons.R_MITAKA

        s_ctx = hclient.get_pod_service_ctx(
            context,
            request.url,
            pod['pod_name'],
            s_type=cons.ST_CINDER)

        if s_ctx['b_url'] == '':
            LOG.error(_LE("Bottom pod endpoint incorrect %s") %
                      pod['pod_name'])
            return utils.format_cinder_error(
                500, _('Bottom pod endpoint incorrect'))

        b_headers = hclient.convert_header(t_release, b_release,
                                           request.headers)

        t_metadata = kw['metadata']

        # add or remove key-value in the request for diff. version
        b_vol_req = hclient.convert_object(t_release, b_release, t_metadata,
                                           res_type=cons.RT_VOl_METADATA)

        b_body = jsonutils.dumps({'metadata': b_vol_req})

        resp = hclient.forward_req(
            context,
            'POST',
            b_headers,
            s_ctx['b_url'],
            b_body)
        b_status = resp.status_code
        b_body_ret = jsonutils.loads(resp.content)

        # convert response from the bottom pod
        # for different version.
        response.status = b_status
        if b_status == 200:
            if b_body_ret.get('metadata') is not None:
                b_metadata_ret = b_body_ret['metadata']

                vol_ret = hclient.convert_object(b_release, t_release,
                                                 b_metadata_ret,
                                                 res_type=cons.
                                                 RT_VOl_METADATA)

                return {'metadata': vol_ret}

        return b_body_ret

    @expose(generic=True, template='json')
    def get_one(self):
        """Get all metadata associated with a volume."""
        context = t_context.extract_context_from_environ()

        t_release = cons.R_MITAKA
        b_release = cons.R_MITAKA

        b_headers = hclient.convert_header(t_release,
                                           b_release,
                                           request.headers)

        try:
            s_ctx = hclient.get_res_routing_ref(context, self.volume_id,
                                                request.url, cons.ST_CINDER)
            if not s_ctx:
                return utils.format_cinder_error(
                    500, _('Fail to find resource'))
        except Exception as e:
            LOG.exception(_LE('Fail to get metadata for a volume:'
                              '%(volume_id)s'
                              '%(exception)s'),
                          {'volume_id': self.volume_id,
                           'exception': e})
            return utils.format_cinder_error(500, _('Fail to get metadata'))

        if s_ctx['b_url'] == '':
            return utils.format_cinder_error(
                500, _('Bottom pod endpoint incorrect'))

        resp = hclient.forward_req(context, 'GET',
                                   b_headers,
                                   s_ctx['b_url'],
                                   request.body)

        b_body_ret = jsonutils.loads(resp.content)

        b_status = resp.status_code
        response.status = b_status
        if b_status == 200:
            if b_body_ret.get('metadata') is not None:
                b_metadata_ret = b_body_ret['metadata']
                vol_ret = hclient.convert_object(b_release, t_release,
                                                 b_metadata_ret,
                                                 res_type=cons.
                                                 RT_VOl_METADATA)
                return {'metadata': vol_ret}

        return b_body_ret

    @expose(generic=True, template='json')
    def put(self, **kw):
        """Update volume metadata.

        :param kw: dictionary of values to be updated
        :returns: updated volume type
        """
        context = t_context.extract_context_from_environ()

        if 'metadata' not in kw:
            return utils.format_cinder_error(
                400, _("Missing required element 'metadata' in "
                       "request body."))

        t_release = cons.R_MITAKA
        b_release = cons.R_MITAKA

        try:
            s_ctx = hclient.get_res_routing_ref(context, self.volume_id,
                                                request.url, cons.ST_CINDER)
            if not s_ctx:
                return utils.format_cinder_error(
                    404, _('Resource not found'))
        except Exception as e:
            LOG.exception(_LE('Fail to update metadata for a volume: '
                              '%(volume_id)s'
                              '%(exception)s'),
                          {'volume_id': self.volume_id,
                           'exception': e})
            return utils.format_cinder_error(
                500, _('Fail to update metadata'))

        if s_ctx['b_url'] == '':
            return utils.format_cinder_error(
                500, _('Bottom pod endpoint incorrect'))

        b_headers = hclient.convert_header(t_release,
                                           b_release,
                                           request.headers)

        t_metadata = kw['metadata']

        # add or remove key/value in the request for diff. version
        b_vol_req = hclient.convert_object(t_release, b_release, t_metadata,
                                           res_type=cons.RT_VOl_METADATA)

        b_body = jsonutils.dumps({'metadata': b_vol_req})

        resp = hclient.forward_req(context, 'PUT',
                                   b_headers,
                                   s_ctx['b_url'],
                                   b_body)

        b_status = resp.status_code
        b_body_ret = jsonutils.loads(resp.content)
        response.status = b_status

        if b_status == 200:
            if b_body_ret.get('metadata') is not None:
                b_metadata_ret = b_body_ret['metadata']
                vol_ret = hclient.convert_object(b_release, t_release,
                                                 b_metadata_ret,
                                                 res_type=cons.
                                                 RT_VOl_METADATA)
                return {'metadata': vol_ret}

        return b_body_ret

    @expose(generic=True, template='json')
    def delete(self, key):
        """Delete the given metadata item from a volume."""
        context = t_context.extract_context_from_environ()

        t_release = cons.R_MITAKA
        b_release = cons.R_MITAKA

        try:
            s_ctx = hclient.get_res_routing_ref(context, self.volume_id,
                                                request.url, cons.ST_CINDER)
            if not s_ctx:
                return utils.format_cinder_error(
                    404, _('Fail to find resource'))
        except Exception as e:
            LOG.exception(_LE('Fail to delete metadata from a volume: '
                              '%(volume_id)s'
                              '%(exception)s'),
                          {'volume_id': self.volume_id,
                           'exception': e})
            return utils.format_cinder_error(
                500, _('Fail to delete metadata'))

        if s_ctx['b_url'] == '':
            return utils.format_cinder_error(
                500, _('Bottom pod endpoint incorrect'))

        b_headers = hclient.convert_header(t_release,
                                           b_release,
                                           request.headers)

        resp = hclient.forward_req(context, 'DELETE',
                                   b_headers,
                                   s_ctx['b_url'],
                                   request.body)

        response.status = resp.status_code

        # don't remove the resource routing for delete is async. operation
        # remove the routing when query is executed but not found
        # No content in the resp actually
        return response
@@ -1,286 +0,0 @@
# Copyright 2016 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import pecan
from pecan import expose
from pecan import rest

from oslo_log import log as logging
from oslo_utils import uuidutils

import tricircle.common.context as t_context
from tricircle.common import exceptions
from tricircle.common.i18n import _
from tricircle.common.i18n import _LE
from tricircle.common import utils
import tricircle.db.api as db_api
from tricircle.db import core
from tricircle.db import models

LOG = logging.getLogger(__name__)


class VolumeTypeController(rest.RestController):

    def __init__(self, tenant_id):
        self.tenant_id = tenant_id

    def _metadata_refs(self, metadata_dict, meta_class):
        metadata_refs = []

        if metadata_dict:
            for k, v in metadata_dict.items():
                metadata_ref = meta_class()
                metadata_ref['key'] = k
                metadata_ref['value'] = v
                metadata_refs.append(metadata_ref)
        return metadata_refs

    @expose(generic=True, template='json')
    def post(self, **kw):
        """Creates volume types."""
        context = t_context.extract_context_from_environ()

        if not context.is_admin:
            return utils.format_cinder_error(
                403, _("Policy doesn't allow volume_extension:types_manage "
                       "to be performed."))

        if 'volume_type' not in kw:
            return utils.format_cinder_error(
                400, _("Missing required element 'volume_type' in "
                       "request body."))

        projects = []

        if self.tenant_id is not None:
            projects = [self.tenant_id]

        vol_type = kw['volume_type']
        name = vol_type.get('name', None)
        description = vol_type.get('description')
        specs = vol_type.get('extra_specs', {})
        is_public = vol_type.pop('os-volume-type-access:is_public', True)

        if name is None or len(name.strip()) == 0:
            return utils.format_cinder_error(
                400, _("Volume type name can not be empty."))

        try:
            utils.check_string_length(name, 'Type name',
                                      min_len=1, max_len=255)
        except exceptions.InvalidInput as e:
            return utils.format_cinder_error(
                400, e.message)

        if description is not None:
            try:
                utils.check_string_length(description, 'Type description',
                                          min_len=0, max_len=255)
            except exceptions.InvalidInput as e:
                return utils.format_cinder_error(400, e.message)

        if not utils.is_valid_boolstr(is_public):
            msg = _("Invalid value '%(is_public)s' for is_public. "
                    "Accepted values: True or False.") % {
                'is_public': is_public}
            return utils.format_cinder_error(400, msg)

        vol_type['extra_specs'] = specs
        vol_type['is_public'] = is_public
        vol_type['id'] = uuidutils.generate_uuid()

        session = core.get_session()
        with session.begin():
            try:
                db_api.volume_type_get_by_name(context, vol_type['name'],
                                               session)
                return utils.format_cinder_error(
                    409, _("Volume Type %(id)s already exists.") % {
                        'id': vol_type['id']})
            except exceptions.VolumeTypeNotFoundByName:
                pass
            try:
                extra_specs = vol_type['extra_specs']
                vol_type['extra_specs'] = \
                    self._metadata_refs(vol_type.get('extra_specs'),
                                        models.VolumeTypeExtraSpecs)
                volume_type_ref = models.VolumeTypes()
                volume_type_ref.update(vol_type)
                session.add(volume_type_ref)
                for project in set(projects):
                    access_ref = models.VolumeTypeProjects()
                    access_ref.update({"volume_type_id": volume_type_ref.id,
                                       "project_id": project})
                    access_ref.save(session=session)
            except Exception as e:
                LOG.exception(_LE('Fail to create volume type: %(name)s,'
                                  '%(exception)s'),
                              {'name': vol_type['name'],
                               'exception': e})
                return utils.format_cinder_error(
                    500, _('Fail to create volume type'))

        vol_type['extra_specs'] = extra_specs
        return {'volume_type': vol_type}

    @expose(generic=True, template='json')
    def get_one(self, _id):
        """Retrieves single volume type by id.

        :param _id: id of volume type to be retrieved
        :returns: retrieved volume type
        """
        context = t_context.extract_context_from_environ()
        try:
            result = db_api.volume_type_get(context, _id)
        except exceptions.VolumeTypeNotFound as e:
            return utils.format_cinder_error(404, e.message)
        except Exception as e:
            LOG.exception(_LE('Volume type not found: %(id)s,'
                              '%(exception)s'),
                          {'id': _id,
                           'exception': e})
            return utils.format_cinder_error(
                404, _("Volume type %(id)s could not be found.") % {
                    'id': _id})
        return {'volume_type': result}

    @expose(generic=True, template='json')
    def get_all(self):
        """Get all non-deleted volume_types."""
        filters = {}
        context = t_context.extract_context_from_environ()
        if not context.is_admin:
            # Only admin has query access to all volume types
            filters['is_public'] = True
        try:
            list_result = db_api.volume_type_get_all(context,
                                                     list_result=True,
                                                     filters=filters)
        except Exception as e:
            LOG.exception(_LE('Fail to retrieve volume types: %(exception)s'),
                          {'exception': e})
            return utils.format_cinder_error(500, e)

        return {'volume_types': list_result}

    @expose(generic=True, template='json')
    def put(self, _id, **kw):
        """Update volume type by id.

        :param _id: id of volume type to be updated
        :param kw: dictionary of values to be updated
        :returns: updated volume type
        """
        context = t_context.extract_context_from_environ()

        if not context.is_admin:
            return utils.format_cinder_error(
                403, _("Policy doesn't allow volume_extension:types_manage "
                       "to be performed."))

        if 'volume_type' not in kw:
            return utils.format_cinder_error(
                400, _("Missing required element 'volume_type' in "
                       "request body."))

        values = kw['volume_type']
        name = values.get('name')
        description = values.get('description')
        is_public = values.get('os-volume-type-access:is_public')

        # Name and description can not be both None.
        # If name specified, name can not be empty.
        if name and len(name.strip()) == 0:
            return utils.format_cinder_error(
                400, _("Volume type name can not be empty."))

        if name is None and description is None and is_public is None:
            msg = _("Specify volume type name, description, is_public or "
                    "a combination thereof.")
            return utils.format_cinder_error(400, msg)

        if is_public is not None and not utils.is_valid_boolstr(is_public):
            msg = _("Invalid value '%(is_public)s' for is_public. Accepted "
                    "values: True or False.") % {'is_public': is_public}
            return utils.format_cinder_error(400, msg)

        if name:
            try:
                utils.check_string_length(name, 'Type name',
                                          min_len=1, max_len=255)
            except exceptions.InvalidInput as e:
                return utils.format_cinder_error(400, e.message)

        if description is not None:
            try:
                utils.check_string_length(description, 'Type description',
                                          min_len=0, max_len=255)
            except exceptions.InvalidInput as e:
                return utils.format_cinder_error(400, e.message)

        try:
            type_updated = \
                db_api.volume_type_update(context, _id,
                                          dict(name=name,
                                               description=description,
                                               is_public=is_public))
        except exceptions.VolumeTypeNotFound as e:
            return utils.format_cinder_error(404, e.message)
        except exceptions.VolumeTypeExists as e:
            return utils.format_cinder_error(409, e.message)
        except exceptions.VolumeTypeUpdateFailed as e:
            return utils.format_cinder_error(500, e.message)
        except Exception as e:
            LOG.exception(_LE('Fail to update volume type: %(name)s,'
                              '%(exception)s'),
                          {'name': values['name'],
                           'exception': e})
            return utils.format_cinder_error(
                500, _("Fail to update volume type."))
        return {'volume_type': type_updated}

    @expose(generic=True, template='json')
    def delete(self, _id):
        """Marks volume types as deleted.

        :param _id: id of volume type to be deleted
        """
        context = t_context.extract_context_from_environ()

        if not context.is_admin:
            return utils.format_cinder_error(
                403, _("Policy doesn't allow volume_extension:types_manage "
                       "to be performed."))

        session = core.get_session()
        with session.begin():
            try:
                db_api.volume_type_get(context, _id, session)
            except exceptions.VolumeTypeNotFound as e:
                return utils.format_cinder_error(404, e.message)
            try:
                db_api.volume_type_delete(context, _id, session)
            except Exception as e:
                LOG.exception(_LE('Fail to update volume type: %(id)s,'
                                  '%(exception)s'),
                              {'id': _id,
                               'exception': e})
                return utils.format_cinder_error(
                    500, _('Fail to delete volume type.'))

        pecan.response.status = 202
        return pecan.response
@@ -1,22 +0,0 @@
# Copyright 2015 Huawei Technologies Co., Ltd.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import tricircle.cinder_apigw.app


def list_opts():
    return [
        ('DEFAULT', tricircle.cinder_apigw.app.common_opts),
    ]
@@ -1,31 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc
import six

from stevedore import driver


@six.add_metaclass(abc.ABCMeta)
class Scheduler(object):

    def __init__(self):
        self.pod_manager = driver.DriverManager(
            namespace='tricircle.common.schedulers',
            name='pod_manager',
            invoke_on_load=True
        ).driver

    @abc.abstractmethod
    def select_destination(self, context, az_name, tenant_id, spec_obj):
        return None, None
@ -1,58 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tricircle.common.scheduler import driver
|
||||
|
||||
|
||||
class FilterScheduler(driver.Scheduler):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(FilterScheduler, self).__init__(*args, **kwargs)
|
||||
|
||||
def select_destination(self, context, az_name, tenant_id, pod_group):
|
||||
current_binding, current_pod = \
|
||||
self.pod_manager.get_current_binding_and_pod(
|
||||
context, az_name, tenant_id, pod_group)
|
||||
|
||||
if current_binding and current_pod:
|
||||
return current_pod, current_pod['pod_az_name']
|
||||
else:
|
||||
pods = self.pod_manager.get_available_pods(
|
||||
context, az_name, pod_group)
|
||||
if not pods:
|
||||
return None, None
|
||||
# TODO(Yipei): Weigh pods and select one whose weight
|
||||
# is the maximum. For now the next pod is chosen in a round-robin manner.
|
||||
is_current = False
|
||||
best_pod = None
|
||||
# cycle through the pod list to select the pod after the current one
|
||||
for pod in pods:
|
||||
if is_current:
|
||||
best_pod = pod
|
||||
break
|
||||
if current_binding \
|
||||
and pod['pod_id'] == current_binding['pod_id']:
|
||||
is_current = True
|
||||
if is_current and len(pods) == 1:
|
||||
return None, None
|
||||
if not best_pod:
|
||||
best_pod = pods[0]
|
||||
|
||||
if current_binding:
|
||||
is_successful = self.pod_manager.update_binding(
|
||||
context, current_binding, best_pod['pod_id'])
|
||||
else:
|
||||
is_successful = self.pod_manager.create_binding(
|
||||
context, tenant_id, best_pod['pod_id'])
|
||||
if not is_successful:
|
||||
return None, None
|
||||
return best_pod, best_pod['pod_az_name']
|
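The pod selection loop above is easiest to see in isolation. Below is a minimal, self-contained sketch (the helper name and sample pod dicts are illustrative, not part of the removed module) of the round-robin behaviour: the pod after the currently bound one is chosen, wrapping to the first pod, and a lone bound pod yields no new choice.

# Illustrative sketch only: the round-robin choice extracted from
# FilterScheduler.select_destination above.
def pick_next_pod(pods, current_pod_id):
    """Return the pod following current_pod_id, wrapping to pods[0]."""
    is_current = False
    best_pod = None
    for pod in pods:
        if is_current:
            best_pod = pod
            break
        if current_pod_id and pod['pod_id'] == current_pod_id:
            is_current = True
    if is_current and len(pods) == 1:
        return None  # the only available pod is the one already bound
    return best_pod or pods[0]

pods = [{'pod_id': 'a'}, {'pod_id': 'b'}, {'pod_id': 'c'}]
assert pick_next_pod(pods, 'a')['pod_id'] == 'b'   # next in the circle
assert pick_next_pod(pods, 'c')['pod_id'] == 'a'   # wraps around
assert pick_next_pod(pods, None)['pod_id'] == 'a'  # no current binding yet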
@ -1,31 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
class BaseFilter(object):
|
||||
"""Base class for all pod filter classes."""
|
||||
def _filter_one(self, obj, pod_group):
|
||||
return True
|
||||
|
||||
def filter_all(self, filter_obj_list, pod_group):
|
||||
for obj in filter_obj_list:
|
||||
if self._filter_one(obj, pod_group):
|
||||
yield obj
|
||||
|
||||
|
||||
class BasePodFilter(BaseFilter):
|
||||
|
||||
def _filter_one(self, obj, pod_group):
|
||||
return self.is_pod_passed(obj, pod_group)
|
||||
|
||||
def is_pod_passed(self, pod, pod_group):
|
||||
raise NotImplementedError()
|
@ -1,23 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tricircle.common.scheduler.filters import base_filters
|
||||
|
||||
|
||||
class BottomPodFilter(base_filters.BasePodFilter):
|
||||
"""Returns all bottom pods."""
|
||||
|
||||
def is_pod_passed(self, pod, pod_group):
|
||||
flag = False
|
||||
if pod['az_name'] != '':
|
||||
flag = True
|
||||
return flag
|
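A short usage sketch for the filter above (the pod dicts are made up; only the 'az_name' key matters to this filter). filter_all() is a generator, so the result is materialized into a list before comparison.

# Illustrative sketch only: exercising BottomPodFilter.filter_all.
f = BottomPodFilter()
pods = [
    {'pod_name': 'top', 'az_name': ''},      # top pod, filtered out
    {'pod_name': 'pod1', 'az_name': 'az1'},  # bottom pod, kept
    {'pod_name': 'pod2', 'az_name': 'az2'},  # bottom pod, kept
]
assert [p['pod_name'] for p in f.filter_all(pods, pod_group='')] == \
    ['pod1', 'pod2']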
@ -1,109 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_log import log as logging
|
||||
from stevedore import driver
|
||||
|
||||
from tricircle.common.i18n import _LE
|
||||
from tricircle.db import api as db_api
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class PodManager(object):
|
||||
def __init__(self):
|
||||
filter_names = ['bottom_pod_filter']
|
||||
self.default_filters = self._choose_pod_filters(filter_names)
|
||||
|
||||
@staticmethod
|
||||
def _choose_pod_filters(filter_names):
|
||||
good_filters = []
|
||||
for filter_name in filter_names:
|
||||
filter_ = driver.DriverManager(
|
||||
'tricircle.common.schedulers',
|
||||
filter_name,
|
||||
invoke_on_load=True
|
||||
).driver
|
||||
good_filters.append(filter_)
|
||||
return good_filters
|
||||
|
||||
@staticmethod
|
||||
def get_current_binding_and_pod(context, az_name, tenant_id, pod_group):
|
||||
filter_b = [{'key': 'tenant_id', 'comparator': 'eq',
|
||||
'value': tenant_id}]
|
||||
current_bindings = db_api.get_pod_binding_by_tenant_id(
|
||||
context, filter_b)
|
||||
if not current_bindings:
|
||||
return None, None
|
||||
|
||||
has_available_pods = False
|
||||
for pod_b in current_bindings:
|
||||
if pod_b['is_binding']:
|
||||
pod = db_api.get_pod_by_pod_id(context, pod_b['pod_id'])
|
||||
if az_name and pod['az_name'] == az_name:
|
||||
has_available_pods = True
|
||||
elif az_name == '' and pod['az_name'] != '':
|
||||
# if the az_name is not specified, a default bottom
|
||||
# pod will be selected
|
||||
has_available_pods = True
|
||||
|
||||
if has_available_pods:
|
||||
# TODO(Yipei): check resource_affinity_tag
|
||||
# if the resource utilization of the pod reaches the limit,
|
||||
# return [], []. Considering the feature of checking
|
||||
# resource utilization is not implemented, we use
|
||||
# resource_affinity_tag to test the logic of updating
|
||||
# a binding relationship.
|
||||
if pod_group != '':
|
||||
return pod_b, None
|
||||
# TODO(Yipei): check resource utilization of the pod
|
||||
# if the resource utilization of the pod reaches the limit,
|
||||
# return pod_b, []
|
||||
|
||||
# If a pod passes the above checking, both the pod and its
|
||||
# corresponding binding are returned.
|
||||
return pod_b, pod
|
||||
return None, None
|
||||
|
||||
@staticmethod
|
||||
def create_binding(context, tenant_id, pod_id):
|
||||
try:
|
||||
db_api.create_pod_binding(context, tenant_id, pod_id)
|
||||
except Exception as e:
|
||||
LOG.error(_LE('Fail to create pod binding: %(exception)s'),
|
||||
{'exception': e})
|
||||
return False
|
||||
return True
|
||||
|
||||
@staticmethod
|
||||
def update_binding(context, current_binding, pod_id):
|
||||
current_binding['is_binding'] = False
|
||||
try:
|
||||
db_api.change_pod_binding(
|
||||
context, current_binding, pod_id)
|
||||
except Exception as e:
|
||||
LOG.error(_LE('Fail to update pod binding: %(exception)s'),
|
||||
{'exception': e})
|
||||
return False
|
||||
return True
|
||||
|
||||
def get_available_pods(self, context, az_name, pod_group):
|
||||
if az_name != '':
|
||||
filter_q = [{'key': 'az_name',
|
||||
'comparator': 'eq', 'value': az_name}]
|
||||
else:
|
||||
filter_q = None
|
||||
pods = db_api.list_pods(context, filter_q)
|
||||
for filter_ in self.default_filters:
|
||||
objs_ = filter_.filter_all(pods, pod_group)
|
||||
pods = list(objs_)
|
||||
return pods
|
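Both DriverManager calls above resolve plugins from the 'tricircle.common.schedulers' namespace by name ('pod_manager', 'bottom_pod_filter'). A hedged sketch of the entry-point wiring they rely on is shown below; the exact module paths in the removed tree may have differed.

# Illustrative sketch only: the entry points stevedore would resolve.
from setuptools import setup

setup(
    name='tricircle',
    entry_points={
        'tricircle.common.schedulers': [
            'pod_manager = '
            'tricircle.common.scheduler.pod_manager:PodManager',
            'bottom_pod_filter = '
            'tricircle.common.scheduler.filters.bottom_pod_filter'
            ':BottomPodFilter',
        ],
    },
)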
@ -1,83 +0,0 @@
|
||||
# Copyright (c) 2015 Huawei, Tech. Co,. Ltd.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
import pecan
|
||||
|
||||
from oslo_config import cfg
|
||||
|
||||
from tricircle.common.i18n import _
|
||||
from tricircle.common import restapp
|
||||
from tricircle.nova_apigw.controllers import micro_versions
|
||||
from tricircle.nova_apigw.controllers import root
|
||||
from tricircle.nova_apigw.controllers import root_versions
|
||||
|
||||
|
||||
common_opts = [
|
||||
cfg.IPOpt('bind_host', default='0.0.0.0',
|
||||
help=_("The host IP to bind to")),
|
||||
cfg.PortOpt('bind_port', default=19998,
|
||||
help=_("The port to bind to")),
|
||||
cfg.IntOpt('api_workers', default=1,
|
||||
help=_("number of api workers")),
|
||||
cfg.StrOpt('api_extensions_path', default="",
|
||||
help=_("The path for API extensions")),
|
||||
cfg.StrOpt('auth_strategy', default='keystone',
|
||||
help=_("The type of authentication to use")),
|
||||
cfg.BoolOpt('allow_bulk', default=True,
|
||||
help=_("Allow the usage of the bulk API")),
|
||||
cfg.BoolOpt('allow_pagination', default=False,
|
||||
help=_("Allow the usage of the pagination")),
|
||||
cfg.BoolOpt('allow_sorting', default=False,
|
||||
help=_("Allow the usage of the sorting")),
|
||||
cfg.StrOpt('pagination_max_limit', default="-1",
|
||||
help=_("The maximum number of items returned in a single "
|
||||
"response, value was 'infinite' or negative integer "
|
||||
"means no limit")),
|
||||
]
|
||||
|
||||
|
||||
def setup_app(*args, **kwargs):
|
||||
config = {
|
||||
'server': {
|
||||
'port': cfg.CONF.bind_port,
|
||||
'host': cfg.CONF.bind_host
|
||||
},
|
||||
'app': {
|
||||
'root': 'tricircle.nova_apigw.controllers.root.RootController',
|
||||
'modules': ['tricircle.nova_apigw'],
|
||||
'errors': {
|
||||
400: '/error',
|
||||
'__force_dict__': True
|
||||
}
|
||||
}
|
||||
}
|
||||
pecan_config = pecan.configuration.conf_from_dict(config)
|
||||
|
||||
app_hooks = [root.ErrorHook()]
|
||||
|
||||
app = pecan.make_app(
|
||||
pecan_config.app.root,
|
||||
debug=False,
|
||||
wrap_app=restapp.auth_app,
|
||||
force_canonical=False,
|
||||
hooks=app_hooks,
|
||||
guess_content_type_from_ext=True
|
||||
)
|
||||
|
||||
# get nova api version
|
||||
app = micro_versions.MicroVersion(app)
|
||||
# version can be unauthenticated so it goes outside of auth
|
||||
app = root_versions.Versions(app)
|
||||
|
||||
return app
|
@ -1,173 +0,0 @@
|
||||
# Copyright (c) 2015 Huawei Tech. Co., Ltd.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import pecan
|
||||
from pecan import expose
|
||||
from pecan import rest
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
import tricircle.common.client as t_client
|
||||
from tricircle.common import constants
|
||||
import tricircle.common.context as t_context
|
||||
from tricircle.common.i18n import _
|
||||
from tricircle.common import utils
|
||||
import tricircle.db.api as db_api
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ActionController(rest.RestController):
|
||||
|
||||
def __init__(self, project_id, server_id):
|
||||
self.project_id = project_id
|
||||
self.server_id = server_id
|
||||
self.clients = {constants.TOP: t_client.Client()}
|
||||
self.handle_map = {
|
||||
'os-start': self._handle_start,
|
||||
'os-stop': self._handle_stop,
|
||||
'forceDelete': self._handle_force_delete,
|
||||
'lock': self._handle_lock,
|
||||
'unlock': self._handle_unlock,
|
||||
'pause': self._handle_pause,
|
||||
'unpause': self._handle_unpause,
|
||||
'resume': self._handle_resume,
|
||||
'suspend': self._handle_suspend,
|
||||
'shelve': self._handle_shelve,
|
||||
'unshelve': self._handle_unshelve,
|
||||
'shelveOffload': self._handle_shelve_offload,
|
||||
'migrate': self._handle_migrate,
|
||||
'trigger_crash_dump': self._handle_trigger_crash_dump,
|
||||
'reboot': self._handle_action,
|
||||
'resize': self._handle_action,
|
||||
'confirmResize': self._handle_action,
|
||||
'revertResize': self._handle_action,
|
||||
'os-resetState': self._handle_action
|
||||
}
|
||||
|
||||
def _get_client(self, pod_name=constants.TOP):
|
||||
if pod_name not in self.clients:
|
||||
self.clients[pod_name] = t_client.Client(pod_name)
|
||||
return self.clients[pod_name]
|
||||
|
||||
def _handle_start(self, context, pod_name, body):
|
||||
client = self._get_client(pod_name)
|
||||
return client.action_servers(context, 'start', self.server_id)
|
||||
|
||||
def _handle_stop(self, context, pod_name, body):
|
||||
client = self._get_client(pod_name)
|
||||
return client.action_servers(context, 'stop', self.server_id)
|
||||
|
||||
def _handle_force_delete(self, context, pod_name, body):
|
||||
client = self._get_client(pod_name)
|
||||
return client.action_servers(context, 'force_delete', self.server_id)
|
||||
|
||||
def _handle_pause(self, context, pod_name, body):
|
||||
client = self._get_client(pod_name)
|
||||
return client.action_servers(context, 'pause', self.server_id)
|
||||
|
||||
def _handle_unpause(self, context, pod_name, body):
|
||||
client = self._get_client(pod_name)
|
||||
return client.action_servers(context, 'unpause', self.server_id)
|
||||
|
||||
def _handle_lock(self, context, pod_name, body):
|
||||
client = self._get_client(pod_name)
|
||||
return client.action_servers(context, 'lock', self.server_id)
|
||||
|
||||
def _handle_unlock(self, context, pod_name, body):
|
||||
client = self._get_client(pod_name)
|
||||
return client.action_servers(context, 'unlock', self.server_id)
|
||||
|
||||
def _handle_suspend(self, context, pod_name, body):
|
||||
client = self._get_client(pod_name)
|
||||
return client.action_servers(context, 'suspend', self.server_id)
|
||||
|
||||
def _handle_resume(self, context, pod_name, body):
|
||||
client = self._get_client(pod_name)
|
||||
return client.action_servers(context, 'resume', self.server_id)
|
||||
|
||||
def _handle_shelve(self, context, pod_name, body):
|
||||
client = self._get_client(pod_name)
|
||||
return client.action_servers(context, 'shelve', self.server_id)
|
||||
|
||||
def _handle_shelve_offload(self, context, pod_name, body):
|
||||
client = self._get_client(pod_name)
|
||||
return client.action_servers(context, 'shelve_offload', self.server_id)
|
||||
|
||||
def _handle_unshelve(self, context, pod_name, body):
|
||||
client = self._get_client(pod_name)
|
||||
return client.action_servers(context, 'unshelve', self.server_id)
|
||||
|
||||
def _handle_trigger_crash_dump(self, context, pod_name, body):
|
||||
client = self._get_client(pod_name)
|
||||
return client.action_servers(context, 'trigger_crash_dump',
|
||||
self.server_id)
|
||||
|
||||
def _handle_migrate(self, context, pod_name, body):
|
||||
client = self._get_client(pod_name)
|
||||
return client.action_servers(context, 'migrate', self.server_id)
|
||||
|
||||
def _handle_action(self, context, pod_name, body):
|
||||
"""Perform a server action
|
||||
|
||||
:param pod_name: the bottom pod name.
|
||||
|
||||
:param body: action parameters body.
|
||||
"""
|
||||
url = constants.SERVER_ACTION_URL % self.server_id
|
||||
api = self._get_client(pod_name).get_native_client(constants.RT_SERVER,
|
||||
context)
|
||||
return api.client.post(url, body=body)
|
||||
|
||||
@expose(generic=True, template='json')
|
||||
def post(self, **kw):
|
||||
context = t_context.extract_context_from_environ()
|
||||
|
||||
action_handle = None
|
||||
action_type = None
|
||||
for _type in self.handle_map:
|
||||
if _type in kw:
|
||||
action_handle = self.handle_map[_type]
|
||||
action_type = _type
|
||||
if not action_handle:
|
||||
return utils.format_nova_error(
|
||||
400, _('Server action not supported'))
|
||||
|
||||
server_mappings = db_api.get_bottom_mappings_by_top_id(
|
||||
context, self.server_id, constants.RT_SERVER)
|
||||
if not server_mappings:
|
||||
return utils.format_nova_error(
|
||||
404, _('Server %s could not be found') % self.server_id)
|
||||
|
||||
pod_name = server_mappings[0][0]['pod_name']
|
||||
try:
|
||||
resp, body = action_handle(context, pod_name, kw)
|
||||
pecan.response.status = resp.status_code
|
||||
if not body:
|
||||
return pecan.response
|
||||
else:
|
||||
return body
|
||||
except Exception as e:
|
||||
code = 500
|
||||
message = _('Action %(action)s on server %(server_id)s fails') % {
|
||||
'action': action_type,
|
||||
'server_id': self.server_id}
|
||||
if hasattr(e, 'code'):
|
||||
code = e.code
|
||||
ex_message = str(e)
|
||||
if ex_message:
|
||||
message = ex_message
|
||||
LOG.error(message)
|
||||
return utils.format_nova_error(code, message)
|
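For reference, the post() handler above dispatches on the top-level key of the JSON body. A hedged client-side sketch follows; the host, token and IDs are placeholders, and the port default comes from the nova_apigw common_opts earlier in this commit.

# Illustrative sketch only: invoking a server action through the gateway.
import json
import requests  # any HTTP client would do

url = ('http://tricircle-nova-apigw:19998/v2.1/'
       '<project_id>/servers/<server_id>/action')
resp = requests.post(
    url,
    headers={'X-Auth-Token': '<keystone-token>',
             'Content-Type': 'application/json'},
    data=json.dumps({'os-stop': None}),  # routed to _handle_stop above
)
print(resp.status_code)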
@ -1,141 +0,0 @@
|
||||
# Copyright (c) 2015 Huawei Tech. Co., Ltd.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import pecan
|
||||
from pecan import expose
|
||||
from pecan import rest
|
||||
|
||||
import oslo_db.exception as db_exc
|
||||
|
||||
from tricircle.common import az_ag
|
||||
import tricircle.common.context as t_context
|
||||
import tricircle.common.exceptions as t_exc
|
||||
from tricircle.common.i18n import _
|
||||
from tricircle.common import utils
|
||||
from tricircle.db import core
|
||||
from tricircle.db import models
|
||||
|
||||
|
||||
class AggregateActionController(rest.RestController):
|
||||
|
||||
def __init__(self, project_id, aggregate_id):
|
||||
self.project_id = project_id
|
||||
self.aggregate_id = aggregate_id
|
||||
|
||||
@expose(generic=True, template='json')
|
||||
def post(self, **kw):
|
||||
context = t_context.extract_context_from_environ()
|
||||
if not context.is_admin:
|
||||
return utils.format_nova_error(
|
||||
403, _("Policy doesn't allow os_compute_api:os-aggregates:"
|
||||
"index to be performed."))
|
||||
try:
|
||||
with context.session.begin():
|
||||
core.get_resource(context, models.Aggregate, self.aggregate_id)
|
||||
except t_exc.ResourceNotFound:
|
||||
return utils.format_nova_error(
|
||||
404, _('Aggregate %s could not be found.') % self.aggregate_id)
|
||||
if 'add_host' in kw or 'remove_host' in kw:
|
||||
return utils.format_nova_error(
|
||||
400, _('Add and remove host action not supported'))
|
||||
# TODO(zhiyuan) handle aggregate metadata updating
|
||||
try:
|
||||
aggregate = az_ag.get_one_ag(context, self.aggregate_id)
|
||||
return {'aggregate': aggregate}
|
||||
except Exception:
|
||||
return utils.format_nova_error(
|
||||
500, _('Aggregate operation on %s failed') % self.aggregate_id)
|
||||
|
||||
|
||||
class AggregateController(rest.RestController):
|
||||
|
||||
def __init__(self, project_id):
|
||||
self.project_id = project_id
|
||||
|
||||
@pecan.expose()
|
||||
def _lookup(self, aggregate_id, action, *remainder):
|
||||
if action == 'action':
|
||||
return AggregateActionController(self.project_id,
|
||||
aggregate_id), remainder
|
||||
|
||||
@expose(generic=True, template='json')
|
||||
def post(self, **kw):
|
||||
context = t_context.extract_context_from_environ()
|
||||
if not context.is_admin:
|
||||
return utils.format_nova_error(
|
||||
403, _("Policy doesn't allow os_compute_api:os-aggregates:"
|
||||
"index to be performed."))
|
||||
if 'aggregate' not in kw:
|
||||
return utils.format_nova_error(
|
||||
400, _('aggregate is not set'))
|
||||
|
||||
host_aggregate = kw['aggregate']
|
||||
name = host_aggregate['name'].strip()
|
||||
avail_zone = host_aggregate.get('availability_zone')
|
||||
if avail_zone:
|
||||
avail_zone = avail_zone.strip()
|
||||
|
||||
try:
|
||||
with context.session.begin():
|
||||
aggregate = az_ag.create_ag_az(context,
|
||||
ag_name=name,
|
||||
az_name=avail_zone)
|
||||
except db_exc.DBDuplicateEntry:
|
||||
return utils.format_nova_error(
|
||||
409, _('Aggregate %s already exists.') % name)
|
||||
except Exception:
|
||||
return utils.format_nova_error(
|
||||
500, _('Fail to create aggregate'))
|
||||
|
||||
return {'aggregate': aggregate}
|
||||
|
||||
@expose(generic=True, template='json')
|
||||
def get_one(self, _id):
|
||||
context = t_context.extract_context_from_environ()
|
||||
try:
|
||||
with context.session.begin():
|
||||
aggregate = az_ag.get_one_ag(context, _id)
|
||||
return {'aggregate': aggregate}
|
||||
except t_exc.ResourceNotFound:
|
||||
return utils.format_nova_error(
|
||||
404, _('Aggregate %s could not be found.') % _id)
|
||||
except Exception:
|
||||
return utils.format_nova_error(
|
||||
500, _('Fail to get aggregate %s') % _id)
|
||||
|
||||
@expose(generic=True, template='json')
|
||||
def get_all(self):
|
||||
context = t_context.extract_context_from_environ()
|
||||
|
||||
try:
|
||||
with context.session.begin():
|
||||
aggregates = az_ag.get_all_ag(context)
|
||||
except Exception:
|
||||
return utils.format_nova_error(500, _('Fail to list aggregates'))
|
||||
return {'aggregates': aggregates}
|
||||
|
||||
@expose(generic=True, template='json')
|
||||
def delete(self, _id):
|
||||
context = t_context.extract_context_from_environ()
|
||||
try:
|
||||
with context.session.begin():
|
||||
az_ag.delete_ag(context, _id)
|
||||
pecan.response.status = 200
|
||||
except t_exc.ResourceNotFound:
|
||||
return utils.format_nova_error(
|
||||
404, _('Aggregate %s could not be found.') % _id)
|
||||
except Exception:
|
||||
return utils.format_nova_error(
|
||||
500, _('Fail to delete aggregate %s') % _id)
|
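The create path above only reads 'name' and the optional 'availability_zone' from the request body. A minimal sketch of that body, with placeholder values:

# Illustrative sketch only: body accepted by AggregateController.post().
aggregate_body = {
    'aggregate': {
        'name': 'ag1',
        'availability_zone': 'az1',  # optional; stripped when present
    }
}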
@ -1,217 +0,0 @@
|
||||
# Copyright (c) 2015 Huawei Tech. Co., Ltd.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import pecan
|
||||
from pecan import expose
|
||||
from pecan import rest
|
||||
|
||||
import oslo_db.exception as db_exc
|
||||
|
||||
import tricircle.common.context as t_context
|
||||
from tricircle.common.i18n import _
|
||||
from tricircle.common import utils
|
||||
from tricircle.db import core
|
||||
from tricircle.db import models
|
||||
|
||||
|
||||
class FlavorManageController(rest.RestController):
|
||||
# NOTE(zhiyuan) according to nova API reference, flavor creating and
|
||||
# deleting should use '/flavors/os-flavor-manage' path, but '/flavors/'
|
||||
# also supports these two operations to keep compatibility with the nova client
|
||||
|
||||
def __init__(self, project_id):
|
||||
self.project_id = project_id
|
||||
|
||||
@expose(generic=True, template='json')
|
||||
def post(self, **kw):
|
||||
context = t_context.extract_context_from_environ()
|
||||
if not context.is_admin:
|
||||
return utils.format_nova_error(
|
||||
403, _("Policy doesn't allow os_compute_api:os-flavor-manage "
|
||||
"to be performed."))
|
||||
|
||||
required_fields = ['name', 'ram', 'vcpus', 'disk']
|
||||
if 'flavor' not in kw:
|
||||
utils.format_nova_error(400, _('flavor is not set'))
|
||||
if not utils.validate_required_fields_set(kw['flavor'],
|
||||
required_fields):
|
||||
utils.format_nova_error(
|
||||
400, _('Invalid input for field/attribute flavor.'))
|
||||
|
||||
flavor_dict = {
|
||||
'name': kw['flavor']['name'],
|
||||
'flavorid': kw['flavor'].get('id'),
|
||||
'memory_mb': kw['flavor']['ram'],
|
||||
'vcpus': kw['flavor']['vcpus'],
|
||||
'root_gb': kw['flavor']['disk'],
|
||||
'ephemeral_gb': kw['flavor'].get('OS-FLV-EXT-DATA:ephemeral', 0),
|
||||
'swap': kw['flavor'].get('swap', 0),
|
||||
'rxtx_factor': kw['flavor'].get('rxtx_factor', 1.0),
|
||||
'is_public': kw['flavor'].get('os-flavor-access:is_public', True),
|
||||
}
|
||||
|
||||
try:
|
||||
with context.session.begin():
|
||||
flavor = core.create_resource(
|
||||
context, models.InstanceTypes, flavor_dict)
|
||||
except db_exc.DBDuplicateEntry as e:
|
||||
if 'flavorid' in e.columns:
|
||||
return utils.format_nova_error(
|
||||
409, _('Flavor with ID %s already '
|
||||
'exists.') % flavor_dict['flavorid'])
|
||||
else:
|
||||
return utils.format_nova_error(
|
||||
409, _('Flavor with name %s already '
|
||||
'exists.') % flavor_dict['name'])
|
||||
except Exception:
|
||||
return utils.format_nova_error(500, _('Failed to create flavor'))
|
||||
|
||||
return {'flavor': flavor}
|
||||
|
||||
@expose(generic=True, template='json')
|
||||
def delete(self, _id):
|
||||
context = t_context.extract_context_from_environ()
|
||||
try:
|
||||
with context.session.begin():
|
||||
flavors = core.query_resource(context, models.InstanceTypes,
|
||||
[{'key': 'flavorid',
|
||||
'comparator': 'eq',
|
||||
'value': _id}], [])
|
||||
if not flavors:
|
||||
return utils.format_nova_error(
|
||||
404, _('Flavor %s could not be found') % _id)
|
||||
core.delete_resource(context, models.InstanceTypes,
|
||||
flavors[0]['id'])
|
||||
except Exception:
|
||||
return utils.format_nova_error(500, _('Failed to delete flavor'))
|
||||
pecan.response.status = 202
|
||||
return
|
||||
|
||||
|
||||
class FlavorController(rest.RestController):
|
||||
|
||||
def __init__(self, project_id):
|
||||
self.project_id = project_id
|
||||
|
||||
@pecan.expose()
|
||||
def _lookup(self, action, *remainder):
|
||||
if action == 'os-flavor-manage':
|
||||
return FlavorManageController(self.project_id), remainder
|
||||
|
||||
@expose(generic=True, template='json')
|
||||
def post(self, **kw):
|
||||
context = t_context.extract_context_from_environ()
|
||||
if not context.is_admin:
|
||||
return utils.format_nova_error(
|
||||
403, _("Policy doesn't allow os_compute_api:os-flavor-manage "
|
||||
"to be performed."))
|
||||
|
||||
required_fields = ['name', 'ram', 'vcpus', 'disk']
|
||||
if 'flavor' not in kw:
|
||||
utils.format_nova_error(400, _('flavor is not set'))
|
||||
if not utils.validate_required_fields_set(kw['flavor'],
|
||||
required_fields):
|
||||
utils.format_nova_error(
|
||||
400, _('Invalid input for field/attribute flavor.'))
|
||||
|
||||
flavor_dict = {
|
||||
'name': kw['flavor']['name'],
|
||||
'flavorid': kw['flavor'].get('id'),
|
||||
'memory_mb': kw['flavor']['ram'],
|
||||
'vcpus': kw['flavor']['vcpus'],
|
||||
'root_gb': kw['flavor']['disk'],
|
||||
'ephemeral_gb': kw['flavor'].get('OS-FLV-EXT-DATA:ephemeral', 0),
|
||||
'swap': kw['flavor'].get('swap', 0),
|
||||
'rxtx_factor': kw['flavor'].get('rxtx_factor', 1.0),
|
||||
'is_public': kw['flavor'].get('os-flavor-access:is_public', True),
|
||||
}
|
||||
|
||||
try:
|
||||
with context.session.begin():
|
||||
flavor = core.create_resource(
|
||||
context, models.InstanceTypes, flavor_dict)
|
||||
except db_exc.DBDuplicateEntry as e:
|
||||
if 'flavorid' in e.columns:
|
||||
return utils.format_nova_error(
|
||||
409, _('Flavor with ID %s already '
|
||||
'exists.') % flavor_dict['flavorid'])
|
||||
else:
|
||||
return utils.format_nova_error(
|
||||
409, _('Flavor with name %s already '
|
||||
'exists.') % flavor_dict['name'])
|
||||
except Exception:
|
||||
return utils.format_nova_error(500, _('Failed to create flavor'))
|
||||
|
||||
flavor['id'] = flavor['flavorid']
|
||||
del flavor['flavorid']
|
||||
return {'flavor': flavor}
|
||||
|
||||
@expose(generic=True, template='json')
|
||||
def get_one(self, _id):
|
||||
# NOTE(zhiyuan) this function handles two kinds of requests
|
||||
# GET /flavors/flavor_id
|
||||
# GET /flavors/detail
|
||||
context = t_context.extract_context_from_environ()
|
||||
if _id == 'detail':
|
||||
with context.session.begin():
|
||||
flavors = core.query_resource(context, models.InstanceTypes,
|
||||
[], [])
|
||||
for flavor in flavors:
|
||||
flavor['id'] = flavor['flavorid']
|
||||
del flavor['flavorid']
|
||||
return {'flavors': flavors}
|
||||
else:
|
||||
with context.session.begin():
|
||||
flavors = core.query_resource(context, models.InstanceTypes,
|
||||
[{'key': 'flavorid',
|
||||
'comparator': 'eq',
|
||||
'value': _id}], [])
|
||||
if not flavors:
|
||||
return utils.format_nova_error(
|
||||
404, _('Flavor %s could not be found') % _id)
|
||||
flavor = flavors[0]
|
||||
flavor['id'] = flavor['flavorid']
|
||||
del flavor['flavorid']
|
||||
return {'flavor': flavor}
|
||||
|
||||
@expose(generic=True, template='json')
|
||||
def get_all(self):
|
||||
context = t_context.extract_context_from_environ()
|
||||
with context.session.begin():
|
||||
flavors = core.query_resource(context, models.InstanceTypes,
|
||||
[], [])
|
||||
return {'flavors': [dict(
|
||||
[('id', flavor['flavorid']),
|
||||
('name', flavor['name'])]) for flavor in flavors]}
|
||||
|
||||
@expose(generic=True, template='json')
|
||||
def delete(self, _id):
|
||||
# TODO(zhiyuan) handle foreign key constraint
|
||||
context = t_context.extract_context_from_environ()
|
||||
try:
|
||||
with context.session.begin():
|
||||
flavors = core.query_resource(context, models.InstanceTypes,
|
||||
[{'key': 'flavorid',
|
||||
'comparator': 'eq',
|
||||
'value': _id}], [])
|
||||
if not flavors:
|
||||
return utils.format_nova_error(
|
||||
404, _('Flavor %s could not be found') % _id)
|
||||
core.delete_resource(context,
|
||||
models.InstanceTypes, flavors[0]['id'])
|
||||
except Exception:
|
||||
return utils.format_nova_error(500, _('Failed to delete flavor'))
|
||||
pecan.response.status = 202
|
||||
return
|
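The flavor creation code above requires only 'name', 'ram', 'vcpus' and 'disk'; everything else falls back to the defaults used when building flavor_dict. A minimal sketch of an accepted body, with placeholder values:

# Illustrative sketch only: body accepted by the flavor post() handlers.
flavor_body = {
    'flavor': {
        'id': '42',   # optional; stored as 'flavorid' in the DB record
        'name': 'm1.tiny',
        'ram': 512,   # MB, mapped to 'memory_mb'
        'vcpus': 1,
        'disk': 1,    # GB, mapped to 'root_gb'
        'os-flavor-access:is_public': True,
    }
}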
@ -1,197 +0,0 @@
|
||||
# Copyright (c) 2015 Huawei Tech. Co., Ltd.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from pecan import expose
|
||||
from pecan import rest
|
||||
import re
|
||||
import urlparse
|
||||
|
||||
import tricircle.common.client as t_client
|
||||
from tricircle.common import constants
|
||||
import tricircle.common.context as t_context
|
||||
from tricircle.common.i18n import _
|
||||
from tricircle.common import utils
|
||||
import tricircle.db.api as db_api
|
||||
|
||||
SUPPORTED_FILTERS = {
|
||||
'name': 'name',
|
||||
'status': 'status',
|
||||
'changes-since': 'changes-since',
|
||||
'server': 'property-instance_uuid',
|
||||
'type': 'property-image_type',
|
||||
'minRam': 'min_ram',
|
||||
'minDisk': 'min_disk',
|
||||
}
|
||||
|
||||
|
||||
def url_join(*parts):
|
||||
"""Convenience method for joining parts of a URL
|
||||
|
||||
Any leading and trailing '/' characters are removed, and the parts joined
|
||||
together with '/' as a separator. If last element of 'parts' is an empty
|
||||
string, the returned URL will have a trailing slash.
|
||||
"""
|
||||
parts = parts or ['']
|
||||
clean_parts = [part.strip('/') for part in parts if part]
|
||||
if not parts[-1]:
|
||||
# Empty last element should add a trailing slash
|
||||
clean_parts.append('')
|
||||
return '/'.join(clean_parts)
|
||||
|
||||
|
||||
def remove_trailing_version_from_href(href):
|
||||
"""Removes the api version from the href.
|
||||
|
||||
Given: 'http://www.nova.com/compute/v1.1'
|
||||
Returns: 'http://www.nova.com/compute'
|
||||
|
||||
Given: 'http://www.nova.com/v1.1'
|
||||
Returns: 'http://www.nova.com'
|
||||
|
||||
"""
|
||||
parsed_url = urlparse.urlsplit(href)
|
||||
url_parts = parsed_url.path.rsplit('/', 1)
|
||||
|
||||
# NOTE: this should match vX.X or vX
|
||||
expression = re.compile(r'^v([0-9]+|[0-9]+\.[0-9]+)(/.*|$)')
|
||||
if not expression.match(url_parts.pop()):
|
||||
raise ValueError('URL %s does not contain version' % href)
|
||||
|
||||
new_path = url_join(*url_parts)
|
||||
parsed_url = list(parsed_url)
|
||||
parsed_url[2] = new_path
|
||||
return urlparse.urlunsplit(parsed_url)
|
||||
|
||||
|
||||
class ImageController(rest.RestController):
|
||||
|
||||
def __init__(self, project_id):
|
||||
self.project_id = project_id
|
||||
self.client = t_client.Client()
|
||||
|
||||
def _get_links(self, context, image):
|
||||
nova_url = self.client.get_endpoint(
|
||||
context, db_api.get_top_pod(context)['pod_id'],
|
||||
constants.ST_NOVA)
|
||||
nova_url = nova_url.replace('/$(tenant_id)s', '')
|
||||
self_link = url_join(nova_url, self.project_id, 'images', image['id'])
|
||||
bookmark_link = url_join(
|
||||
remove_trailing_version_from_href(nova_url),
|
||||
self.project_id, 'images', image['id'])
|
||||
glance_url = self.client.get_endpoint(
|
||||
context, db_api.get_top_pod(context)['pod_id'],
|
||||
constants.ST_GLANCE)
|
||||
alternate_link = '/'.join([glance_url, 'images', image['id']])
|
||||
return [{'rel': 'self', 'href': self_link},
|
||||
{'rel': 'bookmark', 'href': bookmark_link},
|
||||
{'rel': 'alternate',
|
||||
'type': 'application/vnd.openstack.image',
|
||||
'href': alternate_link}]
|
||||
|
||||
@staticmethod
|
||||
def _format_date(dt):
|
||||
"""Return standard format for a given datetime string."""
|
||||
if dt is not None:
|
||||
date_string = dt.split('.')[0]
|
||||
date_string += 'Z'
|
||||
return date_string
|
||||
|
||||
@staticmethod
|
||||
def _get_status(image):
|
||||
"""Update the status field to standardize format."""
|
||||
return {
|
||||
'active': 'ACTIVE',
|
||||
'queued': 'SAVING',
|
||||
'saving': 'SAVING',
|
||||
'deleted': 'DELETED',
|
||||
'pending_delete': 'DELETED',
|
||||
'killed': 'ERROR',
|
||||
}.get(image.get('status'), 'UNKNOWN')
|
||||
|
||||
@staticmethod
|
||||
def _get_progress(image):
|
||||
return {
|
||||
'queued': 25,
|
||||
'saving': 50,
|
||||
'active': 100,
|
||||
}.get(image.get('status'), 0)
|
||||
|
||||
def _construct_list_image_entry(self, context, image):
|
||||
return {'id': image['id'],
|
||||
'name': image.get('name'),
|
||||
'links': self._get_links(context, image)}
|
||||
|
||||
def _construct_show_image_entry(self, context, image):
|
||||
return {
|
||||
'id': image['id'],
|
||||
'name': image.get('name'),
|
||||
'minRam': int(image.get('min_ram') or 0),
|
||||
'minDisk': int(image.get('min_disk') or 0),
|
||||
'metadata': image.get('properties', {}),
|
||||
'created': self._format_date(image.get('created_at')),
|
||||
'updated': self._format_date(image.get('updated_at')),
|
||||
'status': self._get_status(image),
|
||||
'progress': self._get_progress(image),
|
||||
'links': self._get_links(context, image)
|
||||
}
|
||||
|
||||
@expose(generic=True, template='json')
|
||||
def get_one(self, _id, **kwargs):
|
||||
context = t_context.extract_context_from_environ()
|
||||
if _id == 'detail':
|
||||
return self.get_all(**kwargs)
|
||||
image = self.client.get_images(context, _id)
|
||||
if not image:
|
||||
return utils.format_nova_error(404, _('Image not found'))
|
||||
return {'image': self._construct_show_image_entry(context, image)}
|
||||
|
||||
def _get_filters(self, params):
|
||||
"""Return a dictionary of query param filters from the request.
|
||||
|
||||
:param params: the URI params coming from the wsgi layer
|
||||
:return a dict of key/value filters
|
||||
"""
|
||||
filters = {}
|
||||
for param in params:
|
||||
if param in SUPPORTED_FILTERS or param.startswith('property-'):
|
||||
# map filter name or carry through if property-*
|
||||
filter_name = SUPPORTED_FILTERS.get(param, param)
|
||||
filters[filter_name] = params.get(param)
|
||||
|
||||
# ensure server filter is the instance uuid
|
||||
filter_name = 'property-instance_uuid'
|
||||
try:
|
||||
filters[filter_name] = filters[filter_name].rsplit('/', 1)[1]
|
||||
except (AttributeError, IndexError, KeyError):
|
||||
pass
|
||||
|
||||
filter_name = 'status'
|
||||
if filter_name in filters:
|
||||
# The Image API expects us to use lowercase strings for status
|
||||
filters[filter_name] = filters[filter_name].lower()
|
||||
|
||||
return filters
|
||||
|
||||
@expose(generic=True, template='json')
|
||||
def get_all(self, **kwargs):
|
||||
context = t_context.extract_context_from_environ()
|
||||
filters = self._get_filters(kwargs)
|
||||
filters = [{'key': key,
|
||||
'comparator': 'eq',
|
||||
'value': value} for key, value in filters.iteritems()]
|
||||
images = self.client.list_images(context, filters=filters)
|
||||
ret_images = [self._construct_list_image_entry(
|
||||
context, image) for image in images]
|
||||
return {'images': ret_images}
|
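The two URL helpers above behave as their docstrings state. The checks below assume both functions are in scope; the remove_trailing_version_from_href cases restate its docstring examples, and the url_join case follows from its documented trailing-slash rule.

# Illustrative sketch only: docstring behaviour as executable checks.
assert url_join('http://www.nova.com', 'v2.1', '') == \
    'http://www.nova.com/v2.1/'
assert remove_trailing_version_from_href(
    'http://www.nova.com/compute/v1.1') == 'http://www.nova.com/compute'
assert remove_trailing_version_from_href(
    'http://www.nova.com/v1.1') == 'http://www.nova.com'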
@ -1,120 +0,0 @@
|
||||
# Copyright 2015 Huawei Technologies Co., Ltd.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from novaclient import api_versions
|
||||
from novaclient import exceptions
|
||||
|
||||
from oslo_serialization import jsonutils
|
||||
from oslo_service import wsgi
|
||||
from oslo_utils import encodeutils
|
||||
|
||||
import webob.dec
|
||||
|
||||
from tricircle.common import constants
|
||||
|
||||
|
||||
class MicroVersion(object):
|
||||
|
||||
@staticmethod
|
||||
def _format_error(code, message, error_type='computeFault'):
|
||||
return {error_type: {'message': message, 'code': code}}
|
||||
|
||||
@classmethod
|
||||
def factory(cls, global_config, **local_config):
|
||||
return cls(app=None)
|
||||
|
||||
@webob.dec.wsgify(RequestClass=wsgi.Request)
|
||||
def __call__(self, req):
|
||||
"""Get the nova micro version number
|
||||
|
||||
* If neither "X-OpenStack-Nova-API-Version" nor
|
||||
"OpenStack-API-Version" (specifying "compute") is provided,
|
||||
act as if the minimum supported microversion was specified.
|
||||
|
||||
* If both headers are provided,
|
||||
"OpenStack-API-Version" will be preferred.
|
||||
|
||||
* If "X-OpenStack-Nova-API-Version" or "OpenStack-API-Version"
|
||||
is provided, respond with the API at that microversion.
|
||||
If that's outside of the range of microversions supported,
|
||||
return 406 Not Acceptable.
|
||||
|
||||
* If "X-OpenStack-Nova-API-Version" or "OpenStack-API-Version"
|
||||
has a value of "latest" (special keyword),
|
||||
act as if maximum was specified.
|
||||
"""
|
||||
version_num = req.environ.get(
|
||||
constants.HTTP_NOVA_API_VERSION_REQUEST_HEADER)
|
||||
legacy_version_num = req.environ.get(
|
||||
constants.HTTP_LEGACY_NOVA_API_VERSION_REQUEST_HEADER)
|
||||
message = None
|
||||
api_version = None
|
||||
|
||||
if version_num is None and legacy_version_num is None:
|
||||
micro_version = constants.NOVA_APIGW_MIN_VERSION
|
||||
elif version_num is not None:
|
||||
err_msg = ("Invalid format of client version '%s'. "
|
||||
"Expected format 'compute X.Y',"
|
||||
"where X is a major part and Y "
|
||||
"is a minor part of version.") % version_num
|
||||
try:
|
||||
nova_version_prefix = version_num.split()[0]
|
||||
micro_version = ''.join(version_num.split()[1:])
|
||||
if nova_version_prefix != 'compute':
|
||||
message = err_msg
|
||||
except Exception:
|
||||
message = err_msg
|
||||
else:
|
||||
micro_version = legacy_version_num
|
||||
|
||||
if message is None:
|
||||
try:
|
||||
# Returns checked APIVersion object,
|
||||
# or raise UnsupportedVersion exceptions.
|
||||
api_version = api_versions.get_api_version(micro_version)
|
||||
except exceptions.UnsupportedVersion as e:
|
||||
message = e.message
|
||||
|
||||
if message is None and api_version is not None:
|
||||
min_minor = int(constants.NOVA_APIGW_MIN_VERSION.split('.')[1])
|
||||
max_minor = int(constants.NOVA_APIGW_MAX_VERSION.split('.')[1])
|
||||
if api_version.is_latest():
|
||||
micro_version = constants.NOVA_APIGW_MAX_VERSION
|
||||
api_version.ver_minor = max_minor
|
||||
|
||||
if api_version.ver_minor < min_minor or \
|
||||
api_version.ver_minor > max_minor:
|
||||
message = ("Version %s is not supported by the API. "
|
||||
"Minimum is %s, and maximum is %s"
|
||||
% (micro_version, constants.NOVA_APIGW_MIN_VERSION,
|
||||
constants.NOVA_APIGW_MAX_VERSION))
|
||||
|
||||
if message is None:
|
||||
req.environ[constants.NOVA_API_VERSION_REQUEST_HEADER] = \
|
||||
micro_version
|
||||
if self.app:
|
||||
return req.get_response(self.app)
|
||||
else:
|
||||
content_type = 'application/json'
|
||||
body = jsonutils.dumps(
|
||||
self._format_error('406', message, 'computeFault'))
|
||||
response = webob.Response()
|
||||
response.content_type = content_type
|
||||
response.body = encodeutils.to_utf8(body)
|
||||
response.status_code = 406
|
||||
return response
|
||||
|
||||
def __init__(self, app):
|
||||
self.app = app
|
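A hedged client-side sketch of the version negotiation described in the docstring above; the header names come from that docstring, while the URL, token and version value are placeholders.

# Illustrative sketch only: pinning the compute microversion per request.
import requests  # any HTTP client would do

resp = requests.get(
    'http://tricircle-nova-apigw:19998/v2.1/<project_id>/servers',
    headers={
        'X-Auth-Token': '<keystone-token>',
        # new-style header, preferred when both are present
        'OpenStack-API-Version': 'compute 2.1',
        # legacy header, only consulted if the one above is missing
        'X-OpenStack-Nova-API-Version': '2.1',
    },
)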
@ -1,50 +0,0 @@
|
||||
# Copyright (c) 2015 Huawei Tech. Co., Ltd.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from pecan import expose
|
||||
from pecan import rest
|
||||
|
||||
import tricircle.common.client as t_client
|
||||
import tricircle.common.context as t_context
|
||||
from tricircle.common.i18n import _
|
||||
from tricircle.common import utils
|
||||
|
||||
|
||||
class NetworkController(rest.RestController):
|
||||
|
||||
def __init__(self, project_id):
|
||||
self.project_id = project_id
|
||||
self.client = t_client.Client()
|
||||
|
||||
@staticmethod
|
||||
def _construct_network_entry(network):
|
||||
network['uuid'] = network['id']
|
||||
network['label'] = network['name']
|
||||
return network
|
||||
|
||||
@expose(generic=True, template='json')
|
||||
def get_one(self, _id):
|
||||
context = t_context.extract_context_from_environ()
|
||||
network = self.client.get_networks(context, _id)
|
||||
if not network:
|
||||
return utils.format_nova_error(404, _('Network not found'))
|
||||
return {'network': self._construct_network_entry(network)}
|
||||
|
||||
@expose(generic=True, template='json')
|
||||
def get_all(self):
|
||||
context = t_context.extract_context_from_environ()
|
||||
networks = self.client.list_networks(context)
|
||||
return {'networks': [self._construct_network_entry(
|
||||
network) for network in networks]}
|
@ -1,269 +0,0 @@
|
||||
# Copyright (c) 2015 Huawei Tech. Co., Ltd.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import six
|
||||
|
||||
from pecan import expose
|
||||
from pecan import request
|
||||
from pecan import response
|
||||
from pecan import Response
|
||||
from pecan import rest
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
|
||||
import tricircle.common.context as t_context
|
||||
from tricircle.common import exceptions as t_exceptions
|
||||
from tricircle.common.i18n import _
|
||||
from tricircle.common import quota
|
||||
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class QuotaSetsController(rest.RestController):
|
||||
|
||||
def __init__(self, tenant_id):
|
||||
self.tenant_id = tenant_id
|
||||
|
||||
@expose()
|
||||
def _lookup(self, target_tenant_id, *remainder):
|
||||
return QuotaController(self.tenant_id, target_tenant_id), remainder
|
||||
|
||||
|
||||
def build_absolute_limits(quotas):
|
||||
|
||||
quota_map = {
|
||||
'maxTotalRAMSize': 'ram',
|
||||
'maxTotalInstances': 'instances',
|
||||
'maxTotalCores': 'cores',
|
||||
'maxTotalKeypairs': 'key_pairs',
|
||||
'maxTotalFloatingIps': 'floating_ips',
|
||||
'maxPersonality': 'injected_files',
|
||||
'maxPersonalitySize': 'injected_file_content_bytes',
|
||||
'maxSecurityGroups': 'security_groups',
|
||||
'maxSecurityGroupRules': 'security_group_rules',
|
||||
'maxServerMeta': 'metadata_items',
|
||||
'maxServerGroups': 'server_groups',
|
||||
'maxServerGroupMembers': 'server_group_members',
|
||||
}
|
||||
|
||||
limits = {}
|
||||
for display_name, key in six.iteritems(quota_map):
|
||||
if key in quotas:
|
||||
limits[display_name] = quotas[key]['limit']
|
||||
return limits
|
||||
|
||||
|
||||
def build_used_limits(quotas):
|
||||
|
||||
quota_map = {
|
||||
'totalRAMUsed': 'ram',
|
||||
'totalCoresUsed': 'cores',
|
||||
'totalInstancesUsed': 'instances',
|
||||
'totalFloatingIpsUsed': 'floating_ips',
|
||||
'totalSecurityGroupsUsed': 'security_groups',
|
||||
'totalServerGroupsUsed': 'server_groups',
|
||||
}
|
||||
|
||||
# NOTE: usage may need to be refreshed from the bottom pods; for now it
# is taken from the data kept in the top pod
|
||||
used_limits = {}
|
||||
for display_name, key in six.iteritems(quota_map):
|
||||
if key in quotas:
|
||||
reserved = quotas[key]['reserved']
|
||||
used_limits[display_name] = quotas[key]['in_use'] + reserved
|
||||
|
||||
return used_limits
|
||||
|
||||
|
||||
class LimitsController(rest.RestController):
|
||||
|
||||
def __init__(self, tenant_id):
|
||||
self.tenant_id = tenant_id
|
||||
|
||||
@staticmethod
|
||||
def _reserved(req):
|
||||
try:
|
||||
return int(req.GET['reserved'])
|
||||
except (ValueError, KeyError):
|
||||
return False
|
||||
|
||||
@expose(generic=True, template='json')
|
||||
def get_all(self):
|
||||
|
||||
# TODO(joehuang): add policy control here
|
||||
|
||||
context = t_context.extract_context_from_environ()
|
||||
context.project_id = self.tenant_id
|
||||
target_tenant_id = request.params.get('tenant_id', None)
|
||||
if target_tenant_id:
|
||||
target_tenant_id = target_tenant_id.strip()
|
||||
else:
|
||||
return Response('tenant_id not given', 400)
|
||||
|
||||
qs = quota.QuotaSetOperation(target_tenant_id,
|
||||
None)
|
||||
try:
|
||||
quotas = qs.show_detail_quota(context, show_usage=True)
|
||||
except t_exceptions.NotFound as e:
|
||||
msg = str(e)
|
||||
LOG.exception(msg=msg)
|
||||
return Response(msg, 404)
|
||||
except (t_exceptions.AdminRequired,
|
||||
t_exceptions.NotAuthorized,
|
||||
t_exceptions.HTTPForbiddenError) as e:
|
||||
msg = str(e)
|
||||
LOG.exception(msg=msg)
|
||||
return Response(msg, 403)
|
||||
except Exception as e:
|
||||
msg = str(e)
|
||||
LOG.exception(msg=msg)
|
||||
return Response(msg, 400)
|
||||
|
||||
# TODO(joehuang): add API rate limits later
|
||||
ret = {
|
||||
'limits': {
|
||||
'rate': {},
|
||||
'absolute': {},
|
||||
},
|
||||
}
|
||||
|
||||
ret['limits']['absolute'].update(
|
||||
build_absolute_limits(quotas['quota_set']))
|
||||
ret['limits']['absolute'].update(
|
||||
build_used_limits(quotas['quota_set']))
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
class QuotaController(rest.RestController):
|
||||
|
||||
def __init__(self, owner_tenant_id, target_tenant_id):
|
||||
self.owner_tenant_id = owner_tenant_id
|
||||
self.target_tenant_id = target_tenant_id
|
||||
|
||||
@expose(generic=True, template='json')
|
||||
def put(self, **kw):
|
||||
|
||||
context = t_context.extract_context_from_environ()
|
||||
if not context.is_admin:
|
||||
# TODO(joahuang): change to policy control later
|
||||
# to support reseller admin mode
|
||||
return Response(_('Admin role required to update quota'), 409)
|
||||
|
||||
return self._quota_action('put', **kw)
|
||||
|
||||
@expose(generic=True, template='json')
|
||||
def delete(self):
|
||||
"""Delete Quota for a particular tenant.
|
||||
|
||||
This works for hierarchical and non-hierarchical projects. For
|
||||
hierarchical projects only immediate parent admin or the
|
||||
CLOUD admin are able to perform a delete.
|
||||
|
||||
:param id: target project id that needs to be deleted
|
||||
"""
|
||||
|
||||
context = t_context.extract_context_from_environ()
|
||||
if not context.is_admin:
|
||||
# TODO(joahuang): change to policy control later
|
||||
# to support reseller admin mode
|
||||
return Response(_('Admin role required to delete quota'), 409)
|
||||
|
||||
kw = {}
|
||||
return self._quota_action('delete', **kw)
|
||||
|
||||
@expose(generic=True, template='json')
|
||||
def get_one(self, show_what):
|
||||
kw = {}
|
||||
if show_what == 'defaults' or show_what == 'detail':
|
||||
return self._quota_action(show_what, **kw)
|
||||
else:
|
||||
return Response(_('Only show defaults or detail allowed'), 400)
|
||||
|
||||
@expose(generic=True, template='json')
|
||||
def get_all(self):
|
||||
kw = {}
|
||||
return self._quota_action('quota-show', **kw)
|
||||
|
||||
def _quota_action(self, action, **kw):
|
||||
|
||||
context = t_context.extract_context_from_environ()
|
||||
context.project_id = self.owner_tenant_id
|
||||
target_tenant_id = self.target_tenant_id
|
||||
target_user_id = request.params.get('user_id', None)
|
||||
if target_user_id:
|
||||
target_user_id = target_user_id.strip()
|
||||
|
||||
qs = quota.QuotaSetOperation(target_tenant_id,
|
||||
target_user_id)
|
||||
quotas = {}
|
||||
try:
|
||||
if action == 'put':
|
||||
quotas = qs.update(context, **kw)
|
||||
elif action == 'delete':
|
||||
qs.delete(context)
|
||||
response.status = 202
|
||||
return
|
||||
elif action == 'defaults':
|
||||
quotas = qs.show_default_quota(context)
|
||||
elif action == 'detail':
|
||||
quotas = qs.show_detail_quota(context, show_usage=True)
|
||||
|
||||
# remove the allocated field which is not visible in Nova
|
||||
for k, v in quotas['quota_set'].iteritems():
|
||||
if k != 'id':
|
||||
v.pop('allocated', None)
|
||||
|
||||
elif action == 'quota-show':
|
||||
quotas = qs.show_detail_quota(context, show_usage=False)
|
||||
else:
|
||||
return Response('Resource not found', 404)
|
||||
except t_exceptions.NotFound as e:
|
||||
msg = str(e)
|
||||
LOG.exception(msg=msg)
|
||||
return Response(msg, 404)
|
||||
except (t_exceptions.AdminRequired,
|
||||
t_exceptions.NotAuthorized,
|
||||
t_exceptions.HTTPForbiddenError) as e:
|
||||
msg = str(e)
|
||||
LOG.exception(msg=msg)
|
||||
return Response(msg, 403)
|
||||
except Exception as e:
|
||||
msg = str(e)
|
||||
LOG.exception(msg=msg)
|
||||
return Response(msg, 400)
|
||||
|
||||
return {'quota_set': self._build_visible_quota(quotas['quota_set'])}
|
||||
|
||||
def _build_visible_quota(self, quota_set):
|
||||
quota_map = [
|
||||
'id', 'instances', 'ram', 'cores', 'key_pairs',
|
||||
'floating_ips', 'fixed_ips',
|
||||
'injected_files', 'injected_file_path_bytes',
|
||||
'injected_file_content_bytes',
|
||||
'security_groups', 'security_group_rules',
|
||||
'metadata_items', 'server_groups', 'server_group_members',
|
||||
]
|
||||
|
||||
ret = {}
|
||||
# only return Nova visible quota items
|
||||
for k, v in quota_set.iteritems():
|
||||
if k in quota_map:
|
||||
ret[k] = v
|
||||
|
||||
return ret
|
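The two builder functions above just translate quota keys into the names Nova clients expect. A small sketch with made-up numbers, assuming both functions are in scope:

# Illustrative sketch only: feeding the limit builders a quota_set dict.
quota_set = {
    'ram': {'limit': 51200, 'in_use': 2048, 'reserved': 0},
    'cores': {'limit': 20, 'in_use': 4, 'reserved': 1},
}
assert build_absolute_limits(quota_set) == \
    {'maxTotalRAMSize': 51200, 'maxTotalCores': 20}
assert build_used_limits(quota_set) == \
    {'totalRAMUsed': 2048, 'totalCoresUsed': 5}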
@ -1,166 +0,0 @@
|
||||
# Copyright (c) 2015 Huawei Tech. Co., Ltd.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import pecan
|
||||
|
||||
from pecan import expose
|
||||
from pecan import hooks
|
||||
from pecan import rest
|
||||
|
||||
import oslo_log.log as logging
|
||||
|
||||
import webob.exc as web_exc
|
||||
|
||||
from tricircle.common import constants
|
||||
from tricircle.common import context as ctx
|
||||
from tricircle.common import xrpcapi
|
||||
from tricircle.nova_apigw.controllers import action
|
||||
from tricircle.nova_apigw.controllers import aggregate
|
||||
from tricircle.nova_apigw.controllers import flavor
|
||||
from tricircle.nova_apigw.controllers import image
|
||||
from tricircle.nova_apigw.controllers import network
|
||||
from tricircle.nova_apigw.controllers import quota_sets
|
||||
from tricircle.nova_apigw.controllers import server
|
||||
from tricircle.nova_apigw.controllers import server_ips
|
||||
from tricircle.nova_apigw.controllers import volume
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ErrorHook(hooks.PecanHook):
|
||||
# NOTE(zhiyuan) pecan's default error body is not compatible with nova
|
||||
# client, clear body in this hook
|
||||
def on_error(self, state, exc):
|
||||
if isinstance(exc, web_exc.HTTPException):
|
||||
exc.body = ''
|
||||
return exc
|
||||
|
||||
|
||||
class RootController(object):
|
||||
|
||||
@pecan.expose()
|
||||
def _lookup(self, version, *remainder):
|
||||
if version == 'v2.1':
|
||||
return V21Controller(), remainder
|
||||
|
||||
|
||||
class V21Controller(object):
|
||||
|
||||
_media_type = "application/vnd.openstack.compute+json;version=2.1"
|
||||
|
||||
def __init__(self):
|
||||
self.resource_controller = {
|
||||
'flavors': flavor.FlavorController,
|
||||
'os-aggregates': aggregate.AggregateController,
|
||||
'servers': server.ServerController,
|
||||
'images': image.ImageController,
|
||||
'os-quota-sets': quota_sets.QuotaSetsController,
|
||||
'limits': quota_sets.LimitsController,
|
||||
'os-networks': network.NetworkController
|
||||
}
|
||||
self.server_sub_controller = {
|
||||
'os-volume_attachments': volume.VolumeController,
|
||||
'action': action.ActionController,
|
||||
'ips': server_ips.ServerIpsController
|
||||
}
|
||||
|
||||
def _get_resource_controller(self, project_id, remainder):
|
||||
if not remainder:
|
||||
pecan.abort(404)
|
||||
return
|
||||
resource = remainder[0]
|
||||
if resource not in self.resource_controller:
|
||||
pecan.abort(404)
|
||||
return
|
||||
if resource == 'servers' and len(remainder) >= 3:
|
||||
server_id = remainder[1]
|
||||
sub_resource = remainder[2]
|
||||
if sub_resource not in self.server_sub_controller:
|
||||
pecan.abort(404)
|
||||
return
|
||||
return self.server_sub_controller[sub_resource](
|
||||
project_id, server_id), remainder[3:]
|
||||
return self.resource_controller[resource](project_id), remainder[1:]
|
||||
|
||||
@pecan.expose()
|
||||
def _lookup(self, project_id, *remainder):
|
||||
if project_id == 'testrpc':
|
||||
return TestRPCController(), remainder
|
||||
else:
|
||||
# If the last charater of the url path is '/', then
|
||||
# last remainder element is ''. Since pecan can't
|
||||
# handle the url properly, '' element must be removed.
|
||||
# Otherwise pecan will crashed for routing the request
|
||||
# with incorrect parameter
|
||||
num = len(remainder)
|
||||
if num >= 1 and remainder[num - 1] == '':
|
||||
new_remainder = remainder[:num - 1]
|
||||
else:
|
||||
new_remainder = remainder
|
||||
return self._get_resource_controller(project_id, new_remainder)
|
||||
|
||||
@pecan.expose(generic=True, template='json')
|
||||
def index(self):
|
||||
return {
|
||||
"version": {
|
||||
"status": "CURRENT",
|
||||
"updated": "2013-07-23T11:33:21Z",
|
||||
"links": [
|
||||
{
|
||||
"href": pecan.request.application_url + "/v2.1/",
|
||||
"rel": "self"
|
||||
},
|
||||
{
|
||||
"href": "http://docs.openstack.org/",
|
||||
"type": "text/html",
|
||||
"rel": "describedby"
|
||||
}
|
||||
],
|
||||
"min_version": constants.NOVA_APIGW_MIN_VERSION,
|
||||
"version": constants.NOVA_APIGW_MAX_VERSION,
|
||||
"media-types": [
|
||||
{
|
||||
"base": "application/json",
|
||||
"type": self._media_type
|
||||
}
|
||||
],
|
||||
"id": "v2.1"
|
||||
}
|
||||
}
|
||||
|
||||
@index.when(method='POST')
|
||||
@index.when(method='PUT')
|
||||
@index.when(method='DELETE')
|
||||
@index.when(method='HEAD')
|
||||
@index.when(method='PATCH')
|
||||
def not_supported(self):
|
||||
pecan.abort(404)
|
||||
|
||||
|
||||
class TestRPCController(rest.RestController):
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(TestRPCController, self).__init__(*args, **kwargs)
|
||||
self.xjobapi = xrpcapi.XJobAPI()
|
||||
|
||||
@expose(generic=True, template='json')
|
||||
def index(self):
|
||||
if pecan.request.method != 'GET':
|
||||
pecan.abort(405)
|
||||
|
||||
context = ctx.extract_context_from_environ()
|
||||
|
||||
payload = '#result from xjob rpc'
|
||||
|
||||
return self.xjobapi.test_rpc(context, payload)
|
@ -1,82 +0,0 @@
|
||||
# Copyright 2015 Huawei Technologies Co., Ltd.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_serialization import jsonutils
|
||||
from oslo_service import wsgi
|
||||
from oslo_utils import encodeutils
|
||||
|
||||
import webob.dec
|
||||
|
||||
from tricircle.common import constants
|
||||
|
||||
|
||||
class Versions(object):
|
||||
|
||||
@classmethod
|
||||
def factory(cls, global_config, **local_config):
|
||||
return cls(app=None)
|
||||
|
||||
@webob.dec.wsgify(RequestClass=wsgi.Request)
|
||||
def __call__(self, req):
|
||||
if req.path != '/':
|
||||
if self.app:
|
||||
return req.get_response(self.app)
|
||||
|
||||
method = req.environ.get('REQUEST_METHOD')
|
||||
not_allowed_methods = ['POST', 'PUT', 'DELETE', 'HEAD', 'PATCH']
|
||||
if method in not_allowed_methods:
|
||||
response = webob.Response()
|
||||
response.status_code = 404
|
||||
return response
|
||||
|
||||
versions = {
|
||||
"versions": [
|
||||
{
|
||||
"status": "SUPPORTED",
|
||||
"updated": "2011-01-21T11:33:21Z",
|
||||
"links": [
|
||||
{"href": "http://127.0.0.1:8774/v2/",
|
||||
"rel": "self"}
|
||||
],
|
||||
"min_version": "",
|
||||
"version": "",
|
||||
"id": "v2.0"
|
||||
},
|
||||
{
|
||||
"status": "CURRENT",
|
||||
"updated": "2013-07-23T11:33:21Z",
|
||||
"links": [
|
||||
{
|
||||
"href": req.application_url + "/v2.1/",
|
||||
"rel": "self"
|
||||
}
|
||||
],
|
||||
"min_version": constants.NOVA_APIGW_MIN_VERSION,
|
||||
"version": constants.NOVA_APIGW_MAX_VERSION,
|
||||
"id": "v2.1"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
content_type = 'application/json'
|
||||
body = jsonutils.dumps(versions)
|
||||
response = webob.Response()
|
||||
response.content_type = content_type
|
||||
response.body = encodeutils.to_utf8(body)
|
||||
|
||||
return response
|
||||
|
||||
def __init__(self, app):
|
||||
self.app = app
|
@ -1,680 +0,0 @@
|
||||
# Copyright (c) 2015 Huawei Tech. Co., Ltd.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import netaddr
|
||||
import pecan
|
||||
from pecan import expose
|
||||
from pecan import rest
|
||||
import six
|
||||
|
||||
import oslo_log.log as logging
|
||||
|
||||
import neutronclient.common.exceptions as q_exceptions
|
||||
|
||||
import tricircle.common.client as t_client
|
||||
from tricircle.common import constants
|
||||
import tricircle.common.context as t_context
|
||||
import tricircle.common.exceptions as t_exceptions
|
||||
from tricircle.common.i18n import _
|
||||
from tricircle.common.i18n import _LE
|
||||
import tricircle.common.lock_handle as t_lock
|
||||
from tricircle.common.quota import QUOTAS
|
||||
from tricircle.common.scheduler import filter_scheduler
|
||||
from tricircle.common import utils
|
||||
from tricircle.common import xrpcapi
|
||||
import tricircle.db.api as db_api
|
||||
from tricircle.db import core
|
||||
from tricircle.db import models
|
||||
from tricircle.network import helper
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
MAX_METADATA_KEY_LENGTH = 255
|
||||
MAX_METADATA_VALUE_LENGTH = 255
|
||||
|
||||
|
||||
class ServerController(rest.RestController):
|
||||
|
||||
def __init__(self, project_id):
|
||||
self.project_id = project_id
|
||||
self.clients = {constants.TOP: t_client.Client()}
|
||||
self.helper = helper.NetworkHelper()
|
||||
self.xjob_handler = xrpcapi.XJobAPI()
|
||||
self.filter_scheduler = filter_scheduler.FilterScheduler()
|
||||
|
||||
def _get_client(self, pod_name=constants.TOP):
|
||||
if pod_name not in self.clients:
|
||||
self.clients[pod_name] = t_client.Client(pod_name)
|
||||
return self.clients[pod_name]
|
||||
|
||||
def _get_all(self, context, params):
|
||||
filters = [{'key': key,
|
||||
'comparator': 'eq',
|
||||
'value': value} for key, value in params.iteritems()]
|
||||
ret = []
|
||||
pods = db_api.list_pods(context)
|
||||
for pod in pods:
|
||||
if not pod['az_name']:
|
||||
continue
|
||||
client = self._get_client(pod['pod_name'])
|
||||
servers = client.list_servers(context, filters=filters)
|
||||
self._remove_fip_info(servers)
|
||||
ret.extend(servers)
|
||||
return ret
|
||||
|
||||
@staticmethod
|
||||
def _construct_brief_server_entry(server):
|
||||
return {'id': server['id'],
|
||||
'name': server.get('name'),
|
||||
'links': server.get('links')}
|
||||
|
||||
@staticmethod
|
||||
def _transform_network_name(server):
|
||||
if 'addresses' not in server:
|
||||
return
|
||||
keys = [key for key in server['addresses'].iterkeys()]
|
||||
for key in keys:
|
||||
value = server['addresses'].pop(key)
|
||||
network_name = key.split('#')[1]
|
||||
server['addresses'][network_name] = value
|
||||
return server
|
||||
|
||||
@expose(generic=True, template='json')
|
||||
def get_one(self, _id, **kwargs):
|
||||
context = t_context.extract_context_from_environ()
|
||||
|
||||
if _id == 'detail':
|
||||
return {'servers': [self._transform_network_name(
|
||||
server) for server in self._get_all(context, kwargs)]}
|
||||
|
||||
mappings = db_api.get_bottom_mappings_by_top_id(
|
||||
context, _id, constants.RT_SERVER)
|
||||
if not mappings:
|
||||
return utils.format_nova_error(
|
||||
404, _('Instance %s could not be found.') % _id)
|
||||
pod, bottom_id = mappings[0]
|
||||
client = self._get_client(pod['pod_name'])
|
||||
server = client.get_servers(context, bottom_id)
|
||||
if not server:
|
||||
return utils.format_nova_error(
|
||||
404, _('Instance %s could not be found.') % _id)
|
||||
else:
|
||||
self._transform_network_name(server)
|
||||
return {'server': server}
|
||||
|
||||
@expose(generic=True, template='json')
|
||||
def get_all(self, **kwargs):
|
||||
context = t_context.extract_context_from_environ()
|
||||
return {'servers': [self._construct_brief_server_entry(
|
||||
server) for server in self._get_all(context, kwargs)]}
|
||||
|
||||
@expose(generic=True, template='json')
|
||||
def post(self, **kw):
|
||||
context = t_context.extract_context_from_environ()
|
||||
|
||||
if 'server' not in kw:
|
||||
return utils.format_nova_error(
|
||||
400, _('server is not set'))
|
||||
|
||||
az = kw['server'].get('availability_zone', '')
|
||||
pod, b_az = self.filter_scheduler.select_destination(
|
||||
context, az, self.project_id, pod_group='')
|
||||
if not pod:
|
||||
return utils.format_nova_error(
|
||||
500, _('Pod not configured or scheduling failure'))
|
||||
|
||||
t_server_dict = kw['server']
|
||||
self._process_metadata_quota(context, t_server_dict)
|
||||
self._process_injected_file_quota(context, t_server_dict)
|
||||
|
||||
server_body = self._get_create_server_body(kw['server'], b_az)
|
||||
|
||||
top_client = self._get_client()
|
||||
|
||||
sg_filters = [{'key': 'tenant_id', 'comparator': 'eq',
|
||||
'value': self.project_id}]
|
||||
top_sgs = top_client.list_security_groups(context, sg_filters)
|
||||
top_sg_map = dict((sg['name'], sg) for sg in top_sgs)
|
||||
|
||||
if 'security_groups' not in kw['server']:
|
||||
security_groups = ['default']
|
||||
else:
|
||||
security_groups = []
|
||||
for sg in kw['server']['security_groups']:
|
||||
if 'name' not in sg:
|
||||
return utils.format_nova_error(
|
||||
400, _('Invalid input for field/attribute'))
|
||||
if sg['name'] not in top_sg_map:
|
||||
return utils.format_nova_error(
|
||||
400, _('Unable to find security_group with name or id '
|
||||
'%s') % sg['name'])
|
||||
security_groups.append(sg['name'])
|
||||
t_sg_ids, b_sg_ids, is_news = self._handle_security_group(
|
||||
context, pod, top_sg_map, security_groups)
|
||||
|
||||
server_body['networks'] = []
|
||||
if 'networks' in kw['server']:
|
||||
for net_info in kw['server']['networks']:
|
||||
if 'uuid' in net_info:
|
||||
network = top_client.get_networks(context,
|
||||
net_info['uuid'])
|
||||
if not network:
|
||||
return utils.format_nova_error(
|
||||
400, _('Network %s could not be '
|
||||
'found') % net_info['uuid'])
|
||||
|
||||
if not self._check_network_server_az_match(
|
||||
context, network,
|
||||
kw['server']['availability_zone']):
|
||||
return utils.format_nova_error(
|
||||
400, _('Network and server not in the same '
|
||||
'availability zone'))
|
||||
|
||||
subnets = top_client.list_subnets(
|
||||
context, [{'key': 'network_id',
|
||||
'comparator': 'eq',
|
||||
'value': network['id']}])
|
||||
if not subnets:
|
||||
return utils.format_nova_error(
|
||||
400, _('Network does not contain any subnets'))
|
||||
t_port_id, b_port_id = self._handle_network(
|
||||
context, pod, network, subnets,
|
||||
top_sg_ids=t_sg_ids, bottom_sg_ids=b_sg_ids)
|
||||
elif 'port' in net_info:
|
||||
port = top_client.get_ports(context, net_info['port'])
|
||||
if not port:
|
||||
return utils.format_nova_error(
|
||||
400, _('Port %s could not be '
|
||||
'found') % net_info['port'])
|
||||
t_port_id, b_port_id = self._handle_port(
|
||||
context, pod, port)
|
||||
server_body['networks'].append({'port': b_port_id})
|
||||
|
||||
# only for security group first created in a pod, we invoke
|
||||
# _handle_sg_rule_for_new_group to initialize rules in that group, this
|
||||
# method removes all the rules in the new group then add new rules
|
||||
top_sg_id_map = dict((sg['id'], sg) for sg in top_sgs)
|
||||
new_top_sgs = []
|
||||
new_bottom_sg_ids = []
|
||||
default_sg = None
|
||||
for t_id, b_id, is_new in zip(t_sg_ids, b_sg_ids, is_news):
|
||||
sg_name = top_sg_id_map[t_id]['name']
|
||||
if sg_name == 'default':
|
||||
default_sg = top_sg_id_map[t_id]
|
||||
continue
|
||||
if not is_new:
|
||||
continue
|
||||
new_top_sgs.append(top_sg_id_map[t_id])
|
||||
new_bottom_sg_ids.append(b_id)
|
||||
self._handle_sg_rule_for_new_group(context, pod, new_top_sgs,
|
||||
new_bottom_sg_ids)
|
||||
if default_sg:
|
||||
self._handle_sg_rule_for_default_group(
|
||||
context, pod, default_sg, self.project_id)
|
||||
|
||||
client = self._get_client(pod['pod_name'])
|
||||
nics = [
|
||||
{'port-id': _port['port']} for _port in server_body['networks']]
|
||||
|
||||
server = client.create_servers(context,
|
||||
name=server_body['name'],
|
||||
image=server_body['imageRef'],
|
||||
flavor=server_body['flavorRef'],
|
||||
nics=nics,
|
||||
security_groups=b_sg_ids)
|
||||
with context.session.begin():
|
||||
core.create_resource(context, models.ResourceRouting,
|
||||
{'top_id': server['id'],
|
||||
'bottom_id': server['id'],
|
||||
'pod_id': pod['pod_id'],
|
||||
'project_id': self.project_id,
|
||||
'resource_type': constants.RT_SERVER})
|
||||
pecan.response.status = 202
|
||||
return {'server': server}
|
||||
|
||||
@expose(generic=True, template='json')
|
||||
def delete(self, _id):
|
||||
context = t_context.extract_context_from_environ()
|
||||
|
||||
mappings = db_api.get_bottom_mappings_by_top_id(context, _id,
|
||||
constants.RT_SERVER)
|
||||
if not mappings:
|
||||
pecan.response.status = 404
|
||||
return {'Error': {'message': _('Server not found'), 'code': 404}}
|
||||
|
||||
pod, bottom_id = mappings[0]
|
||||
client = self._get_client(pod['pod_name'])
|
||||
top_client = self._get_client()
|
||||
try:
|
||||
server_ports = top_client.list_ports(
|
||||
context, filters=[{'key': 'device_id', 'comparator': 'eq',
|
||||
'value': _id}])
|
||||
ret = client.delete_servers(context, bottom_id)
|
||||
# none return value indicates server not found
|
||||
if ret is None:
|
||||
self._remove_stale_mapping(context, _id)
|
||||
pecan.response.status = 404
|
||||
return {'Error': {'message': _('Server not found'),
|
||||
'code': 404}}
|
||||
for server_port in server_ports:
|
||||
self.xjob_handler.delete_server_port(context,
|
||||
server_port['id'])
|
||||
except Exception as e:
|
||||
code = 500
|
||||
message = _('Delete server %(server_id)s fails') % {
|
||||
'server_id': _id}
|
||||
if hasattr(e, 'code'):
|
||||
code = e.code
|
||||
ex_message = str(e)
|
||||
if ex_message:
|
||||
message = ex_message
|
||||
LOG.error(message)
|
||||
|
||||
pecan.response.status = code
|
||||
return {'Error': {'message': message, 'code': code}}
|
||||
|
||||
# NOTE(zhiyuan) Security group rules for default security group are
|
||||
# also kept until subnet is deleted.
|
||||
pecan.response.status = 204
|
||||
return pecan.response
|
||||
|
||||
def _get_or_create_route(self, context, pod, _id, _type):
|
||||
def list_resources(t_ctx, q_ctx, pod_, ele, _type_):
|
||||
client = self._get_client(pod_['pod_name'])
|
||||
return client.list_resources(_type_, t_ctx, [{'key': 'name',
|
||||
'comparator': 'eq',
|
||||
'value': ele['id']}])
|
||||
|
||||
return t_lock.get_or_create_route(context, None,
|
||||
self.project_id, pod, {'id': _id},
|
||||
_type, list_resources)
|
||||
|
||||
def _handle_router(self, context, pod, net):
|
||||
top_client = self._get_client()
|
||||
|
||||
interfaces = top_client.list_ports(
|
||||
context, filters=[{'key': 'network_id',
|
||||
'comparator': 'eq',
|
||||
'value': net['id']},
|
||||
{'key': 'device_owner',
|
||||
'comparator': 'eq',
|
||||
'value': 'network:router_interface'}])
|
||||
interfaces = [inf for inf in interfaces if inf['device_id']]
|
||||
if not interfaces:
|
||||
return
|
||||
# TODO(zhiyuan) change xjob invoking from "cast" to "call" to guarantee
|
||||
# the job can be successfully registered
|
||||
self.xjob_handler.setup_bottom_router(
|
||||
context, net['id'], interfaces[0]['device_id'], pod['pod_id'])
|
||||
|
||||
def _handle_network(self, context, pod, net, subnets, port=None,
|
||||
top_sg_ids=None, bottom_sg_ids=None):
|
||||
(bottom_net_id,
|
||||
subnet_map) = self.helper.prepare_bottom_network_subnets(
|
||||
context, None, self.project_id, pod, net, subnets)
|
||||
|
||||
top_client = self._get_client()
|
||||
top_port_body = {'port': {'network_id': net['id'],
|
||||
'admin_state_up': True}}
|
||||
if top_sg_ids:
|
||||
top_port_body['port']['security_groups'] = top_sg_ids
|
||||
|
||||
# port
|
||||
if not port:
|
||||
port = top_client.create_ports(context, top_port_body)
|
||||
port_body = self.helper.get_create_port_body(
|
||||
self.project_id, port, subnet_map, bottom_net_id,
|
||||
bottom_sg_ids)
|
||||
else:
|
||||
port_body = self.helper.get_create_port_body(
|
||||
self.project_id, port, subnet_map, bottom_net_id)
|
||||
_, bottom_port_id = self.helper.prepare_bottom_element(
|
||||
context, self.project_id, pod, port, constants.RT_PORT, port_body)
|
||||
|
||||
self._handle_router(context, pod, net)
|
||||
|
||||
return port['id'], bottom_port_id
|
||||
|
||||
def _handle_port(self, context, pod, port):
|
||||
top_client = self._get_client()
|
||||
# NOTE(zhiyuan) at this moment, it is possible that the bottom port has
|
||||
# been created. if user creates a port and associate it with a floating
|
||||
# ip before booting a vm, tricircle plugin will create the bottom port
|
||||
# first in order to setup floating ip in bottom pod. but it is still
|
||||
# safe for us to use network id and subnet id in the returned port dict
|
||||
# since tricircle plugin will do id mapping and guarantee ids in the
|
||||
# dict are top id.
|
||||
net = top_client.get_networks(context, port['network_id'])
|
||||
subnets = []
|
||||
for fixed_ip in port['fixed_ips']:
|
||||
subnets.append(top_client.get_subnets(context,
|
||||
fixed_ip['subnet_id']))
|
||||
return self._handle_network(context, pod, net, subnets, port=port)
|
||||
|
||||
@staticmethod
|
||||
def _safe_create_security_group_rule(context, client, body):
|
||||
try:
|
||||
client.create_security_group_rules(context, body)
|
||||
except q_exceptions.Conflict:
|
||||
return
|
||||
|
||||
@staticmethod
|
||||
def _safe_delete_security_group_rule(context, client, _id):
|
||||
try:
|
||||
client.delete_security_group_rules(context, _id)
|
||||
except q_exceptions.NotFound:
|
||||
return
|
||||
|
||||
def _handle_security_group(self, context, pod, top_sg_map,
|
||||
security_groups):
|
||||
t_sg_ids = []
|
||||
b_sg_ids = []
|
||||
is_news = []
|
||||
for sg_name in security_groups:
|
||||
t_sg = top_sg_map[sg_name]
|
||||
sg_body = {
|
||||
'security_group': {
|
||||
'name': t_sg['id'],
|
||||
'description': t_sg['description']}}
|
||||
is_new, b_sg_id = self.helper.prepare_bottom_element(
|
||||
context, self.project_id, pod, t_sg, constants.RT_SG, sg_body)
|
||||
t_sg_ids.append(t_sg['id'])
|
||||
is_news.append(is_new)
|
||||
b_sg_ids.append(b_sg_id)
|
||||
|
||||
return t_sg_ids, b_sg_ids, is_news
|
||||
|
||||
@staticmethod
|
||||
def _construct_bottom_rule(rule, sg_id, ip=None):
|
||||
ip = ip or rule['remote_ip_prefix']
|
||||
# if ip is passed, this is a extended rule for remote group
|
||||
return {'remote_group_id': None,
|
||||
'direction': rule['direction'],
|
||||
'remote_ip_prefix': ip,
|
||||
'protocol': rule.get('protocol'),
|
||||
'ethertype': rule['ethertype'],
|
||||
'port_range_max': rule.get('port_range_max'),
|
||||
'port_range_min': rule.get('port_range_min'),
|
||||
'security_group_id': sg_id}
|
||||
|
||||
@staticmethod
|
||||
def _compare_rule(rule1, rule2):
|
||||
for key in ('direction', 'remote_ip_prefix', 'protocol', 'ethertype',
|
||||
'port_range_max', 'port_range_min'):
|
||||
if rule1[key] != rule2[key]:
|
||||
return False
|
||||
return True
|
||||
|
||||
def _handle_sg_rule_for_default_group(self, context, pod, default_sg,
|
||||
project_id):
|
||||
top_client = self._get_client()
|
||||
new_b_rules = []
|
||||
for t_rule in default_sg['security_group_rules']:
|
||||
if not t_rule['remote_group_id']:
|
||||
# leave sg_id empty here
|
||||
new_b_rules.append(
|
||||
self._construct_bottom_rule(t_rule, ''))
|
||||
continue
|
||||
if t_rule['ethertype'] != 'IPv4':
|
||||
continue
|
||||
subnets = top_client.list_subnets(
|
||||
context, [{'key': 'tenant_id', 'comparator': 'eq',
|
||||
'value': project_id}])
|
||||
bridge_ip_net = netaddr.IPNetwork('100.0.0.0/8')
|
||||
for subnet in subnets:
|
||||
ip_net = netaddr.IPNetwork(subnet['cidr'])
|
||||
if ip_net in bridge_ip_net:
|
||||
continue
|
||||
# leave sg_id empty here
|
||||
new_b_rules.append(
|
||||
self._construct_bottom_rule(t_rule, '',
|
||||
subnet['cidr']))
|
||||
|
||||
mappings = db_api.get_bottom_mappings_by_top_id(
|
||||
context, default_sg['id'], constants.RT_SG)
|
||||
for pod, b_sg_id in mappings:
|
||||
client = self._get_client(pod['pod_name'])
|
||||
b_sg = client.get_security_groups(context, b_sg_id)
|
||||
add_rules = []
|
||||
del_rules = []
|
||||
match_index = set()
|
||||
for b_rule in b_sg['security_group_rules']:
|
||||
match = False
|
||||
for i, rule in enumerate(new_b_rules):
|
||||
if self._compare_rule(b_rule, rule):
|
||||
match = True
|
||||
match_index.add(i)
|
||||
break
|
||||
if not match:
|
||||
del_rules.append(b_rule)
|
||||
for i, rule in enumerate(new_b_rules):
|
||||
if i not in match_index:
|
||||
add_rules.append(rule)
|
||||
|
||||
for del_rule in del_rules:
|
||||
self._safe_delete_security_group_rule(
|
||||
context, client, del_rule['id'])
|
||||
if add_rules:
|
||||
rule_body = {'security_group_rules': []}
|
||||
for add_rule in add_rules:
|
||||
add_rule['security_group_id'] = b_sg_id
|
||||
rule_body['security_group_rules'].append(add_rule)
|
||||
self._safe_create_security_group_rule(context,
|
||||
client, rule_body)
|
||||
|
||||
def _handle_sg_rule_for_new_group(self, context, pod, top_sgs,
|
||||
bottom_sg_ids):
|
||||
client = self._get_client(pod['pod_name'])
|
||||
for i, t_sg in enumerate(top_sgs):
|
||||
b_sg_id = bottom_sg_ids[i]
|
||||
new_b_rules = []
|
||||
for t_rule in t_sg['security_group_rules']:
|
||||
if t_rule['remote_group_id']:
|
||||
# we do not handle remote group rule for non-default
|
||||
# security group, actually tricircle plugin in neutron
|
||||
# will reject such rule
|
||||
# default security group is not passed with top_sgs so
|
||||
# t_rule will not belong to default security group
|
||||
continue
|
||||
new_b_rules.append(
|
||||
self._construct_bottom_rule(t_rule, b_sg_id))
|
||||
try:
|
||||
b_sg = client.get_security_groups(context, b_sg_id)
|
||||
for b_rule in b_sg['security_group_rules']:
|
||||
self._safe_delete_security_group_rule(
|
||||
context, client, b_rule['id'])
|
||||
if new_b_rules:
|
||||
rule_body = {'security_group_rules': new_b_rules}
|
||||
self._safe_create_security_group_rule(context, client,
|
||||
rule_body)
|
||||
except Exception:
|
||||
# if we fails when operating bottom security group rule, we
|
||||
# update the security group mapping to set bottom_id to None
|
||||
# and expire the mapping, so next time the security group rule
|
||||
# operations can be redone
|
||||
with context.session.begin():
|
||||
routes = core.query_resource(
|
||||
context, models.ResourceRouting,
|
||||
[{'key': 'top_id', 'comparator': 'eq',
|
||||
'value': t_sg['id']},
|
||||
{'key': 'bottom_id', 'comparator': 'eq',
|
||||
'value': b_sg_id}], [])
|
||||
update_dict = {'bottom_id': None,
|
||||
'created_at': constants.expire_time,
|
||||
'updated_at': constants.expire_time}
|
||||
core.update_resource(context, models.ResourceRouting,
|
||||
routes[0]['id'], update_dict)
|
||||
raise
|
||||
|
||||
@staticmethod
|
||||
def _get_create_server_body(origin, bottom_az):
|
||||
body = {}
|
||||
copy_fields = ['name', 'imageRef', 'flavorRef',
|
||||
'max_count', 'min_count']
|
||||
if bottom_az:
|
||||
body['availability_zone'] = bottom_az
|
||||
for field in copy_fields:
|
||||
if field in origin:
|
||||
body[field] = origin[field]
|
||||
return body
|
||||
|
||||
@staticmethod
|
||||
def _remove_fip_info(servers):
|
||||
for server in servers:
|
||||
if 'addresses' not in server:
|
||||
continue
|
||||
for addresses in server['addresses'].values():
|
||||
remove_index = -1
|
||||
for i, address in enumerate(addresses):
|
||||
if address.get('OS-EXT-IPS:type') == 'floating':
|
||||
remove_index = i
|
||||
break
|
||||
if remove_index >= 0:
|
||||
del addresses[remove_index]
|
||||
|
||||
@staticmethod
|
||||
def _remove_stale_mapping(context, server_id):
|
||||
filters = [{'key': 'top_id', 'comparator': 'eq', 'value': server_id},
|
||||
{'key': 'resource_type',
|
||||
'comparator': 'eq',
|
||||
'value': constants.RT_SERVER}]
|
||||
with context.session.begin():
|
||||
core.delete_resources(context,
|
||||
models.ResourceRouting,
|
||||
filters)
|
||||
|
||||
@staticmethod
|
||||
def _check_network_server_az_match(context, network, server_az):
|
||||
az_hints = 'availability_zone_hints'
|
||||
network_type = 'provider:network_type'
|
||||
|
||||
# for local type network, we make sure it's created in only one az
|
||||
|
||||
# NOTE(zhiyuan) race condition exists when creating vms in the same
|
||||
# local type network but different azs at the same time
|
||||
if network.get(network_type) == constants.NT_LOCAL:
|
||||
mappings = db_api.get_bottom_mappings_by_top_id(
|
||||
context, network['id'], constants.RT_NETWORK)
|
||||
if mappings:
|
||||
pod, _ = mappings[0]
|
||||
if pod['az_name'] != server_az:
|
||||
return False
|
||||
# if neutron az not assigned, server az is used
|
||||
if not network.get(az_hints):
|
||||
return True
|
||||
if server_az in network[az_hints]:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def _process_injected_file_quota(self, context, t_server_dict):
|
||||
try:
|
||||
ctx = context.elevated()
|
||||
injected_files = t_server_dict.get('injected_files', None)
|
||||
self._check_injected_file_quota(ctx, injected_files)
|
||||
except (t_exceptions.OnsetFileLimitExceeded,
|
||||
t_exceptions.OnsetFilePathLimitExceeded,
|
||||
t_exceptions.OnsetFileContentLimitExceeded) as e:
|
||||
msg = str(e)
|
||||
LOG.exception(_LE('Quota exceeded %(msg)s'),
|
||||
{'msg': msg})
|
||||
return utils.format_nova_error(400, _('Quota exceeded %s') % msg)
|
||||
|
||||
def _check_injected_file_quota(self, context, injected_files):
|
||||
"""Enforce quota limits on injected files.
|
||||
|
||||
Raises a QuotaError if any limit is exceeded.
|
||||
|
||||
"""
|
||||
|
||||
if injected_files is None:
|
||||
return
|
||||
|
||||
# Check number of files first
|
||||
try:
|
||||
QUOTAS.limit_check(context,
|
||||
injected_files=len(injected_files))
|
||||
except t_exceptions.OverQuota:
|
||||
raise t_exceptions.OnsetFileLimitExceeded()
|
||||
|
||||
# OK, now count path and content lengths; we're looking for
|
||||
# the max...
|
||||
max_path = 0
|
||||
max_content = 0
|
||||
for path, content in injected_files:
|
||||
max_path = max(max_path, len(path))
|
||||
max_content = max(max_content, len(content))
|
||||
|
||||
try:
|
||||
QUOTAS.limit_check(context,
|
||||
injected_file_path_bytes=max_path,
|
||||
injected_file_content_bytes=max_content)
|
||||
except t_exceptions.OverQuota as exc:
|
||||
# Favor path limit over content limit for reporting
|
||||
# purposes
|
||||
if 'injected_file_path_bytes' in exc.kwargs['overs']:
|
||||
raise t_exceptions.OnsetFilePathLimitExceeded()
|
||||
else:
|
||||
raise t_exceptions.OnsetFileContentLimitExceeded()
|
||||
|
||||
def _process_metadata_quota(self, context, t_server_dict):
|
||||
try:
|
||||
ctx = context.elevated()
|
||||
metadata = t_server_dict.get('metadata', None)
|
||||
self._check_metadata_properties_quota(ctx, metadata)
|
||||
except t_exceptions.InvalidMetadata as e1:
|
||||
LOG.exception(_LE('Invalid metadata %(exception)s'),
|
||||
{'exception': str(e1)})
|
||||
return utils.format_nova_error(400, _('Invalid metadata'))
|
||||
except t_exceptions.InvalidMetadataSize as e2:
|
||||
LOG.exception(_LE('Invalid metadata size %(exception)s'),
|
||||
{'exception': str(e2)})
|
||||
return utils.format_nova_error(400, _('Invalid metadata size'))
|
||||
except t_exceptions.MetadataLimitExceeded as e3:
|
||||
LOG.exception(_LE('Quota exceeded %(exception)s'),
|
||||
{'exception': str(e3)})
|
||||
return utils.format_nova_error(400,
|
||||
_('Quota exceeded in metadata'))
|
||||
|
||||
def _check_metadata_properties_quota(self, context, metadata=None):
|
||||
"""Enforce quota limits on metadata properties."""
|
||||
if not metadata:
|
||||
metadata = {}
|
||||
if not isinstance(metadata, dict):
|
||||
msg = (_("Metadata type should be dict."))
|
||||
raise t_exceptions.InvalidMetadata(reason=msg)
|
||||
num_metadata = len(metadata)
|
||||
try:
|
||||
QUOTAS.limit_check(context, metadata_items=num_metadata)
|
||||
except t_exceptions.OverQuota as exc:
|
||||
quota_metadata = exc.kwargs['quotas']['metadata_items']
|
||||
raise t_exceptions.MetadataLimitExceeded(allowed=quota_metadata)
|
||||
|
||||
# Because metadata is processed in the bottom pod, we just do
|
||||
# parameter validation here to ensure quota management
|
||||
for k, v in six.iteritems(metadata):
|
||||
try:
|
||||
utils.check_string_length(v)
|
||||
utils.check_string_length(k, min_len=1)
|
||||
except t_exceptions.InvalidInput as e:
|
||||
raise t_exceptions.InvalidMetadata(reason=str(e))
|
||||
|
||||
if len(k) > MAX_METADATA_KEY_LENGTH:
|
||||
msg = _("Metadata property key greater than 255 characters")
|
||||
raise t_exceptions.InvalidMetadataSize(reason=msg)
|
||||
if len(v) > MAX_METADATA_VALUE_LENGTH:
|
||||
msg = _("Metadata property value greater than 255 characters")
|
||||
raise t_exceptions.InvalidMetadataSize(reason=msg)
|
@ -1,72 +0,0 @@
|
||||
# Copyright (c) 2015 Huawei Tech. Co., Ltd.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import pecan
|
||||
from pecan import expose
|
||||
from pecan import rest
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
import tricircle.common.client as t_client
|
||||
from tricircle.common import constants
|
||||
import tricircle.common.context as t_context
|
||||
from tricircle.common.i18n import _
|
||||
from tricircle.common import utils
|
||||
import tricircle.db.api as db_api
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ServerIpsController(rest.RestController):
|
||||
|
||||
def __init__(self, project_id, server_id):
|
||||
self.project_id = project_id
|
||||
self.server_id = server_id
|
||||
self.clients = {constants.TOP: t_client.Client()}
|
||||
|
||||
def _get_client(self, pod_name=constants.TOP):
|
||||
if pod_name not in self.clients:
|
||||
self.clients[pod_name] = t_client.Client(pod_name)
|
||||
return self.clients[pod_name]
|
||||
|
||||
@expose(generic=True, template='json')
|
||||
def get_all(self, **kwargs):
|
||||
context = t_context.extract_context_from_environ()
|
||||
|
||||
server_mappings = db_api.get_server_mappings_by_top_id(
|
||||
context, self.server_id)
|
||||
if not server_mappings:
|
||||
return utils.format_nova_error(
|
||||
404, _('Server %s could not be found') % self.server_id)
|
||||
try:
|
||||
server_pod_name = server_mappings[0][0]['pod_name']
|
||||
api = self._get_client(server_pod_name).get_native_client(
|
||||
constants.RT_SERVER, context)
|
||||
resp, body = api.client.get('/servers/%s/ips' % self.server_id)
|
||||
pecan.response.status = resp.status_code
|
||||
if not body:
|
||||
return pecan.response
|
||||
else:
|
||||
return body
|
||||
except Exception as e:
|
||||
code = 500
|
||||
message = _('Fail to lists assigned IP addresses'
|
||||
'%(server_id)s: %(exception)s') % {
|
||||
'server_id': self.server_id,
|
||||
'exception': e}
|
||||
if hasattr(e, 'code'):
|
||||
code = e.code
|
||||
LOG.error(message)
|
||||
return utils.format_nova_error(code, message)
|
@ -1,259 +0,0 @@
|
||||
# Copyright (c) 2015 Huawei Tech. Co., Ltd.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import pecan
|
||||
from pecan import expose
|
||||
from pecan import rest
|
||||
import re
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
import tricircle.common.client as t_client
|
||||
import tricircle.common.context as t_context
|
||||
from tricircle.common import exceptions
|
||||
from tricircle.common.i18n import _
|
||||
from tricircle.common.i18n import _LE
|
||||
from tricircle.common import utils
|
||||
import tricircle.db.api as db_api
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class VolumeController(rest.RestController):
|
||||
|
||||
def __init__(self, project_id, server_id):
|
||||
self.project_id = project_id
|
||||
self.server_id = server_id
|
||||
self.clients = {'top': t_client.Client()}
|
||||
|
||||
def _get_client(self, pod_name='top'):
|
||||
if pod_name not in self.clients:
|
||||
self.clients[pod_name] = t_client.Client(pod_name)
|
||||
return self.clients[pod_name]
|
||||
|
||||
@expose(generic=True, template='json')
|
||||
def post(self, **kw):
|
||||
context = t_context.extract_context_from_environ()
|
||||
|
||||
if 'volumeAttachment' not in kw:
|
||||
return utils.format_nova_error(
|
||||
400, _('volumeAttachment is not set'))
|
||||
body = kw['volumeAttachment']
|
||||
if 'volumeId' not in body:
|
||||
return utils.format_nova_error(
|
||||
400, _('Invalid input for field/attribute volumeAttachment'))
|
||||
try:
|
||||
server_mappings = db_api.get_server_mappings_by_top_id(
|
||||
context, self.server_id)
|
||||
volume_mappings = db_api.get_volume_mappings_by_top_id(
|
||||
context, body['volumeId'])
|
||||
except exceptions.ServerMappingsNotFound as e:
|
||||
return utils.format_nova_error(404, e.message)
|
||||
except exceptions.VolumeMappingsNotFound as e:
|
||||
return utils.format_nova_error(404, e.message)
|
||||
except Exception as e:
|
||||
LOG.exception(_LE('Fail to create volume attachment for given'
|
||||
'server %(server_id)s:'
|
||||
'%(exception)s'),
|
||||
{'server_id': self.server_id,
|
||||
'exception': e})
|
||||
return utils.format_nova_error(
|
||||
500, _('Fail to create volume attachment'))
|
||||
|
||||
server_pod_name = server_mappings[0][0]['pod_name']
|
||||
volume_pod_name = volume_mappings[0][0]['pod_name']
|
||||
if server_pod_name != volume_pod_name:
|
||||
LOG.error(_LE('Server %(server)s is in pod %(server_pod)s and '
|
||||
'volume %(volume)s is in pod %(volume_pod)s, which '
|
||||
'are not the same.'),
|
||||
{'server': self.server_id,
|
||||
'server_pod': server_pod_name,
|
||||
'volume': body['volumeId'],
|
||||
'volume_pod': volume_pod_name})
|
||||
return utils.format_nova_error(
|
||||
400, _('Server and volume not in the same pod'))
|
||||
|
||||
device = None
|
||||
if 'device' in body:
|
||||
device = body['device']
|
||||
# this regular expression is copied from nova/block_device.py
|
||||
match = re.match('(^/dev/x{0,1}[a-z]{0,1}d{0,1})([a-z]+)[0-9]*$',
|
||||
device)
|
||||
if not match:
|
||||
return utils.format_nova_error(
|
||||
400, _('The supplied device path (%s) is '
|
||||
'invalid.') % device)
|
||||
|
||||
try:
|
||||
api = self._get_client(server_pod_name).get_native_client(
|
||||
'server_volume', context)
|
||||
resp, body = api.client.post(
|
||||
"/servers/%s/os-volume_attachments" % self.server_id, body=kw)
|
||||
pecan.response.status = resp.status_code
|
||||
if not body:
|
||||
return pecan.response
|
||||
else:
|
||||
return body
|
||||
except Exception as e:
|
||||
code = 500
|
||||
message = _('Fail to create volume attachment for given server '
|
||||
'%(server_id)s: %(exception)s') % {
|
||||
'server_id': self.server_id,
|
||||
'exception': e}
|
||||
if hasattr(e, 'code'):
|
||||
code = e.code
|
||||
LOG.error(message)
|
||||
return utils.format_nova_error(code, message)
|
||||
|
||||
@expose(generic=True, template='json')
|
||||
def get_one(self, _id):
|
||||
"""Get the volume attachment identified by the attachment ID.
|
||||
|
||||
:param _id: the ID of volume attachment
|
||||
:returns: the volume attachment
|
||||
"""
|
||||
context = t_context.extract_context_from_environ()
|
||||
|
||||
try:
|
||||
server_mappings = db_api.get_server_mappings_by_top_id(
|
||||
context, self.server_id)
|
||||
except exceptions.ServerMappingsNotFound as e:
|
||||
return utils.format_nova_error(404, e.message)
|
||||
except Exception as e:
|
||||
LOG.exception(_LE('Fail to get volume attachment'
|
||||
'%(attachment_id)s from server %(server_id)s:'
|
||||
'%(exception)s'),
|
||||
{'attachment_id': _id,
|
||||
'server_id': self.server_id,
|
||||
'exception': e})
|
||||
return utils.format_nova_error(
|
||||
500, _('Fail to get volume attachment'))
|
||||
|
||||
server_pod_name = server_mappings[0][0]['pod_name']
|
||||
|
||||
try:
|
||||
api = self._get_client(server_pod_name).get_native_client(
|
||||
'server_volume', context)
|
||||
resp, body = api.client.get(
|
||||
"/servers/%s/os-volume_attachments/%s" %
|
||||
(self.server_id, _id,))
|
||||
pecan.response.status = resp.status_code
|
||||
if not body:
|
||||
return pecan.response
|
||||
else:
|
||||
return body
|
||||
except Exception as e:
|
||||
code = 500
|
||||
message = _('Fail to get volume attachment %(attachment_id)s'
|
||||
'from server %(server_id)s: %(exception)s') % {
|
||||
'attachment_id': _id,
|
||||
'server_id': self.server_id,
|
||||
'exception': e}
|
||||
if hasattr(e, 'code'):
|
||||
code = e.code
|
||||
LOG.error(message)
|
||||
return utils.format_nova_error(code, message)
|
||||
|
||||
@expose(generic=True, template='json')
|
||||
def get_all(self):
|
||||
"""GET a list of all volume attachments for a server.
|
||||
|
||||
:returns: a list of volume attachments
|
||||
"""
|
||||
context = t_context.extract_context_from_environ()
|
||||
|
||||
try:
|
||||
server_mappings = db_api.get_server_mappings_by_top_id(
|
||||
context, self.server_id)
|
||||
except exceptions.ServerMappingsNotFound as e:
|
||||
return utils.format_nova_error(404, e.message)
|
||||
except Exception as e:
|
||||
LOG.exception(_LE('Fail to get volume attachments of server'
|
||||
'%(server_id)s: %(exception)s'),
|
||||
{'server_id': self.server_id,
|
||||
'exception': e})
|
||||
return utils.format_nova_error(
|
||||
500, _('Fail to get volume attachments'))
|
||||
|
||||
server_pod_name = server_mappings[0][0]['pod_name']
|
||||
|
||||
try:
|
||||
api = self._get_client(server_pod_name).get_native_client(
|
||||
'server_volume', context)
|
||||
resp, body = api.client.get(
|
||||
"/servers/%s/os-volume_attachments" % self.server_id)
|
||||
pecan.response.status = resp.status_code
|
||||
if not body:
|
||||
return pecan.response
|
||||
else:
|
||||
return body
|
||||
except Exception as e:
|
||||
code = 500
|
||||
message = _('Fail to get volume attachments of server'
|
||||
'%(server_id)s: %(exception)s') % {
|
||||
'server_id': self.server_id,
|
||||
'exception': e}
|
||||
if hasattr(e, 'code'):
|
||||
code = e.code
|
||||
LOG.error(message)
|
||||
return utils.format_nova_error(code, message)
|
||||
|
||||
@expose(generic=True, template='json')
|
||||
def delete(self, _id):
|
||||
"""Detach a volume identified by the attachment ID from the given server ID.
|
||||
|
||||
:param _id: the ID of volume attachment
|
||||
"""
|
||||
context = t_context.extract_context_from_environ()
|
||||
|
||||
try:
|
||||
server_mappings = db_api.get_server_mappings_by_top_id(
|
||||
context, self.server_id)
|
||||
except exceptions.ServerMappingsNotFound as e:
|
||||
return utils.format_nova_error(404, e.message)
|
||||
except Exception as e:
|
||||
LOG.exception(_LE('Fail to delete volume attachment'
|
||||
'%(attachment_id)s from server %(server_id)s:'
|
||||
'%(exception)s'),
|
||||
{'attachment_id': _id,
|
||||
'server_id': self.server_id,
|
||||
'exception': e})
|
||||
return utils.format_nova_error(
|
||||
500, _('Fail to delete volume attachment'))
|
||||
|
||||
server_pod_name = server_mappings[0][0]['pod_name']
|
||||
|
||||
try:
|
||||
api = self._get_client(server_pod_name).get_native_client(
|
||||
'server_volume', context)
|
||||
resp, body = api.client.delete(
|
||||
"/servers/%s/os-volume_attachments/%s" %
|
||||
(self.server_id, _id,))
|
||||
pecan.response.status = resp.status_code
|
||||
if not body:
|
||||
return pecan.response
|
||||
else:
|
||||
return body
|
||||
except Exception as e:
|
||||
code = 500
|
||||
message = _('Fail to delete volume attachments %(attachment_id)s'
|
||||
'from server %(server_id)s: %(exception)s') % {
|
||||
'attachment_id': _id,
|
||||
'server_id': self.server_id,
|
||||
'exception': e}
|
||||
if hasattr(e, 'code'):
|
||||
code = e.code
|
||||
LOG.error(message)
|
||||
return utils.format_nova_error(code, message)
|
@ -1,22 +0,0 @@
|
||||
# Copyright 2015 Huawei Technologies Co., Ltd.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import tricircle.nova_apigw.app
|
||||
|
||||
|
||||
def list_opts():
|
||||
return [
|
||||
('DEFAULT', tricircle.nova_apigw.app.common_opts),
|
||||
]
|
@ -79,13 +79,6 @@ iniset $TEMPEST_CONF volume-feature-enabled api_v1 false
|
||||
|
||||
iniset $TEMPEST_CONF validation connect_method fixed
|
||||
|
||||
# Run the Compute Tempest tests
|
||||
# cd $TRICIRCLE_TEMPEST_PLUGIN_DIR
|
||||
# sudo BASE=$BASE ./tempest_compute.sh
|
||||
|
||||
# Run the Volume Tempest tests
|
||||
# cd $TRICIRCLE_TEMPEST_PLUGIN_DIR
|
||||
# sudo BASE=$BASE ./tempest_volume.sh
|
||||
|
||||
# Run the Network Tempest tests
|
||||
cd $TRICIRCLE_TEMPEST_PLUGIN_DIR
|
||||
|
@ -16,6 +16,4 @@
|
||||
|
||||
export localconf=$BASE/new/devstack/local.conf
|
||||
export TRICIRCLE_API_CONF=/etc/tricircle/api.conf
|
||||
export TRICIRCLE_CINDER_APIGW_CONF=/etc/tricircle/cinder_apigw.conf
|
||||
export TRICIRCLE_NOVA_APIGW_CONF=/etc/tricircle/nova_apigw.conf
|
||||
export TRICIRCLE_XJOB_CONF=/etc/tricircle/xjob.conf
|
||||
|
@ -1,601 +0,0 @@
|
||||
#!/bin/bash -e
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
export DEST=$BASE/new
|
||||
export TEMPEST_DIR=$DEST/tempest
|
||||
export TEMPEST_CONF=$TEMPEST_DIR/etc/tempest.conf
|
||||
|
||||
# preparation for the tests
|
||||
cd $TEMPEST_DIR
|
||||
|
||||
# Run functional test
|
||||
echo "Running Tricircle functional test suite..."
|
||||
|
||||
# all test cases with following prefix
|
||||
TESTCASES="(tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_stop_start_server"
|
||||
TESTCASES="$TESTCASES|tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_lock_unlock_server"
|
||||
TESTCASES="$TESTCASES|tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_pause_unpause_server"
|
||||
TESTCASES="$TESTCASES|tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_shelve_unshelve_server"
|
||||
TESTCASES="$TESTCASES|tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_suspend_resume_server"
|
||||
TESTCASES="$TESTCASES|tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_force_delete_nonexistent_server_id"
|
||||
TESTCASES="$TESTCASES|tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_resume_non_existent_server"
|
||||
TESTCASES="$TESTCASES|tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_resume_server_invalid_state"
|
||||
TESTCASES="$TESTCASES|tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_suspend_non_existent_server"
|
||||
TESTCASES="$TESTCASES|tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_suspend_server_invalid_state"
|
||||
TESTCASES="$TESTCASES|tempest.api.compute.admin.test_servers_negative.ServersAdminNegativeTestJSON.test_migrate_non_existent_server"
|
||||
TESTCASES="$TESTCASES|tempest.api.compute.admin.test_servers_negative.ServersAdminNegativeTestJSON.test_migrate_server_invalid_state"
|
||||
TESTCASES="$TESTCASES|tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_reboot_server_hard"
|
||||
TESTCASES="$TESTCASES|tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_reboot_server_soft"
|
||||
TESTCASES="$TESTCASES|tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_resize_server_revert"
|
||||
TESTCASES="$TESTCASES|tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_resize_server_confirm"
|
||||
TESTCASES="$TESTCASES|tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_resize_server_confirm_from_stopped"
|
||||
TESTCASES="$TESTCASES|tempest.api.compute.admin.test_servers.ServersAdminTestJSON.test_reset_state_server"
|
||||
TESTCASES="$TESTCASES|tempest.api.compute.admin.test_servers_negative.ServersAdminNegativeTestJSON.test_reset_state_server_invalid_state"
|
||||
TESTCASES="$TESTCASES|tempest.api.compute.admin.test_servers_negative.ServersAdminNegativeTestJSON.test_reset_state_server_invalid_type"
|
||||
TESTCASES="$TESTCASES|tempest.api.compute.admin.test_servers_negative.ServersAdminNegativeTestJSON.test_reset_state_server_nonexistent_server"
|
||||
TESTCASES="$TESTCASES|tempest.api.compute.volumes.test_attach_volume.AttachVolumeShelveTestJSON.test_detach_volume_shelved_or_offload_server"
|
||||
TESTCASES="$TESTCASES|tempest.api.compute.volumes.test_attach_volume.AttachVolumeTestJSON"
|
||||
TESTCASES="$TESTCASES|tempest.api.compute.volumes.test_volumes_negative.VolumesNegativeTest.test_get_volume_without_passing_volume_id"
|
||||
# add new test cases like following line for volume_type test
|
||||
# TESTCASES="$TESTCASES|tempest.api.volume.admin.test_volumes_type"
|
||||
TESTCASES="$TESTCASES)"
|
||||
ostestr --regex $TESTCASES
|
||||
|
||||
# --------------------- IMPORTANT begin -------------------- #
|
||||
# all following test cases are from Cinder tempest test cases,
|
||||
# the purpose to list them here is to check which test cases
|
||||
# are still not covered and tested in Cinder-APIGW.
|
||||
#
|
||||
# Those test cases which have been covered by ostestr running
|
||||
# above should be marked with **DONE** after the "#".
|
||||
# please leave the length of each line > 80 characters in order
|
||||
# to keep one test case one line.
|
||||
#
|
||||
# When you add new feature to Cinder-APIGW, please select
|
||||
# proper test cases to test against the feature, and marked
|
||||
# these test cases with **DONE** after the "#". For those test
|
||||
# cases which are not needed to be tested in Cinder-APIGW, for
|
||||
# example V1(which has been deprecated) should be marked with
|
||||
# **SKIP** after "#"
|
||||
#
|
||||
# The test cases running through ostestr could be filtered
|
||||
# by regex expression, for example, for Cinder volume type
|
||||
# releated test cases could be executed by a single clause:
|
||||
# ostestr --regex tempest.api.volume.admin.test_volume_types
|
||||
# --------------------- IMPORTANT end -----------------------#
|
||||
|
||||
|
||||
|
||||
# tempest.api.compute.admin.test_agents.AgentsAdminTestJSON.test_create_agent[id-1fc6bdc8-0b6d-4cc7-9f30-9b04fabe5b90]
|
||||
# tempest.api.compute.admin.test_agents.AgentsAdminTestJSON.test_delete_agent[id-470e0b89-386f-407b-91fd-819737d0b335]
|
||||
# tempest.api.compute.admin.test_agents.AgentsAdminTestJSON.test_list_agents[id-6a326c69-654b-438a-80a3-34bcc454e138]
|
||||
# tempest.api.compute.admin.test_agents.AgentsAdminTestJSON.test_list_agents_with_filter[id-eabadde4-3cd7-4ec4-a4b5-5a936d2d4408]
|
||||
# tempest.api.compute.admin.test_agents.AgentsAdminTestJSON.test_update_agent[id-dc9ffd51-1c50-4f0e-a820-ae6d2a568a9e]
|
||||
# tempest.api.compute.admin.test_aggregates.AggregatesAdminTestJSON.test_aggregate_add_host_create_server_with_az[id-96be03c7-570d-409c-90f8-e4db3c646996]
|
||||
# tempest.api.compute.admin.test_aggregates.AggregatesAdminTestJSON.test_aggregate_add_host_get_details[id-eeef473c-7c52-494d-9f09-2ed7fc8fc036]
|
||||
# tempest.api.compute.admin.test_aggregates.AggregatesAdminTestJSON.test_aggregate_add_host_list[id-7f6a1cc5-2446-4cdb-9baa-b6ae0a919b72]
|
||||
# tempest.api.compute.admin.test_aggregates.AggregatesAdminTestJSON.test_aggregate_add_remove_host[id-c8e85064-e79b-4906-9931-c11c24294d02]
|
||||
# tempest.api.compute.admin.test_aggregates.AggregatesAdminTestJSON.test_aggregate_create_delete[id-0d148aa3-d54c-4317-aa8d-42040a475e20]
|
||||
# tempest.api.compute.admin.test_aggregates.AggregatesAdminTestJSON.test_aggregate_create_delete_with_az[id-5873a6f8-671a-43ff-8838-7ce430bb6d0b]
|
||||
# tempest.api.compute.admin.test_aggregates.AggregatesAdminTestJSON.test_aggregate_create_update_metadata_get_details[id-36ec92ca-7a73-43bc-b920-7531809e8540]
|
||||
# tempest.api.compute.admin.test_aggregates.AggregatesAdminTestJSON.test_aggregate_create_update_with_az[id-4d2b2004-40fa-40a1-aab2-66f4dab81beb]
|
||||
# tempest.api.compute.admin.test_aggregates.AggregatesAdminTestJSON.test_aggregate_create_verify_entry_in_list[id-68089c38-04b1-4758-bdf0-cf0daec4defd]
|
||||
# tempest.api.compute.admin.test_aggregates_negative.AggregatesAdminNegativeTestJSON.test_aggregate_add_existent_host[id-19dd44e1-c435-4ee1-a402-88c4f90b5950,negative]
|
||||
# tempest.api.compute.admin.test_aggregates_negative.AggregatesAdminNegativeTestJSON.test_aggregate_add_host_as_user[id-7324c334-bd13-4c93-8521-5877322c3d51,negative]
|
||||
# tempest.api.compute.admin.test_aggregates_negative.AggregatesAdminNegativeTestJSON.test_aggregate_add_non_exist_host[id-0ef07828-12b4-45ba-87cc-41425faf5711,negative]
|
||||
# tempest.api.compute.admin.test_aggregates_negative.AggregatesAdminNegativeTestJSON.test_aggregate_create_aggregate_name_length_exceeds_255[id-4c194563-543b-4e70-a719-557bbe947fac,negative]
|
||||
# tempest.api.compute.admin.test_aggregates_negative.AggregatesAdminNegativeTestJSON.test_aggregate_create_aggregate_name_length_less_than_1[id-3b8a1929-3793-4e92-bcb4-dfa572ee6c1d,negative]
|
||||
# tempest.api.compute.admin.test_aggregates_negative.AggregatesAdminNegativeTestJSON.test_aggregate_create_as_user[id-86a1cb14-da37-4a70-b056-903fd56dfe29,negative]
|
||||
# tempest.api.compute.admin.test_aggregates_negative.AggregatesAdminNegativeTestJSON.test_aggregate_create_with_existent_aggregate_name[id-9c23a291-b0b1-487b-b464-132e061151b3,negative]
|
||||
# tempest.api.compute.admin.test_aggregates_negative.AggregatesAdminNegativeTestJSON.test_aggregate_delete_as_user[id-cd6de795-c15d-45f1-8d9e-813c6bb72a3d,negative]
|
||||
# tempest.api.compute.admin.test_aggregates_negative.AggregatesAdminNegativeTestJSON.test_aggregate_delete_with_invalid_id[id-c74f4bf1-4708-4ff2-95a0-f49eaca951bd,negative]
|
||||
# tempest.api.compute.admin.test_aggregates_negative.AggregatesAdminNegativeTestJSON.test_aggregate_get_details_as_user[id-557cad12-34c9-4ff4-95f0-22f0dfbaf7dc,negative]
|
||||
# tempest.api.compute.admin.test_aggregates_negative.AggregatesAdminNegativeTestJSON.test_aggregate_get_details_with_invalid_id[id-3c916244-2c46-49a4-9b55-b20bb0ae512c,negative]
|
||||
# tempest.api.compute.admin.test_aggregates_negative.AggregatesAdminNegativeTestJSON.test_aggregate_list_as_user[id-b7d475a6-5dcd-4ff4-b70a-cd9de66a6672,negative]
|
||||
# tempest.api.compute.admin.test_aggregates_negative.AggregatesAdminNegativeTestJSON.test_aggregate_remove_host_as_user[id-7a53af20-137a-4e44-a4ae-e19260e626d9,negative]
|
||||
# tempest.api.compute.admin.test_aggregates_negative.AggregatesAdminNegativeTestJSON.test_aggregate_remove_nonexistent_host[id-95d6a6fa-8da9-4426-84d0-eec0329f2e4d,negative]
|
||||
# tempest.api.compute.admin.test_availability_zone.AZAdminV2TestJSON.test_get_availability_zone_list[id-d3431479-8a09-4f76-aa2d-26dc580cb27c]
|
||||
# tempest.api.compute.admin.test_availability_zone.AZAdminV2TestJSON.test_get_availability_zone_list_detail[id-ef726c58-530f-44c2-968c-c7bed22d5b8c]
|
||||
# tempest.api.compute.admin.test_availability_zone_negative.AZAdminNegativeTestJSON.test_get_availability_zone_list_detail_with_non_admin_user[id-bf34dca2-fdc3-4073-9c02-7648d9eae0d7,negative]
|
||||
# tempest.api.compute.admin.test_baremetal_nodes.BaremetalNodesAdminTestJSON.test_list_get_baremetal_nodes[baremetal,id-e475aa6e-416d-4fa4-b3af-28d5e84250fb]
# tempest.api.compute.admin.test_fixed_ips.FixedIPsTestJson.test_list_fixed_ip_details[id-16b7d848-2f7c-4709-85a3-2dfb4576cc52,network]
# tempest.api.compute.admin.test_fixed_ips.FixedIPsTestJson.test_set_reserve[id-5485077b-7e46-4cec-b402-91dc3173433b,network]
# tempest.api.compute.admin.test_fixed_ips.FixedIPsTestJson.test_set_unreserve[id-7476e322-b9ff-4710-bf82-49d51bac6e2e,network]
# tempest.api.compute.admin.test_fixed_ips_negative.FixedIPsNegativeTestJson.test_fixed_ip_with_invalid_action[id-fd26ef50-f135-4232-9d32-281aab3f9176,negative,network]
# tempest.api.compute.admin.test_fixed_ips_negative.FixedIPsNegativeTestJson.test_list_fixed_ip_details_with_non_admin_user[id-9f17f47d-daad-4adc-986e-12370c93e407,negative,network]
# tempest.api.compute.admin.test_fixed_ips_negative.FixedIPsNegativeTestJson.test_set_reserve_with_invalid_ip[id-f51cf464-7fc5-4352-bc3e-e75cfa2cb717,negative,network]
# tempest.api.compute.admin.test_fixed_ips_negative.FixedIPsNegativeTestJson.test_set_reserve_with_non_admin_user[id-ce60042c-fa60-4836-8d43-1c8e3359dc47,negative,network]
# tempest.api.compute.admin.test_fixed_ips_negative.FixedIPsNegativeTestJson.test_set_unreserve_with_non_admin_user[id-f1f7a35b-0390-48c5-9803-5f27461439db,negative,network]
# tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_create_flavor_using_string_ram[id-3b541a2e-2ac2-4b42-8b8d-ba6e22fcd4da]
# tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_create_flavor_verify_entry_in_list_details[id-8261d7b0-be58-43ec-a2e5-300573c3f6c5]
# tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_create_flavor_with_int_id[id-8b4330e1-12c4-4554-9390-e6639971f086]
# tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_create_flavor_with_none_id[id-f83fe669-6758-448a-a85e-32d351f36fe0]
# tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_create_flavor_with_uuid_id[id-94c9bb4e-2c2a-4f3c-bb1f-5f0daf918e6d]
# tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_create_list_flavor_without_extra_data[id-63dc64e6-2e79-4fdf-868f-85500d308d66]
# tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_create_server_with_non_public_flavor[id-bcc418ef-799b-47cc-baa1-ce01368b8987]
# tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_is_public_string_variations[id-fb9cbde6-3a0e-41f2-a983-bdb0a823c44e]
# tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_list_non_public_flavor[id-be6cc18c-7c5d-48c0-ac16-17eaf03c54eb]
# tempest.api.compute.admin.test_flavors.FlavorsAdminTestJSON.test_list_public_flavor_with_other_user[id-b345b196-bfbd-4231-8ac1-6d7fe15ff3a3]
# tempest.api.compute.admin.test_flavors_access.FlavorsAccessTestJSON.test_flavor_access_add_remove[id-59e622f6-bdf6-45e3-8ba8-fedad905a6b4]
# tempest.api.compute.admin.test_flavors_access.FlavorsAccessTestJSON.test_flavor_access_list_with_private_flavor[id-ea2c2211-29fa-4db9-97c3-906d36fad3e0]
# tempest.api.compute.admin.test_flavors_access_negative.FlavorsAccessNegativeTestJSON.test_add_flavor_access_duplicate[id-f3592cc0-0306-483c-b210-9a7b5346eddc,negative]
# tempest.api.compute.admin.test_flavors_access_negative.FlavorsAccessNegativeTestJSON.test_flavor_access_list_with_public_flavor[id-0621c53e-d45d-40e7-951d-43e5e257b272,negative]
# tempest.api.compute.admin.test_flavors_access_negative.FlavorsAccessNegativeTestJSON.test_flavor_non_admin_add[id-41eaaade-6d37-4f28-9c74-f21b46ca67bd,negative]
# tempest.api.compute.admin.test_flavors_access_negative.FlavorsAccessNegativeTestJSON.test_flavor_non_admin_remove[id-073e79a6-c311-4525-82dc-6083d919cb3a,negative]
# tempest.api.compute.admin.test_flavors_access_negative.FlavorsAccessNegativeTestJSON.test_remove_flavor_access_not_found[id-1f710927-3bc7-4381-9f82-0ca6e42644b7,negative]
# tempest.api.compute.admin.test_flavors_extra_specs.FlavorsExtraSpecsTestJSON.test_flavor_non_admin_get_all_keys[id-a99dad88-ae1c-4fba-aeb4-32f898218bd0]
# tempest.api.compute.admin.test_flavors_extra_specs.FlavorsExtraSpecsTestJSON.test_flavor_non_admin_get_specific_key[id-12805a7f-39a3-4042-b989-701d5cad9c90]
# tempest.api.compute.admin.test_flavors_extra_specs.FlavorsExtraSpecsTestJSON.test_flavor_set_get_update_show_unset_keys[id-0b2f9d4b-1ca2-4b99-bb40-165d4bb94208]
# tempest.api.compute.admin.test_flavors_extra_specs_negative.FlavorsExtraSpecsNegativeTestJSON.test_flavor_get_nonexistent_key[id-329a7be3-54b2-48be-8052-bf2ce4afd898,negative]
# tempest.api.compute.admin.test_flavors_extra_specs_negative.FlavorsExtraSpecsNegativeTestJSON.test_flavor_non_admin_set_keys[id-a00a3b81-5641-45a8-ab2b-4a8ec41e1d7d,negative]
# tempest.api.compute.admin.test_flavors_extra_specs_negative.FlavorsExtraSpecsNegativeTestJSON.test_flavor_non_admin_unset_keys[id-28f12249-27c7-44c1-8810-1f382f316b11,negative]
# tempest.api.compute.admin.test_flavors_extra_specs_negative.FlavorsExtraSpecsNegativeTestJSON.test_flavor_non_admin_update_specific_key[id-1ebf4ef8-759e-48fe-a801-d451d80476fb,negative]
# tempest.api.compute.admin.test_flavors_extra_specs_negative.FlavorsExtraSpecsNegativeTestJSON.test_flavor_unset_nonexistent_key[id-440b9f3f-3c7f-4293-a106-0ceda350f8de,negative]
# tempest.api.compute.admin.test_flavors_extra_specs_negative.FlavorsExtraSpecsNegativeTestJSON.test_flavor_update_mismatch_key[id-25b822b8-9f49-44f6-80de-d99f0482e5cb,negative]
# tempest.api.compute.admin.test_flavors_extra_specs_negative.FlavorsExtraSpecsNegativeTestJSON.test_flavor_update_more_key[id-f5889590-bf66-41cc-b4b1-6e6370cfd93f,negative]
# tempest.api.compute.admin.test_floating_ips_bulk.FloatingIPsBulkAdminTestJSON.test_create_list_delete_floating_ips_bulk[id-2c8f145f-8012-4cb8-ac7e-95a587f0e4ab,network]
# tempest.api.compute.admin.test_hosts.HostsAdminTestJSON.test_list_hosts[id-9bfaf98d-e2cb-44b0-a07e-2558b2821e4f]
# tempest.api.compute.admin.test_hosts.HostsAdminTestJSON.test_list_hosts_with_a_blank_zone[id-9af3c171-fbf4-4150-a624-22109733c2a6]
# tempest.api.compute.admin.test_hosts.HostsAdminTestJSON.test_list_hosts_with_nonexistent_zone[id-c6ddbadb-c94e-4500-b12f-8ffc43843ff8]
# tempest.api.compute.admin.test_hosts.HostsAdminTestJSON.test_list_hosts_with_zone[id-5dc06f5b-d887-47a2-bb2a-67762ef3c6de]
# tempest.api.compute.admin.test_hosts.HostsAdminTestJSON.test_show_host_detail[id-38adbb12-aee2-4498-8aec-329c72423aa4]
# tempest.api.compute.admin.test_hosts_negative.HostsAdminNegativeTestJSON.test_list_hosts_with_non_admin_user[id-dd032027-0210-4d9c-860e-69b1b8deed5f,negative]
# tempest.api.compute.admin.test_hosts_negative.HostsAdminNegativeTestJSON.test_reboot_host_with_non_admin_user[id-02d79bb9-eb57-4612-abf6-2cb38897d2f8,negative]
# tempest.api.compute.admin.test_hosts_negative.HostsAdminNegativeTestJSON.test_reboot_nonexistent_host[id-f86bfd7b-0b13-4849-ae29-0322e83ee58b,negative]
# tempest.api.compute.admin.test_hosts_negative.HostsAdminNegativeTestJSON.test_show_host_detail_with_non_admin_user[id-19ebe09c-bfd4-4b7c-81a2-e2e0710f59cc,negative]
# tempest.api.compute.admin.test_hosts_negative.HostsAdminNegativeTestJSON.test_show_host_detail_with_nonexistent_hostname[id-e75b0a1a-041f-47a1-8b4a-b72a6ff36d3f,negative]
# tempest.api.compute.admin.test_hosts_negative.HostsAdminNegativeTestJSON.test_shutdown_host_with_non_admin_user[id-a803529c-7e3f-4d3c-a7d6-8e1c203d27f6,negative]
# tempest.api.compute.admin.test_hosts_negative.HostsAdminNegativeTestJSON.test_shutdown_nonexistent_host[id-9e637444-29cf-4244-88c8-831ae82c31b6,negative]
# tempest.api.compute.admin.test_hosts_negative.HostsAdminNegativeTestJSON.test_startup_host_with_non_admin_user[id-9f4ebb7e-b2ae-4e5b-a38f-0fd1bb0ddfca,negative]
# tempest.api.compute.admin.test_hosts_negative.HostsAdminNegativeTestJSON.test_startup_nonexistent_host[id-0d981ac3-4320-4898-b674-82b61fbb60e4,negative]
# tempest.api.compute.admin.test_hosts_negative.HostsAdminNegativeTestJSON.test_update_host_with_invalid_maintenance_mode[id-ab1e230e-5e22-41a9-8699-82b9947915d4,negative]
# tempest.api.compute.admin.test_hosts_negative.HostsAdminNegativeTestJSON.test_update_host_with_invalid_status[id-fbe2bf3e-3246-4a95-a59f-94e4e298ec77,negative]
# tempest.api.compute.admin.test_hosts_negative.HostsAdminNegativeTestJSON.test_update_host_with_non_admin_user[id-e40c72b1-0239-4ed6-ba21-81a184df1f7c,negative]
# tempest.api.compute.admin.test_hosts_negative.HostsAdminNegativeTestJSON.test_update_host_without_param[id-0cd85f75-6992-4a4a-b1bd-d11e37fd0eee,negative]
# tempest.api.compute.admin.test_hosts_negative.HostsAdminNegativeTestJSON.test_update_nonexistent_host[id-23c92146-2100-4d68-b2d6-c7ade970c9c1,negative]
# tempest.api.compute.admin.test_hypervisor.HypervisorAdminTestJSON.test_get_hypervisor_list[id-7f0ceacd-c64d-4e96-b8ee-d02943142cc5]
# tempest.api.compute.admin.test_hypervisor.HypervisorAdminTestJSON.test_get_hypervisor_list_details[id-1e7fdac2-b672-4ad1-97a4-bad0e3030118]
# tempest.api.compute.admin.test_hypervisor.HypervisorAdminTestJSON.test_get_hypervisor_show_details[id-94ff9eae-a183-428e-9cdb-79fde71211cc]
# tempest.api.compute.admin.test_hypervisor.HypervisorAdminTestJSON.test_get_hypervisor_show_servers[id-e81bba3f-6215-4e39-a286-d52d2f906862]
# tempest.api.compute.admin.test_hypervisor.HypervisorAdminTestJSON.test_get_hypervisor_stats[id-797e4f28-b6e0-454d-a548-80cc77c00816]
# tempest.api.compute.admin.test_hypervisor.HypervisorAdminTestJSON.test_get_hypervisor_uptime[id-91a50d7d-1c2b-4f24-b55a-a1fe20efca70]
# tempest.api.compute.admin.test_hypervisor.HypervisorAdminTestJSON.test_search_hypervisor[id-d7e1805b-3b14-4a3b-b6fd-50ec6d9f361f]
# tempest.api.compute.admin.test_hypervisor_negative.HypervisorAdminNegativeTestJSON.test_get_hypervisor_list_details_with_non_admin_user[id-dc02db05-e801-4c5f-bc8e-d915290ab345,negative]
# tempest.api.compute.admin.test_hypervisor_negative.HypervisorAdminNegativeTestJSON.test_get_hypervisor_list_with_non_admin_user[id-51b3d536-9b14-409c-9bce-c6f7c794994e,negative]
# tempest.api.compute.admin.test_hypervisor_negative.HypervisorAdminNegativeTestJSON.test_get_hypervisor_stats_with_non_admin_user[id-e2b061bb-13f9-40d8-9d6e-d5bf17595849,negative]
# tempest.api.compute.admin.test_hypervisor_negative.HypervisorAdminNegativeTestJSON.test_get_hypervisor_uptime_with_non_admin_user[id-6c3461f9-c04c-4e2a-bebb-71dc9cb47df2,negative]
# tempest.api.compute.admin.test_hypervisor_negative.HypervisorAdminNegativeTestJSON.test_get_nonexistent_hypervisor_uptime[id-f60aa680-9a3a-4c7d-90e1-fae3a4891303,negative]
# tempest.api.compute.admin.test_hypervisor_negative.HypervisorAdminNegativeTestJSON.test_search_hypervisor_with_non_admin_user[id-5b6a6c79-5dc1-4fa5-9c58-9c8085948e74,negative]
# tempest.api.compute.admin.test_hypervisor_negative.HypervisorAdminNegativeTestJSON.test_search_nonexistent_hypervisor[id-19a45cc1-1000-4055-b6d2-28e8b2ec4faa,negative]
# tempest.api.compute.admin.test_hypervisor_negative.HypervisorAdminNegativeTestJSON.test_show_hypervisor_with_non_admin_user[id-51e663d0-6b89-4817-a465-20aca0667d03,negative]
# tempest.api.compute.admin.test_hypervisor_negative.HypervisorAdminNegativeTestJSON.test_show_nonexistent_hypervisor[id-c136086a-0f67-4b2b-bc61-8482bd68989f,negative]
# tempest.api.compute.admin.test_hypervisor_negative.HypervisorAdminNegativeTestJSON.test_show_servers_with_non_admin_user[id-2a0a3938-832e-4859-95bf-1c57c236b924,negative]
# tempest.api.compute.admin.test_hypervisor_negative.HypervisorAdminNegativeTestJSON.test_show_servers_with_nonexistent_hypervisor[id-02463d69-0ace-4d33-a4a8-93d7883a2bba,negative]
# tempest.api.compute.admin.test_instance_usage_audit_log.InstanceUsageAuditLogTestJSON.test_get_instance_usage_audit_log[id-6e40459d-7c5f-400b-9e83-449fbc8e7feb]
# tempest.api.compute.admin.test_instance_usage_audit_log.InstanceUsageAuditLogTestJSON.test_list_instance_usage_audit_logs[id-25319919-33d9-424f-9f99-2c203ee48b9d]
# tempest.api.compute.admin.test_instance_usage_audit_log_negative.InstanceUsageAuditLogNegativeTestJSON.test_get_instance_usage_audit_logs_with_invalid_time[id-9b952047-3641-41c7-ba91-a809fc5974c8,negative]
# tempest.api.compute.admin.test_instance_usage_audit_log_negative.InstanceUsageAuditLogNegativeTestJSON.test_instance_usage_audit_logs_with_nonadmin_user[id-a9d33178-d2c9-4131-ad3b-f4ca8d0308a2,negative]
# tempest.api.compute.admin.test_keypairs_v210.KeyPairsV210TestJSON.test_admin_manage_keypairs_for_other_users[id-3c8484af-cfb3-48f6-b8ba-d5d58bbf3eac]
# tempest.api.compute.admin.test_live_migration.LiveBlockMigrationTestJSON.test_iscsi_volume[id-e19c0cc6-6720-4ed8-be83-b6603ed5c812]
# tempest.api.compute.admin.test_live_migration.LiveBlockMigrationTestJSON.test_live_block_migration[id-1dce86b8-eb04-4c03-a9d8-9c1dc3ee0c7b]
# tempest.api.compute.admin.test_live_migration.LiveBlockMigrationTestJSON.test_live_block_migration_paused[id-1e107f21-61b2-4988-8f22-b196e938ab88]
# tempest.api.compute.admin.test_live_migration.LiveBlockMigrationTestJSON.test_volume_backed_live_migration[id-5071cf17-3004-4257-ae61-73a84e28badd,volume]
# tempest.api.compute.admin.test_migrations.MigrationsAdminTest.test_list_migrations[id-75c0b83d-72a0-4cf8-a153-631e83e7d53f]
# tempest.api.compute.admin.test_migrations.MigrationsAdminTest.test_list_migrations_in_flavor_resize_situation[id-1b512062-8093-438e-b47a-37d2f597cd64]
# tempest.api.compute.admin.test_migrations.MigrationsAdminTest.test_resize_server_revert_deleted_flavor[id-33f1fec3-ba18-4470-8e4e-1d888e7c3593]
# tempest.api.compute.admin.test_networks.NetworksTest.test_get_network[id-d206d211-8912-486f-86e2-a9d090d1f416]
# tempest.api.compute.admin.test_networks.NetworksTest.test_list_all_networks[id-df3d1046-6fa5-4b2c-ad0c-cfa46a351cb9]
# tempest.api.compute.admin.test_quotas.QuotaClassesAdminTestJSON.test_update_default_quotas[id-7932ab0f-5136-4075-b201-c0e2338df51a]
# tempest.api.compute.admin.test_quotas.QuotasAdminTestJSON.test_delete_quota[id-389d04f0-3a41-405f-9317-e5f86e3c44f0]
# tempest.api.compute.admin.test_quotas.QuotasAdminTestJSON.test_get_default_quotas[id-3b0a7c8f-cf58-46b8-a60c-715a32a8ba7d]
# tempest.api.compute.admin.test_quotas.QuotasAdminTestJSON.test_get_updated_quotas[id-ce9e0815-8091-4abd-8345-7fe5b85faa1d]
# tempest.api.compute.admin.test_quotas.QuotasAdminTestJSON.test_update_all_quota_resources_for_tenant[id-55fbe2bf-21a9-435b-bbd2-4162b0ed799a]
# tempest.api.compute.admin.test_quotas_negative.QuotasAdminNegativeTestJSON.test_create_server_when_cpu_quota_is_full[id-91058876-9947-4807-9f22-f6eb17140d9b,negative]
# tempest.api.compute.admin.test_quotas_negative.QuotasAdminNegativeTestJSON.test_create_server_when_instances_quota_is_full[id-7c6be468-0274-449a-81c3-ac1c32ee0161,negative]
# tempest.api.compute.admin.test_quotas_negative.QuotasAdminNegativeTestJSON.test_create_server_when_memory_quota_is_full[id-6fdd7012-584d-4327-a61c-49122e0d5864,negative]
# tempest.api.compute.admin.test_quotas_negative.QuotasAdminNegativeTestJSON.test_security_groups_exceed_limit[id-7c6c8f3b-2bf6-4918-b240-57b136a66aa0,network]
# tempest.api.compute.admin.test_quotas_negative.QuotasAdminNegativeTestJSON.test_security_groups_rules_exceed_limit[id-6e9f436d-f1ed-4f8e-a493-7275dfaa4b4d,negative,network]
# tempest.api.compute.admin.test_quotas_negative.QuotasAdminNegativeTestJSON.test_update_quota_normal_user[id-733abfe8-166e-47bb-8363-23dbd7ff3476,negative]
# tempest.api.compute.admin.test_security_group_default_rules.SecurityGroupDefaultRulesTest.test_create_delete_security_group_default_rules[id-6d880615-eec3-4d29-97c5-7a074dde239d]
# tempest.api.compute.admin.test_security_group_default_rules.SecurityGroupDefaultRulesTest.test_create_security_group_default_rule_with_blank_cidr[id-29f2d218-69b0-4a95-8f3d-6bd0ef732b3a]
# tempest.api.compute.admin.test_security_group_default_rules.SecurityGroupDefaultRulesTest.test_create_security_group_default_rule_without_cidr[id-4d752e0a-33a1-4c3a-b498-ff8667ca22e5]
# tempest.api.compute.admin.test_security_group_default_rules.SecurityGroupDefaultRulesTest.test_default_security_group_default_rule_show[id-15cbb349-86b4-4f71-a048-04b7ef3f150b]
# tempest.api.compute.admin.test_security_group_default_rules.SecurityGroupDefaultRulesTest.test_security_group_default_rules_list[id-6e6de55e-9146-4ae0-89f2-3569586e0b9b]
# tempest.api.compute.admin.test_security_groups.SecurityGroupsTestAdminJSON.test_list_security_groups_list_all_tenants_filter[id-49667619-5af9-4c63-ab5d-2cfdd1c8f7f1,network]
# tempest.api.compute.admin.test_servers.ServersAdminTestJSON.test_create_server_with_scheduling_hint[id-fdcd9b33-0903-4e00-a1f7-b5f6543068d6]
# tempest.api.compute.admin.test_servers.ServersAdminTestJSON.test_get_server_diagnostics_by_admin[id-31ff3486-b8a0-4f56-a6c0-aab460531db3]
# tempest.api.compute.admin.test_servers.ServersAdminTestJSON.test_list_servers_by_admin[id-51717b38-bdc1-458b-b636-1cf82d99f62f]
# tempest.api.compute.admin.test_servers.ServersAdminTestJSON.test_list_servers_by_admin_with_all_tenants[id-9f5579ae-19b4-4985-a091-2a5d56106580]
# tempest.api.compute.admin.test_servers.ServersAdminTestJSON.test_list_servers_by_admin_with_specified_tenant[id-7e5d6b8f-454a-4ba1-8ae2-da857af8338b]
# tempest.api.compute.admin.test_servers.ServersAdminTestJSON.test_list_servers_filter_by_error_status[id-06f960bb-15bb-48dc-873d-f96e89be7870]
# tempest.api.compute.admin.test_servers.ServersAdminTestJSON.test_list_servers_filter_by_exist_host[id-86c7a8f7-50cf-43a9-9bac-5b985317134f]
# tempest.api.compute.admin.test_servers.ServersAdminTestJSON.test_rebuild_server_in_error_state[id-682cb127-e5bb-4f53-87ce-cb9003604442]
# tempest.api.compute.admin.test_servers.ServersAdminTestJSON.test_reset_network_inject_network_info[id-7a1323b4-a6a2-497a-96cb-76c07b945c71]
# **DONE** tempest.api.compute.admin.test_servers.ServersAdminTestJSON.test_reset_state_server[id-ee8ae470-db70-474d-b752-690b7892cab1]
# tempest.api.compute.admin.test_servers_negative.ServersAdminNegativeTestJSON.test_get_server_diagnostics_by_non_admin[id-e84e2234-60d2-42fa-8b30-e2d3049724ac,negative]
# **DONE** tempest.api.compute.admin.test_servers_negative.ServersAdminNegativeTestJSON.test_migrate_non_existent_server[id-46a4e1ca-87ae-4d28-987a-1b6b136a0221,negative]
# **DONE** tempest.api.compute.admin.test_servers_negative.ServersAdminNegativeTestJSON.test_migrate_server_invalid_state[id-b0b17f83-d14e-4fc4-8f31-bcc9f3cfa629,negative]
# **DONE** tempest.api.compute.admin.test_servers_negative.ServersAdminNegativeTestJSON.test_reset_state_server_invalid_state[id-b0b4d8af-1256-41ef-9ee7-25f1c19dde80,negative]
# **DONE** tempest.api.compute.admin.test_servers_negative.ServersAdminNegativeTestJSON.test_reset_state_server_invalid_type[id-4cdcc984-fab0-4577-9a9d-6d558527ee9d,negative]
# **DONE** tempest.api.compute.admin.test_servers_negative.ServersAdminNegativeTestJSON.test_reset_state_server_nonexistent_server[id-e741298b-8df2-46f0-81cb-8f814ff2504c,negative]
# tempest.api.compute.admin.test_servers_negative.ServersAdminNegativeTestJSON.test_resize_server_using_overlimit_ram[id-28dcec23-f807-49da-822c-56a92ea3c687,negative]
# tempest.api.compute.admin.test_servers_negative.ServersAdminNegativeTestJSON.test_resize_server_using_overlimit_vcpus[id-7368a427-2f26-4ad9-9ba9-911a0ec2b0db,negative]
# tempest.api.compute.admin.test_servers_on_multinodes.ServersOnMultiNodesTest.test_create_servers_on_different_hosts[id-cc7ca884-6e3e-42a3-a92f-c522fcf25e8e]
# tempest.api.compute.admin.test_servers_on_multinodes.ServersOnMultiNodesTest.test_create_servers_on_different_hosts_with_list_of_servers[id-7869cc84-d661-4e14-9f00-c18cdc89cf57]
# tempest.api.compute.admin.test_servers_on_multinodes.ServersOnMultiNodesTest.test_create_servers_on_same_host[id-26a9d5df-6890-45f2-abc4-a659290cb130]
# tempest.api.compute.admin.test_services.ServicesAdminTestJSON.test_get_service_by_host_name[id-affb42d5-5b4b-43c8-8b0b-6dca054abcca]
# tempest.api.compute.admin.test_services.ServicesAdminTestJSON.test_get_service_by_service_and_host_name[id-39397f6f-37b8-4234-8671-281e44c74025]
# tempest.api.compute.admin.test_services.ServicesAdminTestJSON.test_get_service_by_service_binary_name[id-f345b1ec-bc6e-4c38-a527-3ca2bc00bef5]
# tempest.api.compute.admin.test_services.ServicesAdminTestJSON.test_list_services[id-5be41ef4-53d1-41cc-8839-5c2a48a1b283]
# tempest.api.compute.admin.test_services_negative.ServicesAdminNegativeTestJSON.test_get_service_by_invalid_params[id-d0884a69-f693-4e79-a9af-232d15643bf7,negative]
# tempest.api.compute.admin.test_services_negative.ServicesAdminNegativeTestJSON.test_get_service_by_invalid_service_and_valid_host[id-1e966d4a-226e-47c7-b601-0b18a27add54,negative]
# tempest.api.compute.admin.test_services_negative.ServicesAdminNegativeTestJSON.test_get_service_with_valid_service_and_invalid_host[id-64e7e7fb-69e8-4cb6-a71d-8d5eb0c98655,negative]
# tempest.api.compute.admin.test_services_negative.ServicesAdminNegativeTestJSON.test_list_services_with_non_admin_user[id-1126d1f8-266e-485f-a687-adc547492646,negative]
# tempest.api.compute.admin.test_simple_tenant_usage.TenantUsagesTestJSON.test_get_usage_tenant[id-94135049-a4c5-4934-ad39-08fa7da4f22e]
# tempest.api.compute.admin.test_simple_tenant_usage.TenantUsagesTestJSON.test_get_usage_tenant_with_non_admin_user[id-9d00a412-b40e-4fd9-8eba-97b496316116]
# tempest.api.compute.admin.test_simple_tenant_usage.TenantUsagesTestJSON.test_list_usage_all_tenants[id-062c8ae9-9912-4249-8b51-e38d664e926e]
# tempest.api.compute.admin.test_simple_tenant_usage_negative.TenantUsagesNegativeTestJSON.test_get_usage_tenant_with_empty_tenant_id[id-8b21e135-d94b-4991-b6e9-87059609c8ed,negative]
# tempest.api.compute.admin.test_simple_tenant_usage_negative.TenantUsagesNegativeTestJSON.test_get_usage_tenant_with_invalid_date[id-4079dd2a-9e8d-479f-869d-6fa985ce45b6,negative]
# tempest.api.compute.admin.test_simple_tenant_usage_negative.TenantUsagesNegativeTestJSON.test_list_usage_all_tenants_with_non_admin_user[id-bbe6fe2c-15d8-404c-a0a2-44fad0ad5cc7,negative]
# tempest.api.compute.certificates.test_certificates.CertificatesV2TestJSON.test_create_root_certificate[id-c070a441-b08e-447e-a733-905909535b1b]
# tempest.api.compute.certificates.test_certificates.CertificatesV2TestJSON.test_get_root_certificate[id-3ac273d0-92d2-4632-bdfc-afbc21d4606c]
# tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON.test_get_flavor[id-1f12046b-753d-40d2-abb6-d8eb8b30cb2f,smoke]
# tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON.test_list_flavors[id-e36c0eaa-dff5-4082-ad1f-3f9a80aa3f59,smoke]
# tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON.test_list_flavors_detailed_filter_by_min_disk[id-3df2743e-3034-4e57-a4cb-b6527f6eac79]
# tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON.test_list_flavors_detailed_filter_by_min_ram[id-09fe7509-b4ee-4b34-bf8b-39532dc47292]
# tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON.test_list_flavors_detailed_limit_results[id-b26f6327-2886-467a-82be-cef7a27709cb]
# tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON.test_list_flavors_detailed_using_marker[id-6db2f0c0-ddee-4162-9c84-0703d3dd1107]
# tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON.test_list_flavors_filter_by_min_disk[id-10645a4d-96f5-443f-831b-730711e11dd4]
# tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON.test_list_flavors_filter_by_min_ram[id-935cf550-e7c8-4da6-8002-00f92d5edfaa]
# tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON.test_list_flavors_limit_results[id-8d7691b3-6ed4-411a-abc9-2839a765adab]
# tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON.test_list_flavors_using_marker[id-e800f879-9828-4bd0-8eae-4f17189951fb]
# tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON.test_list_flavors_with_detail[id-6e85fde4-b3cd-4137-ab72-ed5f418e8c24]
# tempest.api.compute.flavors.test_flavors_negative.FlavorDetailsNegativeTestJSON.test_flavor_details_negative[negative](inv_res_flavor)
# tempest.api.compute.flavors.test_flavors_negative.FlavorsListWithDetailsNegativeTestJSON.test_flavors_list_with_details_negative[negative](minRam_gen_none)
# tempest.api.compute.flavors.test_flavors_negative.FlavorsListWithDetailsNegativeTestJSON.test_flavors_list_with_details_negative[negative](minRam_gen_string)
# tempest.api.compute.flavors.test_flavors_negative.FlavorsListWithDetailsNegativeTestJSON.test_flavors_list_with_details_negative[negative](minDisk_gen_none)
# tempest.api.compute.flavors.test_flavors_negative.FlavorsListWithDetailsNegativeTestJSON.test_flavors_list_with_details_negative[negative](minDisk_gen_string)
# tempest.api.compute.floating_ips.test_floating_ips_actions.FloatingIPsTestJSON.test_allocate_floating_ip[id-f7bfb946-297e-41b8-9e8c-aba8e9bb5194,network]
# tempest.api.compute.floating_ips.test_floating_ips_actions.FloatingIPsTestJSON.test_associate_already_associated_floating_ip[id-6edef4b2-aaf1-4abc-bbe3-993e2561e0fe,network]
# tempest.api.compute.floating_ips.test_floating_ips_actions.FloatingIPsTestJSON.test_associate_disassociate_floating_ip[id-307efa27-dc6f-48a0-8cd2-162ce3ef0b52,network]
# tempest.api.compute.floating_ips.test_floating_ips_actions.FloatingIPsTestJSON.test_delete_floating_ip[id-de45e989-b5ca-4a9b-916b-04a52e7bbb8b,network]
# tempest.api.compute.floating_ips.test_floating_ips_actions_negative.FloatingIPsNegativeTestJSON.test_allocate_floating_ip_from_nonexistent_pool[id-6e0f059b-e4dd-48fb-8207-06e3bba5b074,negative,network]
# tempest.api.compute.floating_ips.test_floating_ips_actions_negative.FloatingIPsNegativeTestJSON.test_associate_ip_to_server_without_passing_floating_ip[id-804b4fcb-bbf5-412f-925d-896672b61eb3,negative,network]
# tempest.api.compute.floating_ips.test_floating_ips_actions_negative.FloatingIPsNegativeTestJSON.test_associate_nonexistent_floating_ip[id-595fa616-1a71-4670-9614-46564ac49a4c,negative,network]
# tempest.api.compute.floating_ips.test_floating_ips_actions_negative.FloatingIPsNegativeTestJSON.test_delete_nonexistent_floating_ip[id-ae1c55a8-552b-44d4-bfb6-2a115a15d0ba,negative,network]
# tempest.api.compute.floating_ips.test_floating_ips_actions_negative.FloatingIPsNegativeTestJSON.test_dissociate_nonexistent_floating_ip[id-0a081a66-e568-4e6b-aa62-9587a876dca8,negative,network]
# tempest.api.compute.floating_ips.test_list_floating_ips.FloatingIPDetailsTestJSON.test_get_floating_ip_details[id-eef497e0-8ff7-43c8-85ef-558440574f84,network]
# tempest.api.compute.floating_ips.test_list_floating_ips.FloatingIPDetailsTestJSON.test_list_floating_ip_pools[id-df389fc8-56f5-43cc-b290-20eda39854d3,network]
# tempest.api.compute.floating_ips.test_list_floating_ips.FloatingIPDetailsTestJSON.test_list_floating_ips[id-16db31c3-fb85-40c9-bbe2-8cf7b67ff99f,network]
# tempest.api.compute.floating_ips.test_list_floating_ips_negative.FloatingIPDetailsNegativeTestJSON.test_get_nonexistent_floating_ip_details[id-7ab18834-4a4b-4f28-a2c5-440579866695,negative,network]
# tempest.api.compute.images.test_image_metadata.ImagesMetadataTestJSON.test_delete_image_metadata_item[id-a013796c-ba37-4bb5-8602-d944511def14]
# tempest.api.compute.images.test_image_metadata.ImagesMetadataTestJSON.test_get_image_metadata_item[id-4f5db52f-6685-4c75-b848-f4bb363f9aa6]
# tempest.api.compute.images.test_image_metadata.ImagesMetadataTestJSON.test_list_image_metadata[id-37ec6edd-cf30-4c53-bd45-ae74db6b0531]
# tempest.api.compute.images.test_image_metadata.ImagesMetadataTestJSON.test_set_image_metadata[id-ece7befc-d3ce-42a4-b4be-c3067a418c29]
# tempest.api.compute.images.test_image_metadata.ImagesMetadataTestJSON.test_set_image_metadata_item[id-f2de776a-4778-4d90-a5da-aae63aee64ae]
# tempest.api.compute.images.test_image_metadata.ImagesMetadataTestJSON.test_update_image_metadata[id-7b491c11-a9d5-40fe-a696-7f7e03d3fea2]
# tempest.api.compute.images.test_image_metadata_negative.ImagesMetadataTestJSON.test_delete_nonexistent_image_metadata_item[id-848e157f-6bcf-4b2e-a5dd-5124025a8518,negative]
# tempest.api.compute.images.test_image_metadata_negative.ImagesMetadataTestJSON.test_get_nonexistent_image_metadata_item[id-41ae052c-6ee6-405c-985e-5712393a620d,negative]
# tempest.api.compute.images.test_image_metadata_negative.ImagesMetadataTestJSON.test_list_nonexistent_image_metadata[id-94069db2-792f-4fa8-8bd3-2271a6e0c095,negative]
# tempest.api.compute.images.test_image_metadata_negative.ImagesMetadataTestJSON.test_set_nonexistent_image_metadata[id-dc64f2ce-77e8-45b0-88c8-e15041d08eaf,negative]
# tempest.api.compute.images.test_image_metadata_negative.ImagesMetadataTestJSON.test_set_nonexistent_image_metadata_item[id-2154fd03-ab54-457c-8874-e6e3eb56e9cf,negative]
# tempest.api.compute.images.test_image_metadata_negative.ImagesMetadataTestJSON.test_update_nonexistent_image_metadata[id-a403ef9e-9f95-427c-b70a-3ce3388796f1,negative]
# tempest.api.compute.images.test_images.ImagesTestJSON.test_create_image_from_stopped_server[id-aaacd1d0-55a2-4ce8-818a-b5439df8adc9]
# tempest.api.compute.images.test_images.ImagesTestJSON.test_delete_saving_image[id-aa06b52b-2db5-4807-b218-9441f75d74e3]
# tempest.api.compute.images.test_images_negative.ImagesNegativeTestJSON.test_create_image_from_deleted_server[id-6cd5a89d-5b47-46a7-93bc-3916f0d84973,negative]
# tempest.api.compute.images.test_images_negative.ImagesNegativeTestJSON.test_create_image_from_invalid_server[id-82c5b0c4-9dbd-463c-872b-20c4755aae7f,negative]
# tempest.api.compute.images.test_images_negative.ImagesNegativeTestJSON.test_create_image_specify_uuid_35_characters_or_less[id-ec176029-73dc-4037-8d72-2e4ff60cf538,negative]
# tempest.api.compute.images.test_images_negative.ImagesNegativeTestJSON.test_create_image_specify_uuid_37_characters_or_more[id-36741560-510e-4cc2-8641-55fe4dfb2437,negative]
# tempest.api.compute.images.test_images_negative.ImagesNegativeTestJSON.test_delete_image_blank_id[id-e6e41425-af5c-4fe6-a4b5-7b7b963ffda5,negative]
# tempest.api.compute.images.test_images_negative.ImagesNegativeTestJSON.test_delete_image_id_is_over_35_character_limit[id-b340030d-82cd-4066-a314-c72fb7c59277,negative]
# tempest.api.compute.images.test_images_negative.ImagesNegativeTestJSON.test_delete_image_negative_image_id[id-68e2c175-bd26-4407-ac0f-4ea9ce2139ea,negative]
# tempest.api.compute.images.test_images_negative.ImagesNegativeTestJSON.test_delete_image_non_hex_string_id[id-924540c3-f1f1-444c-8f58-718958b6724e,negative]
# tempest.api.compute.images.test_images_negative.ImagesNegativeTestJSON.test_delete_image_with_invalid_image_id[id-381acb65-785a-4942-94ce-d8f8c84f1f0f,negative]
# tempest.api.compute.images.test_images_negative.ImagesNegativeTestJSON.test_delete_non_existent_image[id-137aef61-39f7-44a1-8ddf-0adf82511701,negative]
# tempest.api.compute.images.test_images_oneserver.ImagesOneServerTestJSON.test_create_delete_image[id-3731d080-d4c5-4872-b41a-64d0d0021314]
# tempest.api.compute.images.test_images_oneserver.ImagesOneServerTestJSON.test_create_image_specify_multibyte_character_image_name[id-3b7c6fe4-dfe7-477c-9243-b06359db51e6]
# tempest.api.compute.images.test_images_oneserver_negative.ImagesOneServerNegativeTestJSON.test_create_image_specify_invalid_metadata[id-55d1d38c-dd66-4933-9c8e-7d92aeb60ddc,negative]
# tempest.api.compute.images.test_images_oneserver_negative.ImagesOneServerNegativeTestJSON.test_create_image_specify_metadata_over_limits[id-3d24d11f-5366-4536-bd28-cff32b748eca,negative]
# tempest.api.compute.images.test_images_oneserver_negative.ImagesOneServerNegativeTestJSON.test_create_image_specify_name_over_256_chars[id-084f0cbc-500a-4963-8a4e-312905862581,negative]
# tempest.api.compute.images.test_images_oneserver_negative.ImagesOneServerNegativeTestJSON.test_create_second_image_when_first_image_is_being_saved[id-0460efcf-ee88-4f94-acef-1bf658695456,negative]
# tempest.api.compute.images.test_images_oneserver_negative.ImagesOneServerNegativeTestJSON.test_delete_image_that_is_not_yet_active[id-0894954d-2db2-4195-a45b-ffec0bc0187e,negative]
# tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_changes_since[id-18bac3ae-da27-436c-92a9-b22474d13aab]
# tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_name[id-33163b73-79f5-4d07-a7ea-9213bcc468ff]
# tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_server_id[id-9f238683-c763-45aa-b848-232ec3ce3105]
# tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_server_ref[id-05a377b8-28cf-4734-a1e6-2ab5c38bf606]
# tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_status[id-a3f5b513-aeb3-42a9-b18e-f091ef73254d]
# tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_type[id-e3356918-4d3e-4756-81d5-abc4524ba29f]
# tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_limit_results[id-3a484ca9-67ba-451e-b494-7fcf28d32d62]
# tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_filter_by_changes_since[id-7d439e18-ac2e-4827-b049-7e18004712c4]
# tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_filter_by_name[id-644ea267-9bd9-4f3b-af9f-dffa02396a17]
# tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_filter_by_server_ref[id-8c78f822-203b-4bf6-8bba-56ebd551cf84]
# tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_filter_by_status[id-9b0ea018-6185-4f71-948a-a123a107988e]
# tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_filter_by_type[id-888c0cc0-7223-43c5-9db0-b125fd0a393b]
# tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_limit_results[id-ba2fa9a9-b672-47cc-b354-3b4c0600e2cb]
# tempest.api.compute.images.test_list_image_filters_negative.ListImageFiltersNegativeTestJSON.test_get_nonexistent_image[id-391b0440-432c-4d4b-b5da-c5096aa247eb,negative]
# tempest.api.compute.images.test_list_images.ListImagesTestJSON.test_get_image[id-490d0898-e12a-463f-aef0-c50156b9f789]
# tempest.api.compute.images.test_list_images.ListImagesTestJSON.test_list_images[id-fd51b7f4-d4a3-4331-9885-866658112a6f]
# tempest.api.compute.images.test_list_images.ListImagesTestJSON.test_list_images_with_detail[id-9f94cb6b-7f10-48c5-b911-a0b84d7d4cd6]
# tempest.api.compute.keypairs.test_keypairs.KeyPairsV2TestJSON.test_get_keypair_detail[id-a4233d5d-52d8-47cc-9a25-e1864527e3df]
# tempest.api.compute.keypairs.test_keypairs.KeyPairsV2TestJSON.test_keypair_create_delete[id-6c1d3123-4519-4742-9194-622cb1714b7d]
# tempest.api.compute.keypairs.test_keypairs.KeyPairsV2TestJSON.test_keypair_create_with_pub_key[id-39c90c6a-304a-49dd-95ec-2366129def05]
# tempest.api.compute.keypairs.test_keypairs.KeyPairsV2TestJSON.test_keypairs_create_list_delete[id-1d1dbedb-d7a0-432a-9d09-83f543c3c19b]
# tempest.api.compute.keypairs.test_keypairs_negative.KeyPairsNegativeTestJSON.test_create_keypair_invalid_name[id-45fbe5e0-acb5-49aa-837a-ff8d0719db91,negative]
# tempest.api.compute.keypairs.test_keypairs_negative.KeyPairsNegativeTestJSON.test_create_keypair_when_public_key_bits_exceeds_maximum[id-fc100c19-2926-4b9c-8fdc-d0589ee2f9ff,negative]
# tempest.api.compute.keypairs.test_keypairs_negative.KeyPairsNegativeTestJSON.test_create_keypair_with_duplicate_name[id-0359a7f1-f002-4682-8073-0c91e4011b7c,negative]
# tempest.api.compute.keypairs.test_keypairs_negative.KeyPairsNegativeTestJSON.test_create_keypair_with_empty_name_string[id-1398abe1-4a84-45fb-9294-89f514daff00,negative]
# tempest.api.compute.keypairs.test_keypairs_negative.KeyPairsNegativeTestJSON.test_create_keypair_with_empty_public_key[id-dade320e-69ca-42a9-ba4a-345300f127e0,negative]
# tempest.api.compute.keypairs.test_keypairs_negative.KeyPairsNegativeTestJSON.test_create_keypair_with_long_keynames[id-3faa916f-779f-4103-aca7-dc3538eee1b7,negative]
# tempest.api.compute.keypairs.test_keypairs_negative.KeyPairsNegativeTestJSON.test_keypair_create_with_invalid_pub_key[id-29cca892-46ae-4d48-bc32-8fe7e731eb81,negative]
# tempest.api.compute.keypairs.test_keypairs_negative.KeyPairsNegativeTestJSON.test_keypair_delete_nonexistent_key[id-7cc32e47-4c42-489d-9623-c5e2cb5a2fa5,negative]
# tempest.api.compute.keypairs.test_keypairs_v22.KeyPairsV22TestJSON.test_get_keypair_detail[id-a4233d5d-52d8-47cc-9a25-e1864527e3df]
# tempest.api.compute.keypairs.test_keypairs_v22.KeyPairsV22TestJSON.test_keypair_create_delete[id-6c1d3123-4519-4742-9194-622cb1714b7d]
# tempest.api.compute.keypairs.test_keypairs_v22.KeyPairsV22TestJSON.test_keypair_create_with_pub_key[id-39c90c6a-304a-49dd-95ec-2366129def05]
# tempest.api.compute.keypairs.test_keypairs_v22.KeyPairsV22TestJSON.test_keypairs_create_list_delete[id-1d1dbedb-d7a0-432a-9d09-83f543c3c19b]
# tempest.api.compute.keypairs.test_keypairs_v22.KeyPairsV22TestJSON.test_keypairsv22_create_list_show[id-8726fa85-7f98-4b20-af9e-f710a4f3391c]
# tempest.api.compute.keypairs.test_keypairs_v22.KeyPairsV22TestJSON.test_keypairsv22_create_list_show_with_type[id-89d59d43-f735-441a-abcf-0601727f47b6]
# tempest.api.compute.limits.test_absolute_limits.AbsoluteLimitsTestJSON.test_absLimits_get[id-b54c66af-6ab6-4cf0-a9e5-a0cb58d75e0b]
# tempest.api.compute.limits.test_absolute_limits_negative.AbsoluteLimitsNegativeTestJSON.test_max_image_meta_exceed_limit[id-215cd465-d8ae-49c9-bf33-9c911913a5c8,negative]
# tempest.api.compute.security_groups.test_security_group_rules.SecurityGroupRulesTestJSON.test_security_group_rules_create[id-850795d7-d4d3-4e55-b527-a774c0123d3a,network,smoke]
# tempest.api.compute.security_groups.test_security_group_rules.SecurityGroupRulesTestJSON.test_security_group_rules_create_with_optional_cidr[id-7a01873e-3c38-4f30-80be-31a043cfe2fd,network]
# tempest.api.compute.security_groups.test_security_group_rules.SecurityGroupRulesTestJSON.test_security_group_rules_create_with_optional_group_id[id-7f5d2899-7705-4d4b-8458-4505188ffab6,network]
# tempest.api.compute.security_groups.test_security_group_rules.SecurityGroupRulesTestJSON.test_security_group_rules_delete_when_peer_group_deleted[id-fc5c5acf-2091-43a6-a6ae-e42760e9ffaf,network]
# tempest.api.compute.security_groups.test_security_group_rules.SecurityGroupRulesTestJSON.test_security_group_rules_list[id-a6154130-5a55-4850-8be4-5e9e796dbf17,network,smoke]
# tempest.api.compute.security_groups.test_security_group_rules_negative.SecurityGroupRulesNegativeTestJSON.test_create_security_group_rule_duplicate[id-8bd56d02-3ffa-4d67-9933-b6b9a01d6089,negative,network]
# tempest.api.compute.security_groups.test_security_group_rules_negative.SecurityGroupRulesNegativeTestJSON.test_create_security_group_rule_with_invalid_from_port[id-12bbc875-1045-4f7a-be46-751277baedb9,negative,network]
# tempest.api.compute.security_groups.test_security_group_rules_negative.SecurityGroupRulesNegativeTestJSON.test_create_security_group_rule_with_invalid_id[id-2244d7e4-adb7-4ecb-9930-2d77e123ce4f,negative,network]
# tempest.api.compute.security_groups.test_security_group_rules_negative.SecurityGroupRulesNegativeTestJSON.test_create_security_group_rule_with_invalid_ip_protocol[id-84c81249-9f6e-439c-9bbf-cbb0d2cddbdf,negative,network]
# tempest.api.compute.security_groups.test_security_group_rules_negative.SecurityGroupRulesNegativeTestJSON.test_create_security_group_rule_with_invalid_port_range[id-00296fa9-0576-496a-ae15-fbab843189e0,negative,network]
# tempest.api.compute.security_groups.test_security_group_rules_negative.SecurityGroupRulesNegativeTestJSON.test_create_security_group_rule_with_invalid_to_port[id-ff88804d-144f-45d1-bf59-dd155838a43a,negative,network]
# tempest.api.compute.security_groups.test_security_group_rules_negative.SecurityGroupRulesNegativeTestJSON.test_create_security_group_rule_with_non_existent_id[id-1d507e98-7951-469b-82c3-23f1e6b8c254,negative,network]
# tempest.api.compute.security_groups.test_security_group_rules_negative.SecurityGroupRulesNegativeTestJSON.test_delete_security_group_rule_with_non_existent_id[id-56fddcca-dbb8-4494-a0db-96e9f869527c,negative,network]
# tempest.api.compute.security_groups.test_security_groups.SecurityGroupsTestJSON.test_security_group_create_get_delete[id-ecc0da4a-2117-48af-91af-993cca39a615,network]
# tempest.api.compute.security_groups.test_security_groups.SecurityGroupsTestJSON.test_security_groups_create_list_delete[id-eb2b087d-633d-4d0d-a7bd-9e6ba35b32de,network,smoke]
# tempest.api.compute.security_groups.test_security_groups.SecurityGroupsTestJSON.test_server_security_groups[id-fe4abc0d-83f5-4c50-ad11-57a1127297a2,network]
# tempest.api.compute.security_groups.test_security_groups.SecurityGroupsTestJSON.test_update_security_groups[id-7d4e1d3c-3209-4d6d-b020-986304ebad1f,network]
# tempest.api.compute.security_groups.test_security_groups_negative.SecurityGroupsNegativeTestJSON.test_delete_nonexistent_security_group[id-6727c00b-214c-4f9e-9a52-017ac3e98411,negative,network]
# tempest.api.compute.security_groups.test_security_groups_negative.SecurityGroupsNegativeTestJSON.test_delete_security_group_without_passing_id[id-1438f330-8fa4-4aeb-8a94-37c250106d7f,negative,network]
# tempest.api.compute.security_groups.test_security_groups_negative.SecurityGroupsNegativeTestJSON.test_delete_the_default_security_group[id-36a1629f-c6da-4a26-b8b8-55e7e5d5cd58,negative,network]
# tempest.api.compute.security_groups.test_security_groups_negative.SecurityGroupsNegativeTestJSON.test_security_group_create_with_duplicate_name[id-9fdb4abc-6b66-4b27-b89c-eb215a956168,negative,network]
# tempest.api.compute.security_groups.test_security_groups_negative.SecurityGroupsNegativeTestJSON.test_security_group_create_with_invalid_group_description[id-777b6f14-aca9-4758-9e84-38783cfa58bc,negative,network]
# tempest.api.compute.security_groups.test_security_groups_negative.SecurityGroupsNegativeTestJSON.test_security_group_create_with_invalid_group_name[id-1759c3cb-b0fc-44b7-86ce-c99236be911d,negative,network]
# tempest.api.compute.security_groups.test_security_groups_negative.SecurityGroupsNegativeTestJSON.test_security_group_get_nonexistent_group[id-673eaec1-9b3e-48ed-bdf1-2786c1b9661c,negative,network]
# tempest.api.compute.security_groups.test_security_groups_negative.SecurityGroupsNegativeTestJSON.test_update_non_existent_security_group[id-27edee9c-873d-4da6-a68a-3c256efebe8f,negative,network]
# tempest.api.compute.security_groups.test_security_groups_negative.SecurityGroupsNegativeTestJSON.test_update_security_group_with_invalid_sg_des[id-97d12b1c-a610-4194-93f1-ba859e718b45,negative,network]
# tempest.api.compute.security_groups.test_security_groups_negative.SecurityGroupsNegativeTestJSON.test_update_security_group_with_invalid_sg_id[id-00579617-fe04-4e1c-9d08-ca7467d2e34b,negative,network]
# tempest.api.compute.security_groups.test_security_groups_negative.SecurityGroupsNegativeTestJSON.test_update_security_group_with_invalid_sg_name[id-cda8d8b4-59f8-4087-821d-20cf5a03b3b1,negative,network]
# tempest.api.compute.servers.test_attach_interfaces.AttachInterfacesTestJSON.test_add_remove_fixed_ip[id-c7e0e60b-ee45-43d0-abeb-8596fd42a2f9,network,smoke]
# tempest.api.compute.servers.test_attach_interfaces.AttachInterfacesTestJSON.test_create_list_show_delete_interfaces[id-73fe8f02-590d-4bf1-b184-e9ca81065051,network]
# tempest.api.compute.servers.test_availability_zone.AZV2TestJSON.test_get_availability_zone_list_with_non_admin_user[id-a8333aa2-205c-449f-a828-d38c2489bf25]
# tempest.api.compute.servers.test_create_server.ServersTestJSON.test_create_server_with_scheduler_hint_group[id-ed20d3fb-9d1f-4329-b160-543fbd5d9811]
# tempest.api.compute.servers.test_create_server.ServersTestJSON.test_host_name_is_same_as_server_name[id-ac1ad47f-984b-4441-9274-c9079b7a0666]
# tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers[id-9a438d88-10c6-4bcd-8b5b-5b6e25e1346f,smoke]
# tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers_with_detail[id-585e934c-448e-43c4-acbf-d06a9b899997]
# tempest.api.compute.servers.test_create_server.ServersTestJSON.test_verify_created_server_vcpus[id-cbc0f52f-05aa-492b-bdc1-84b575ca294b]
# tempest.api.compute.servers.test_create_server.ServersTestJSON.test_verify_duplicate_network_nics[id-1678d144-ed74-43f8-8e57-ab10dbf9b3c2]
# tempest.api.compute.servers.test_create_server.ServersTestJSON.test_verify_multiple_nics_order[id-0578d144-ed74-43f8-8e57-ab10dbf9b3c2]
# tempest.api.compute.servers.test_create_server.ServersTestJSON.test_verify_server_details[id-5de47127-9977-400a-936f-abcfbec1218f,smoke]
# tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_create_server_with_scheduler_hint_group[id-ed20d3fb-9d1f-4329-b160-543fbd5d9811]
# tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_host_name_is_same_as_server_name[id-ac1ad47f-984b-4441-9274-c9079b7a0666]
# tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers[id-9a438d88-10c6-4bcd-8b5b-5b6e25e1346f,smoke]
# tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers_with_detail[id-585e934c-448e-43c4-acbf-d06a9b899997]
# tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_verify_created_server_vcpus[id-cbc0f52f-05aa-492b-bdc1-84b575ca294b]
# tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_verify_duplicate_network_nics[id-1678d144-ed74-43f8-8e57-ab10dbf9b3c2]
# tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_verify_multiple_nics_order[id-0578d144-ed74-43f8-8e57-ab10dbf9b3c2]
# tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_verify_server_details[id-5de47127-9977-400a-936f-abcfbec1218f,smoke]
# tempest.api.compute.servers.test_create_server.ServersWithSpecificFlavorTestJSON.test_verify_created_server_ephemeral_disk[id-b3c7bcfc-bb5b-4e22-b517-c7f686b802ca]
# tempest.api.compute.servers.test_delete_server.DeleteServersAdminTestJSON.test_admin_delete_servers_of_others[id-73177903-6737-4f27-a60c-379e8ae8cf48]
# tempest.api.compute.servers.test_delete_server.DeleteServersAdminTestJSON.test_delete_server_while_in_error_state[id-99774678-e072-49d1-9d2a-49a59bc56063]
# tempest.api.compute.servers.test_delete_server.DeleteServersTestJSON.test_delete_active_server[id-925fdfb4-5b13-47ea-ac8a-c36ae6fddb05]
# tempest.api.compute.servers.test_delete_server.DeleteServersTestJSON.test_delete_server_while_in_attached_volume[id-d0f3f0d6-d9b6-4a32-8da4-23015dcab23c,volume]
# tempest.api.compute.servers.test_delete_server.DeleteServersTestJSON.test_delete_server_while_in_building_state[id-9e6e0c87-3352-42f7-9faf-5d6210dbd159]
# tempest.api.compute.servers.test_delete_server.DeleteServersTestJSON.test_delete_server_while_in_pause_state[id-943bd6e8-4d7a-4904-be83-7a6cc2d4213b]
# tempest.api.compute.servers.test_delete_server.DeleteServersTestJSON.test_delete_server_while_in_shelved_state[id-bb0cb402-09dd-4947-b6e5-5e7e1cfa61ad]
# tempest.api.compute.servers.test_delete_server.DeleteServersTestJSON.test_delete_server_while_in_shutoff_state[id-546d368c-bb6c-4645-979a-83ed16f3a6be]
# tempest.api.compute.servers.test_delete_server.DeleteServersTestJSON.test_delete_server_while_in_suspended_state[id-1f82ebd3-8253-4f4e-b93f-de9b7df56d8b]
# tempest.api.compute.servers.test_delete_server.DeleteServersTestJSON.test_delete_server_while_in_verify_resize_state[id-ab0c38b4-cdd8-49d3-9b92-0cb898723c01]
# tempest.api.compute.servers.test_disk_config.ServerDiskConfigTestJSON.test_rebuild_server_with_auto_disk_config[id-9c9fae77-4feb-402f-8450-bf1c8b609713]
# tempest.api.compute.servers.test_disk_config.ServerDiskConfigTestJSON.test_rebuild_server_with_manual_disk_config[id-bef56b09-2e8c-4883-a370-4950812f430e]
# tempest.api.compute.servers.test_disk_config.ServerDiskConfigTestJSON.test_resize_server_from_auto_to_manual[id-693d16f3-556c-489a-8bac-3d0ca2490bad]
# tempest.api.compute.servers.test_disk_config.ServerDiskConfigTestJSON.test_resize_server_from_manual_to_auto[id-414e7e93-45b5-44bc-8e03-55159c6bfc97]
# tempest.api.compute.servers.test_disk_config.ServerDiskConfigTestJSON.test_update_server_from_auto_to_manual[id-5ef18867-358d-4de9-b3c9-94d4ba35742f]
# tempest.api.compute.servers.test_instance_actions.InstanceActionsTestJSON.test_get_instance_action[id-aacc71ca-1d70-4aa5-bbf6-0ff71470e43c]
# tempest.api.compute.servers.test_instance_actions.InstanceActionsTestJSON.test_list_instance_actions[id-77ca5cc5-9990-45e0-ab98-1de8fead201a]
# tempest.api.compute.servers.test_instance_actions_negative.InstanceActionsNegativeTestJSON.test_get_instance_action_invalid_request[id-0269f40a-6f18-456c-b336-c03623c897f1,negative]
# tempest.api.compute.servers.test_instance_actions_negative.InstanceActionsNegativeTestJSON.test_list_instance_actions_non_existent_server[id-67e1fce6-7ec2-45c6-92d4-0a8f1a632910,negative]
# tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_flavor[id-80c574cc-0925-44ba-8602-299028357dd9]
# tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_image[id-b3304c3b-97df-46d2-8cd3-e2b6659724e7]
# tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_server_name[id-f9eb2b70-735f-416c-b260-9914ac6181e4]
# tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_server_status[id-de2612ab-b7dd-4044-b0b1-d2539601911f]
# tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_limit_results[id-67aec2d0-35fe-4503-9f92-f13272b867ed]
# tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_exceed_limit[id-37791bbd-90c0-4de0-831e-5f38cba9c6b3]
# tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_flavor[id-573637f5-7325-47bb-9144-3476d0416908]
# tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_image[id-05e8a8e7-9659-459a-989d-92c2f501f4ba]
# tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_limit[id-614cdfc1-d557-4bac-915b-3e67b48eee76]
# tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_server_name[id-9b067a7b-7fee-4f6a-b29c-be43fe18fc5a]
# tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_server_status[id-ca78e20e-fddb-4ce6-b7f7-bcbf8605e66e]
# tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_shutoff_status[id-451dbbb2-f330-4a9f-b0e1-5f5d2cb0f34c]
# tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_zero_limit[id-b1495414-2d93-414c-8019-849afe8d319e]
# tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filtered_by_ip[id-43a1242e-7b31-48d1-88f2-3f72aa9f2077]
# tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filtered_by_ip_regex[id-a905e287-c35e-42f2-b132-d02b09f3654a]
# tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filtered_by_name_regex[id-24a89b0c-0d55-4a28-847f-45075f19b27b]
# tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filtered_by_name_wildcard[id-e9f624ee-92af-4562-8bec-437945a18dcb]
# tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_changes_since_future_date[id-74745ad8-b346-45b5-b9b8-509d7447fc1f,negative]
# tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_changes_since_invalid_date[id-87d12517-e20a-4c9c-97b6-dd1628d6d6c9,negative]
# tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits[id-12c80a9f-2dec-480e-882b-98ba15757659]
# tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_greater_than_actual_count[id-d47c17fb-eebd-4287-8e95-f20a7e627b18,negative]
# tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_pass_negative_value[id-62610dd9-4713-4ee0-8beb-fd2c1aa7f950,negative]
# tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_pass_string[id-679bc053-5e70-4514-9800-3dfab1a380a6,negative]
# tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_flavor[id-5913660b-223b-44d4-a651-a0fbfd44ca75,negative]
# tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_image[id-ff01387d-c7ad-47b4-ae9e-64fa214638fe,negative]
# tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_server_name[id-e2c77c4a-000a-4af3-a0bd-629a328bde7c,negative]
# tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_detail_server_is_deleted[id-93055106-2d34-46fe-af68-d9ddbf7ee570,negative]
# tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_status_non_existing[id-fcdf192d-0f74-4d89-911f-1ec002b822c4,negative]
# tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_with_a_deleted_server[id-24a26f1a-1ddc-4eea-b0d7-a90cc874ad8f,negative]
# tempest.api.compute.servers.test_multiple_create.MultipleCreateTestJSON.test_multiple_create[id-61e03386-89c3-449c-9bb1-a06f423fd9d1]
# tempest.api.compute.servers.test_multiple_create.MultipleCreateTestJSON.test_multiple_create_with_reservation_return[id-864777fb-2f1e-44e3-b5b9-3eb6fa84f2f7]
# tempest.api.compute.servers.test_multiple_create_negative.MultipleCreateNegativeTestJSON.test_max_count_less_than_min_count[id-476da616-f1ef-4271-a9b1-b9fc87727cdf,negative]
# tempest.api.compute.servers.test_multiple_create_negative.MultipleCreateNegativeTestJSON.test_max_count_less_than_one[id-a6f9c2ab-e060-4b82-b23c-4532cb9390ff,negative]
|
||||
# tempest.api.compute.servers.test_multiple_create_negative.MultipleCreateNegativeTestJSON.test_max_count_non_integer[id-9c5698d1-d7af-4c80-b971-9d403135eea2,negative]
|
||||
# tempest.api.compute.servers.test_multiple_create_negative.MultipleCreateNegativeTestJSON.test_min_count_less_than_one[id-daf29d8d-e928-4a01-9a8c-b129603f3fc0,negative]
|
||||
# tempest.api.compute.servers.test_multiple_create_negative.MultipleCreateNegativeTestJSON.test_min_count_non_integer[id-999aa722-d624-4423-b813-0d1ac9884d7a,negative]
|
||||
# tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_change_server_password[id-6158df09-4b82-4ab3-af6d-29cf36af858d]
|
||||
# tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_create_backup[id-b963d4f1-94b3-4c40-9e97-7b583f46e470,image]
|
||||
# tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_get_console_output[id-4b8867e6-fffa-4d54-b1d1-6fdda57be2f3]
|
||||
# tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_get_console_output_server_id_in_shutoff_status[id-5b65d4e7-4ecd-437c-83c0-d6b79d927568]
|
||||
# tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_get_console_output_with_unlimited_size[id-89104062-69d8-4b19-a71b-f47b7af093d7]
|
||||
# tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_get_vnc_console[id-c6bc11bf-592e-4015-9319-1c98dc64daf5]
|
||||
# **DONE** tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_lock_unlock_server[id-80a8094c-211e-440a-ab88-9e59d556c7ee]
|
||||
# **DONE** tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_pause_unpause_server[id-bd61a9fd-062f-4670-972b-2d6c3e3b9e73]
|
||||
# **DONE** tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_reboot_server_hard[id-2cb1baf6-ac8d-4429-bf0d-ba8a0ba53e32,smoke]
|
||||
# **DONE** tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_reboot_server_soft[id-4640e3ef-a5df-482e-95a1-ceeeb0faa84d]
|
||||
# tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_rebuild_server[id-aaa6cdf3-55a7-461a-add9-1c8596b9a07c]
|
||||
# tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_rebuild_server_in_stop_state[id-30449a88-5aff-4f9b-9866-6ee9b17f906d]
|
||||
# **DONE**tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_resize_server_confirm[id-1499262a-9328-4eda-9068-db1ac57498d2]
|
||||
# **DONE**tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_resize_server_confirm_from_stopped[id-138b131d-66df-48c9-a171-64f45eb92962]
|
||||
# **DONE**tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_resize_server_revert[id-c03aab19-adb1-44f5-917d-c419577e9e68]
|
||||
# **DONE** tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_shelve_unshelve_server[id-77eba8e0-036e-4635-944b-f7a8f3b78dc9]
|
||||
# **DONE** tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_stop_start_server[id-af8eafd4-38a7-4a4b-bdbc-75145a580560]
|
||||
# **DONE** tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_suspend_resume_server[id-0d8ee21e-b749-462d-83da-b85b41c86c7f]
|
||||
# tempest.api.compute.servers.test_server_addresses.ServerAddressesTestJSON.test_list_server_addresses[id-6eb718c0-02d9-4d5e-acd1-4e0c269cef39,network,smoke]
|
||||
# tempest.api.compute.servers.test_server_addresses.ServerAddressesTestJSON.test_list_server_addresses_by_network[id-87bbc374-5538-4f64-b673-2b0e4443cc30,network,smoke]
|
||||
# tempest.api.compute.servers.test_server_addresses_negative.ServerAddressesNegativeTestJSON.test_list_server_addresses_by_network_neg[id-a2ab5144-78c0-4942-a0ed-cc8edccfd9ba,negative,network]
|
||||
# tempest.api.compute.servers.test_server_addresses_negative.ServerAddressesNegativeTestJSON.test_list_server_addresses_invalid_server_id[id-02c3f645-2d2e-4417-8525-68c0407d001b,negative,network]
|
||||
# tempest.api.compute.servers.test_server_group.ServerGroupTestJSON.test_create_delete_multiple_server_groups_with_same_name_policy[id-154dc5a4-a2fe-44b5-b99e-f15806a4a113]
|
||||
# tempest.api.compute.servers.test_server_group.ServerGroupTestJSON.test_create_delete_server_group_with_affinity_policy[id-5dc57eda-35b7-4af7-9e5f-3c2be3d2d68b]
|
||||
# tempest.api.compute.servers.test_server_group.ServerGroupTestJSON.test_create_delete_server_group_with_anti_affinity_policy[id-3645a102-372f-4140-afad-13698d850d23]
|
||||
# tempest.api.compute.servers.test_server_group.ServerGroupTestJSON.test_list_server_groups[id-d4874179-27b4-4d7d-80e4-6c560cdfe321]
|
||||
# tempest.api.compute.servers.test_server_group.ServerGroupTestJSON.test_show_server_group[id-b3545034-dd78-48f0-bdc2-a4adfa6d0ead]
|
||||
# tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_delete_server_metadata_item[id-127642d6-4c7b-4486-b7cd-07265a378658]
|
||||
# tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_get_server_metadata_item[id-3043c57d-7e0e-49a6-9a96-ad569c265e6a]
|
||||
# tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_list_server_metadata[id-479da087-92b3-4dcf-aeb3-fd293b2d14ce]
|
||||
# tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_set_server_metadata[id-211021f6-21de-4657-a68f-908878cfe251]
|
||||
# tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_set_server_metadata_item[id-58c02d4f-5c67-40be-8744-d3fa5982eb1c]
|
||||
# tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_update_metadata_empty_body[id-0f58d402-e34a-481d-8af8-b392b17426d9]
|
||||
# tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_update_server_metadata[id-344d981e-0c33-4997-8a5d-6c1d803e4134]
|
||||
# tempest.api.compute.servers.test_server_metadata_negative.ServerMetadataNegativeTestJSON.test_create_server_metadata_blank_key[id-92431555-4d8b-467c-b95b-b17daa5e57ff,negative]
|
||||
# tempest.api.compute.servers.test_server_metadata_negative.ServerMetadataNegativeTestJSON.test_delete_metadata_non_existent_server[id-6bbd88e1-f8b3-424d-ba10-ae21c45ada8d,negative]
|
||||
# tempest.api.compute.servers.test_server_metadata_negative.ServerMetadataNegativeTestJSON.test_list_server_metadata_non_existent_server[id-f408e78e-3066-4097-9299-3b0182da812e,negative]
|
||||
# tempest.api.compute.servers.test_server_metadata_negative.ServerMetadataNegativeTestJSON.test_metadata_items_limit[id-d8c0a210-a5c3-4664-be04-69d96746b547,negative]
|
||||
# tempest.api.compute.servers.test_server_metadata_negative.ServerMetadataNegativeTestJSON.test_server_create_metadata_key_too_long[id-fe114a8f-3a57-4eff-9ee2-4e14628df049,negative]
|
||||
# tempest.api.compute.servers.test_server_metadata_negative.ServerMetadataNegativeTestJSON.test_server_metadata_non_existent_server[id-4d9cd7a3-2010-4b41-b8fe-3bbf0b169466,negative]
|
||||
# tempest.api.compute.servers.test_server_metadata_negative.ServerMetadataNegativeTestJSON.test_set_metadata_non_existent_server[id-0df38c2a-3d4e-4db5-98d8-d4d9fa843a12,negative]
|
||||
# tempest.api.compute.servers.test_server_metadata_negative.ServerMetadataNegativeTestJSON.test_set_server_metadata_blank_key[id-96100343-7fa9-40d8-80fa-d29ef588ce1c,negative]
|
||||
# tempest.api.compute.servers.test_server_metadata_negative.ServerMetadataNegativeTestJSON.test_set_server_metadata_missing_metadata[id-64a91aee-9723-4863-be44-4c9d9f1e7d0e,negative]
|
||||
# tempest.api.compute.servers.test_server_metadata_negative.ServerMetadataNegativeTestJSON.test_update_metadata_non_existent_server[id-904b13dc-0ef2-4e4c-91cd-3b4a0f2f49d8,negative]
|
||||
# tempest.api.compute.servers.test_server_metadata_negative.ServerMetadataNegativeTestJSON.test_update_metadata_with_blank_key[id-a452f38c-05c2-4b47-bd44-a4f0bf5a5e48,negative]
|
||||
# tempest.api.compute.servers.test_server_metadata_negative.ServerMetadataNegativeTestJSON.test_wrong_key_passed_in_body[id-0025fbd6-a4ba-4cde-b8c2-96805dcfdabc,negative]
|
||||
# tempest.api.compute.servers.test_server_password.ServerPasswordTestJSON.test_delete_server_password[id-f8229e8b-b625-4493-800a-bde86ac611ea]
|
||||
# tempest.api.compute.servers.test_server_password.ServerPasswordTestJSON.test_get_server_password[id-f83b582f-62a8-4f22-85b0-0dee50ff783a]
|
||||
# tempest.api.compute.servers.test_server_personality.ServerPersonalityTestJSON.test_can_create_server_with_max_number_personality_files[id-52f12ee8-5180-40cc-b417-31572ea3d555]
|
||||
# tempest.api.compute.servers.test_server_personality.ServerPersonalityTestJSON.test_create_server_with_personality[id-3cfe87fd-115b-4a02-b942-7dc36a337fdf]
|
||||
# tempest.api.compute.servers.test_server_personality.ServerPersonalityTestJSON.test_personality_files_exceed_limit[id-176cd8c9-b9e8-48ee-a480-180beab292bf]
|
||||
# tempest.api.compute.servers.test_server_personality.ServerPersonalityTestJSON.test_rebuild_server_with_personality[id-128966d8-71fc-443c-8cab-08e24114ecc9]
|
||||
# tempest.api.compute.servers.test_server_rescue.ServerRescueTestJSON.test_rescue_unrescue_instance[id-fd032140-714c-42e4-a8fd-adcd8df06be6]
|
||||
# tempest.api.compute.servers.test_server_rescue.ServerRescueTestJSON.test_rescued_vm_add_remove_security_group[id-affca41f-7195-492d-8065-e09eee245404]
|
||||
# tempest.api.compute.servers.test_server_rescue.ServerRescueTestJSON.test_rescued_vm_associate_dissociate_floating_ip[id-4842e0cf-e87d-4d9d-b61f-f4791da3cacc]
|
||||
# tempest.api.compute.servers.test_server_rescue_negative.ServerRescueNegativeTestJSON.test_rescue_non_existent_server[id-6dfc0a55-3a77-4564-a144-1587b7971dde,negative]
|
||||
# tempest.api.compute.servers.test_server_rescue_negative.ServerRescueNegativeTestJSON.test_rescue_paused_instance[id-cc3a883f-43c0-4fb6-a9bb-5579d64984ed,negative]
|
||||
# tempest.api.compute.servers.test_server_rescue_negative.ServerRescueNegativeTestJSON.test_rescued_vm_attach_volume[id-d0ccac79-0091-4cf4-a1ce-26162d0cc55f,negative,volume]
|
||||
# tempest.api.compute.servers.test_server_rescue_negative.ServerRescueNegativeTestJSON.test_rescued_vm_detach_volume[id-f56e465b-fe10-48bf-b75d-646cda3a8bc9,negative,volume]
|
||||
# tempest.api.compute.servers.test_server_rescue_negative.ServerRescueNegativeTestJSON.test_rescued_vm_reboot[id-db22b618-f157-4566-a317-1b6d467a8094,negative]
|
||||
# tempest.api.compute.servers.test_server_rescue_negative.ServerRescueNegativeTestJSON.test_rescued_vm_rebuild[id-70cdb8a1-89f8-437d-9448-8844fd82bf46,negative]
|
||||
# tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_server_with_admin_password[id-b92d5ec7-b1dd-44a2-87e4-45e888c46ef0]
|
||||
# tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_server_with_ipv6_addr_only[id-38fb1d02-c3c5-41de-91d3-9bc2025a75eb]
|
||||
# tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_specify_keypair[id-f9e15296-d7f9-4e62-b53f-a04e89160833]
|
||||
# tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_with_existing_server_name[id-8fea6be7-065e-47cf-89b8-496e6f96c699]
|
||||
# tempest.api.compute.servers.test_servers.ServersTestJSON.test_update_access_server_address[id-89b90870-bc13-4b73-96af-f9d4f2b70077]
|
||||
# tempest.api.compute.servers.test_servers.ServersTestJSON.test_update_server_name[id-5e6ccff8-349d-4852-a8b3-055df7988dd2]
|
||||
# tempest.api.compute.servers.test_servers.ServersTestJSON.test_update_server_name_in_stop_state[id-6ac19cb1-27a3-40ec-b350-810bdc04c08e]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_numeric_server_name[id-fd57f159-68d6-4c2a-902b-03070828a87e,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_server_metadata_exceeds_length_limit[id-7fc74810-0bd2-4cd7-8244-4f33a9db865a,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_server_name_length_exceeds_256[id-c3e0fb12-07fc-4d76-a22e-37409887afe8,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_flavor[id-18f5227f-d155-4429-807c-ccb103887537,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_image[id-fcba1052-0a50-4cf3-b1ac-fae241edf02f,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_network_uuid[id-4e72dc2d-44c5-4336-9667-f7972e95c402,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_non_existent_keypair[id-7a2efc39-530c-47de-b875-2dd01c8d39bd,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_nonexistent_security_group[id-c5fa6041-80cd-483b-aa6d-4e45f19d093c,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_delete_non_existent_server[id-1041b4e6-514b-4855-96a5-e974b60870a3,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_delete_server_pass_id_exceeding_length_limit[id-f4d7279b-5fd2-4bf2-9ba4-ae35df0d18c5,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_delete_server_pass_negative_id[id-75f79124-277c-45e6-a373-a1d6803f4cc4,negative]
|
||||
# **DONE** tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_force_delete_nonexistent_server_id[id-6f47992b-5144-4250-9f8b-f00aa33950f3,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_get_console_output_of_non_existent_server[id-7dd919e7-413f-4198-bebb-35e2a01b13e9,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_get_non_existent_server[id-3436b02f-1b1e-4f03-881e-c6a602327439,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_invalid_access_ip_v4_address[id-7f70a4d1-608f-4794-9e56-cb182765972c,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_invalid_ip_v6_address[id-5226dd80-1e9c-4d8a-b5f9-b26ca4763fd0,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_pause_non_existent_server[id-6a8dc0c6-6cd4-4c0a-9f32-413881828091,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_pause_paused_server[id-d1417e7f-a509-41b5-a102-d5eed8613369,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_personality_file_contents_not_encoded[id-b8a7235e-5246-4a8f-a08e-b34877c6586f,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_reboot_deleted_server[id-581a397d-5eab-486f-9cf9-1014bbd4c984,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_reboot_non_existent_server[id-d4c023a0-9c55-4747-9dd5-413b820143c7,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_rebuild_deleted_server[id-98fa0458-1485-440f-873b-fe7f0d714930,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_rebuild_non_existent_server[id-d86141a7-906e-4731-b187-d64a2ea61422,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_resize_nonexistent_server[id-7ea45b3e-e770-46fa-bfcc-9daaf6d987c0,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_resize_server_with_non_existent_flavor[id-ced1a1d7-2ab6-45c9-b90f-b27d87b30efd,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_resize_server_with_null_flavor[id-45436a7d-a388-4a35-a9d8-3adc5d0d940b,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_restore_nonexistent_server_id[id-9c6d38cc-fcfb-437a-85b9-7b788af8bf01,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_restore_server_invalid_state[id-7fcadfab-bd6a-4753-8db7-4a51e51aade9,negative]
|
||||
# **DONE** tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_resume_non_existent_server[id-221cd282-bddb-4837-a683-89c2487389b6,negative]
|
||||
# **DONE** tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_resume_server_invalid_state[id-ccb6294d-c4c9-498f-8a43-554c098bfadb,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_server_name_blank[id-dbbfd247-c40c-449e-8f6c-d2aa7c7da7cf,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_shelve_non_existent_server[id-abca56e2-a892-48ea-b5e5-e07e69774816,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_shelve_shelved_server[id-443e4f9b-e6bf-4389-b601-3a710f15fddd,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_stop_non_existent_server[id-a31460a9-49e1-42aa-82ee-06e0bb7c2d03,negative]
|
||||
# **DONE** tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_suspend_non_existent_server[id-d1f032d5-7b6e-48aa-b252-d5f16dd994ca,negative]
|
||||
# **DONE** tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_suspend_server_invalid_state[id-7f323206-05a9-4bf8-996b-dd5b2036501b,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_unpause_non_existent_server[id-705b8e3a-e8a7-477c-a19b-6868fc24ac75,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_unpause_server_invalid_state[id-c8e639a7-ece8-42dd-a2e0-49615917ba4f,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_unshelve_non_existent_server[id-23d23b37-afaf-40d7-aa5d-5726f82d8821,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_unshelve_server_invalid_state[id-8f198ded-1cca-4228-9e65-c6b449c54880,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_name_of_non_existent_server[id-aa8eed43-e2cb-4ebf-930b-da14f6a21d81,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_server_name_length_exceeds_256[id-5c8e244c-dada-4590-9944-749c455b431f,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_server_set_empty_name[id-38204696-17c6-44da-9590-40f87fb5a899,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestMultiTenantJSON.test_delete_a_server_of_another_tenant[id-5c75009d-3eea-423e-bea3-61b09fd25f9c,negative]
|
||||
# tempest.api.compute.servers.test_servers_negative.ServersNegativeTestMultiTenantJSON.test_update_server_of_another_tenant[id-543d84c1-dd2e-4c6d-8cb2-b9da0efaa384,negative]
|
||||
# tempest.api.compute.servers.test_virtual_interfaces.VirtualInterfacesTestJSON.test_list_virtual_interfaces[id-96c4e2ef-5e4d-4d7f-87f5-fed6dca18016,network]
|
||||
# tempest.api.compute.servers.test_virtual_interfaces_negative.VirtualInterfacesNegativeTestJSON.test_list_virtual_interfaces_invalid_server_id[id-64ebd03c-1089-4306-93fa-60f5eb5c803c,negative,network]
|
||||
# tempest.api.compute.test_extensions.ExtensionsTestJSON.test_get_extension[id-05762f39-bdfa-4cdb-9b46-b78f8e78e2fd]
|
||||
# tempest.api.compute.test_extensions.ExtensionsTestJSON.test_list_extensions[id-3bb27738-b759-4e0d-a5fa-37d7a6df07d1]
|
||||
# tempest.api.compute.test_live_block_migration_negative.LiveBlockMigrationNegativeTestJSON.test_invalid_host_for_migration[id-7fb7856e-ae92-44c9-861a-af62d7830bcb,negative]
|
||||
# tempest.api.compute.test_networks.ComputeNetworksTest.test_list_networks[id-3fe07175-312e-49a5-a623-5f52eeada4c2]
|
||||
# tempest.api.compute.test_quotas.QuotasTestJSON.test_compare_tenant_quotas_with_default_quotas[id-cd65d997-f7e4-4966-a7e9-d5001b674fdc]
|
||||
# tempest.api.compute.test_quotas.QuotasTestJSON.test_get_default_quotas[id-9bfecac7-b966-4f47-913f-1a9e2c12134a]
|
||||
# tempest.api.compute.test_quotas.QuotasTestJSON.test_get_quotas[id-f1ef0a97-dbbb-4cca-adc5-c9fbc4f76107]
|
||||
# tempest.api.compute.test_tenant_networks.ComputeTenantNetworksTest.test_list_show_tenant_networks[id-edfea98e-bbe3-4c7a-9739-87b986baff26,network]
|
||||
# **DONE** tempest.api.compute.test_versions.TestVersions.test_get_version_details[id-b953a29e-929c-4a8e-81be-ec3a7e03cb76]
|
||||
# **DONE** tempest.api.compute.test_versions.TestVersions.test_list_api_versions[id-6c0a0990-43b6-4529-9b61-5fd8daf7c55c]
|
||||
# tempest.api.compute.volumes.test_attach_volume.AttachVolumeShelveTestJSON.test_attach_detach_volume[id-52e9045a-e90d-4c0d-9087-79d657faffff]
|
||||
# tempest.api.compute.volumes.test_attach_volume.AttachVolumeShelveTestJSON.test_attach_volume_shelved_or_offload_server[id-13a940b6-3474-4c3c-b03f-29b89112bfee]
|
||||
# **DONE** tempest.api.compute.volumes.test_attach_volume.AttachVolumeShelveTestJSON.test_detach_volume_shelved_or_offload_server[id-b54e86dd-a070-49c4-9c07-59ae6dae15aa]
|
||||
# tempest.api.compute.volumes.test_attach_volume.AttachVolumeShelveTestJSON.test_list_get_volume_attachments[id-7fa563fe-f0f7-43eb-9e22-a1ece036b513]
|
||||
# **DONE** tempest.api.compute.volumes.test_attach_volume.AttachVolumeTestJSON.test_attach_detach_volume[id-52e9045a-e90d-4c0d-9087-79d657faffff]
|
||||
# **DONE** tempest.api.compute.volumes.test_attach_volume.AttachVolumeTestJSON.test_list_get_volume_attachments[id-7fa563fe-f0f7-43eb-9e22-a1ece036b513]
|
||||
# tempest.api.compute.volumes.test_volume_snapshots.VolumesSnapshotsTestJSON.test_volume_snapshot_create_get_list_delete[id-cd4ec87d-7825-450d-8040-6e2068f2da8f]
|
||||
# tempest.api.compute.volumes.test_volumes_get.VolumesGetTestJSON.test_volume_create_get_delete[id-f10f25eb-9775-4d9d-9cbe-1cf54dae9d5f]
|
||||
# tempest.api.compute.volumes.test_volumes_list.VolumesTestJSON.test_volume_list[id-bc2dd1a0-15af-48e5-9990-f2e75a48325d]
|
||||
# tempest.api.compute.volumes.test_volumes_list.VolumesTestJSON.test_volume_list_param_limit[id-1048ed81-2baf-487a-b284-c0622b86e7b8]
|
||||
# tempest.api.compute.volumes.test_volumes_list.VolumesTestJSON.test_volume_list_param_offset_and_limit[id-51c22651-a074-4ea7-af0b-094f9331303e]
|
||||
# tempest.api.compute.volumes.test_volumes_list.VolumesTestJSON.test_volume_list_with_detail_param_limit[id-33985568-4965-49d5-9bcc-0aa007ca5b7a]
|
||||
# tempest.api.compute.volumes.test_volumes_list.VolumesTestJSON.test_volume_list_with_detail_param_offset_and_limit[id-06b6abc4-3f10-48e9-a7a1-3facc98f03e5]
|
||||
# tempest.api.compute.volumes.test_volumes_list.VolumesTestJSON.test_volume_list_with_details[id-bad0567a-5a4f-420b-851e-780b55bb867c]
|
||||
# tempest.api.compute.volumes.test_volumes_negative.VolumesNegativeTest.test_create_volume_with_invalid_size[id-5125ae14-152b-40a7-b3c5-eae15e9022ef,negative]
|
||||
# tempest.api.compute.volumes.test_volumes_negative.VolumesNegativeTest.test_create_volume_with_out_passing_size[id-131cb3a1-75cc-4d40-b4c3-1317f64719b0,negative]
|
||||
# tempest.api.compute.volumes.test_volumes_negative.VolumesNegativeTest.test_create_volume_with_size_zero[id-8cce995e-0a83-479a-b94d-e1e40b8a09d1,negative]
|
||||
# tempest.api.compute.volumes.test_volumes_negative.VolumesNegativeTest.test_delete_invalid_volume_id[id-62972737-124b-4513-b6cf-2f019f178494,negative]
|
||||
# tempest.api.compute.volumes.test_volumes_negative.VolumesNegativeTest.test_delete_volume_without_passing_volume_id[id-0d1417c5-4ae8-4c2c-adc5-5f0b864253e5,negative]
|
||||
# tempest.api.compute.volumes.test_volumes_negative.VolumesNegativeTest.test_get_invalid_volume_id[id-f01904f2-e975-4915-98ce-cb5fa27bde4f,negative]
|
||||
# **DONE** tempest.api.compute.volumes.test_volumes_negative.VolumesNegativeTest.test_get_volume_without_passing_volume_id[id-62bab09a-4c03-4617-8cca-8572bc94af9b,negative]
|
||||
# tempest.api.compute.volumes.test_volumes_negative.VolumesNegativeTest.test_volume_delete_nonexistent_volume_id[id-54a34226-d910-4b00-9ef8-8683e6c55846,negative]
|
||||
# tempest.api.compute.volumes.test_volumes_negative.VolumesNegativeTest.test_volume_get_nonexistent_volume_id[id-c03ea686-905b-41a2-8748-9635154b7c57,negative]
|
@ -1,326 +0,0 @@
|
||||
#!/bin/bash -e
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
export DEST=$BASE/new
|
||||
export TEMPEST_DIR=$DEST/tempest
|
||||
export TEMPEST_CONF=$TEMPEST_DIR/etc/tempest.conf
|
||||
|
||||
# preparation for the tests
|
||||
cd $TEMPEST_DIR
|
||||
|
||||
# Run functional test
|
||||
echo "Running Tricircle functional test suite..."
|
||||
|
||||
# all test cases with following prefix
|
||||
TESTCASES="(tempest.api.volume.test_volumes_list"
|
||||
TESTCASES="$TESTCASES|tempest.api.volume.test_volumes_get"
|
||||
# add new test cases like following line for volume_type test
|
||||
# TESTCASES="$TESTCASES|tempest.api.volume.admin.test_volumes_type"
|
||||
TESTCASES="$TESTCASES)"
|
||||
|
||||
ostestr --regex $TESTCASES
|
||||
|
||||
# --------------------- IMPORTANT begin -------------------- #
|
||||
# all following test cases are from Cinder tempest test cases,
|
||||
# the purpose to list them here is to check which test cases
|
||||
# are still not covered and tested in Cinder-APIGW.
|
||||
#
|
||||
# Those test cases which have been covered by ostestr running
|
||||
# above should be marked with **DONE** after the "#".
|
||||
# please leave the length of each line > 80 characters in order
|
||||
# to keep one test case one line.
|
||||
#
|
||||
# When you add new feature to Cinder-APIGW, please select
|
||||
# proper test cases to test against the feature, and marked
|
||||
# these test cases with **DONE** after the "#". For those test
|
||||
# cases which are not needed to be tested in Cinder-APIGW, for
|
||||
# example V1(which has been deprecated) should be marked with
|
||||
# **SKIP** after "#"
|
||||
#
|
||||
# The test cases running through ostestr could be filtered
|
||||
# by regex expression, for example, for Cinder volume type
|
||||
# releated test cases could be executed by a single clause:
|
||||
# ostestr --regex tempest.api.volume.admin.test_volume_types
|
||||
# --------------------- IMPORTANT end -----------------------#
|
||||
|
||||
|
||||
# tempest.api.volume.admin.test_multi_backend.VolumeMultiBackendV1Test.test_backend_name_distinction[id-46435ab1-a0af-4401-8373-f14e66b0dd58]
|
||||
# tempest.api.volume.admin.test_multi_backend.VolumeMultiBackendV1Test.test_backend_name_distinction_with_prefix[id-4236305b-b65a-4bfc-a9d2-69cb5b2bf2ed]
|
||||
# tempest.api.volume.admin.test_multi_backend.VolumeMultiBackendV1Test.test_backend_name_reporting[id-c1a41f3f-9dad-493e-9f09-3ff197d477cc]
|
||||
# tempest.api.volume.admin.test_multi_backend.VolumeMultiBackendV1Test.test_backend_name_reporting_with_prefix[id-f38e647f-ab42-4a31-a2e7-ca86a6485215]
|
||||
# tempest.api.volume.admin.test_multi_backend.VolumeMultiBackendV2Test.test_backend_name_distinction[id-46435ab1-a0af-4401-8373-f14e66b0dd58]
|
||||
# tempest.api.volume.admin.test_multi_backend.VolumeMultiBackendV2Test.test_backend_name_distinction_with_prefix[id-4236305b-b65a-4bfc-a9d2-69cb5b2bf2ed]
|
||||
# tempest.api.volume.admin.test_multi_backend.VolumeMultiBackendV2Test.test_backend_name_reporting[id-c1a41f3f-9dad-493e-9f09-3ff197d477cc]
|
||||
# tempest.api.volume.admin.test_multi_backend.VolumeMultiBackendV2Test.test_backend_name_reporting_with_prefix[id-f38e647f-ab42-4a31-a2e7-ca86a6485215]
|
||||
# tempest.api.volume.admin.test_snapshots_actions.SnapshotsActionsV1Test.test_reset_snapshot_status[id-3e13ca2f-48ea-49f3-ae1a-488e9180d535]
|
||||
# tempest.api.volume.admin.test_snapshots_actions.SnapshotsActionsV1Test.test_snapshot_force_delete_when_snapshot_is_creating[id-05f711b6-e629-4895-8103-7ca069f2073a]
|
||||
# tempest.api.volume.admin.test_snapshots_actions.SnapshotsActionsV1Test.test_snapshot_force_delete_when_snapshot_is_deleting[id-92ce8597-b992-43a1-8868-6316b22a969e]
|
||||
# tempest.api.volume.admin.test_snapshots_actions.SnapshotsActionsV1Test.test_snapshot_force_delete_when_snapshot_is_error[id-645a4a67-a1eb-4e8e-a547-600abac1525d]
|
||||
# tempest.api.volume.admin.test_snapshots_actions.SnapshotsActionsV1Test.test_snapshot_force_delete_when_snapshot_is_error_deleting[id-bf89080f-8129-465e-9327-b2f922666ba5]
|
||||
# tempest.api.volume.admin.test_snapshots_actions.SnapshotsActionsV1Test.test_update_snapshot_status[id-41288afd-d463-485e-8f6e-4eea159413eb]
|
||||
# tempest.api.volume.admin.test_snapshots_actions.SnapshotsActionsV2Test.test_reset_snapshot_status[id-3e13ca2f-48ea-49f3-ae1a-488e9180d535]
|
||||
# tempest.api.volume.admin.test_snapshots_actions.SnapshotsActionsV2Test.test_snapshot_force_delete_when_snapshot_is_creating[id-05f711b6-e629-4895-8103-7ca069f2073a]
|
||||
# tempest.api.volume.admin.test_snapshots_actions.SnapshotsActionsV2Test.test_snapshot_force_delete_when_snapshot_is_deleting[id-92ce8597-b992-43a1-8868-6316b22a969e]
|
||||
# tempest.api.volume.admin.test_snapshots_actions.SnapshotsActionsV2Test.test_snapshot_force_delete_when_snapshot_is_error[id-645a4a67-a1eb-4e8e-a547-600abac1525d]
|
||||
# tempest.api.volume.admin.test_snapshots_actions.SnapshotsActionsV2Test.test_snapshot_force_delete_when_snapshot_is_error_deleting[id-bf89080f-8129-465e-9327-b2f922666ba5]
|
||||
# tempest.api.volume.admin.test_snapshots_actions.SnapshotsActionsV2Test.test_update_snapshot_status[id-41288afd-d463-485e-8f6e-4eea159413eb]
|
||||
# tempest.api.volume.admin.test_volume_hosts.VolumeHostsAdminV1TestsJSON.test_list_hosts[id-d5f3efa2-6684-4190-9ced-1c2f526352ad]
|
||||
# tempest.api.volume.admin.test_volume_hosts.VolumeHostsAdminV2TestsJSON.test_list_hosts[id-d5f3efa2-6684-4190-9ced-1c2f526352ad]
|
||||
# tempest.api.volume.admin.test_volume_quotas.BaseVolumeQuotasAdminV2TestJSON.test_delete_quota[id-874b35a9-51f1-4258-bec5-cd561b6690d3]
|
||||
# tempest.api.volume.admin.test_volume_quotas.BaseVolumeQuotasAdminV2TestJSON.test_list_default_quotas[id-2be020a2-5fdd-423d-8d35-a7ffbc36e9f7]
|
||||
# tempest.api.volume.admin.test_volume_quotas.BaseVolumeQuotasAdminV2TestJSON.test_list_quotas[id-59eada70-403c-4cef-a2a3-a8ce2f1b07a0]
|
||||
# tempest.api.volume.admin.test_volume_quotas.BaseVolumeQuotasAdminV2TestJSON.test_quota_usage[id-ae8b6091-48ad-4bfa-a188-bbf5cc02115f]
|
||||
# tempest.api.volume.admin.test_volume_quotas.BaseVolumeQuotasAdminV2TestJSON.test_show_quota_usage[id-18c51ae9-cb03-48fc-b234-14a19374dbed]
|
||||
# tempest.api.volume.admin.test_volume_quotas.BaseVolumeQuotasAdminV2TestJSON.test_update_all_quota_resources_for_tenant[id-3d45c99e-cc42-4424-a56e-5cbd212b63a6]
|
||||
# tempest.api.volume.admin.test_volume_quotas.VolumeQuotasAdminV1TestJSON.test_delete_quota[id-874b35a9-51f1-4258-bec5-cd561b6690d3]
|
||||
# tempest.api.volume.admin.test_volume_quotas.VolumeQuotasAdminV1TestJSON.test_list_default_quotas[id-2be020a2-5fdd-423d-8d35-a7ffbc36e9f7]
|
||||
# tempest.api.volume.admin.test_volume_quotas.VolumeQuotasAdminV1TestJSON.test_list_quotas[id-59eada70-403c-4cef-a2a3-a8ce2f1b07a0]
|
||||
# tempest.api.volume.admin.test_volume_quotas.VolumeQuotasAdminV1TestJSON.test_quota_usage[id-ae8b6091-48ad-4bfa-a188-bbf5cc02115f]
|
||||
# tempest.api.volume.admin.test_volume_quotas.VolumeQuotasAdminV1TestJSON.test_show_quota_usage[id-18c51ae9-cb03-48fc-b234-14a19374dbed]
|
||||
# tempest.api.volume.admin.test_volume_quotas.VolumeQuotasAdminV1TestJSON.test_update_all_quota_resources_for_tenant[id-3d45c99e-cc42-4424-a56e-5cbd212b63a6]
|
||||
# tempest.api.volume.admin.test_volume_quotas_negative.BaseVolumeQuotasNegativeV2TestJSON.test_quota_volume_gigabytes[id-2dc27eee-8659-4298-b900-169d71a91374,negative]
|
||||
# tempest.api.volume.admin.test_volume_quotas_negative.BaseVolumeQuotasNegativeV2TestJSON.test_quota_volumes[id-bf544854-d62a-47f2-a681-90f7a47d86b6,negative]
|
||||
# tempest.api.volume.admin.test_volume_quotas_negative.VolumeQuotasNegativeV1TestJSON.test_quota_volume_gigabytes[id-2dc27eee-8659-4298-b900-169d71a91374,negative]
|
||||
# tempest.api.volume.admin.test_volume_quotas_negative.VolumeQuotasNegativeV1TestJSON.test_quota_volumes[id-bf544854-d62a-47f2-a681-90f7a47d86b6,negative]
|
||||
# tempest.api.volume.admin.test_volume_services.VolumesServicesV1TestJSON.test_get_service_by_host_name[id-178710e4-7596-4e08-9333-745cb8bc4f8d]
|
||||
# tempest.api.volume.admin.test_volume_services.VolumesServicesV1TestJSON.test_get_service_by_service_and_host_name[id-ffa6167c-4497-4944-a464-226bbdb53908]
|
||||
# tempest.api.volume.admin.test_volume_services.VolumesServicesV1TestJSON.test_get_service_by_service_binary_name[id-63a3e1ca-37ee-4983-826d-83276a370d25]
|
||||
# tempest.api.volume.admin.test_volume_services.VolumesServicesV1TestJSON.test_list_services[id-e0218299-0a59-4f43-8b2b-f1c035b3d26d]
|
||||
# tempest.api.volume.admin.test_volume_services.VolumesServicesV2TestJSON.test_get_service_by_host_name[id-178710e4-7596-4e08-9333-745cb8bc4f8d]
|
||||
# tempest.api.volume.admin.test_volume_services.VolumesServicesV2TestJSON.test_get_service_by_service_and_host_name[id-ffa6167c-4497-4944-a464-226bbdb53908]
|
||||
# tempest.api.volume.admin.test_volume_services.VolumesServicesV2TestJSON.test_get_service_by_service_binary_name[id-63a3e1ca-37ee-4983-826d-83276a370d25]
|
||||
# tempest.api.volume.admin.test_volume_services.VolumesServicesV2TestJSON.test_list_services[id-e0218299-0a59-4f43-8b2b-f1c035b3d26d]
|
||||
# tempest.api.volume.admin.test_volume_snapshot_quotas_negative.VolumeSnapshotNegativeV1TestJSON.test_quota_volume_gigabytes_snapshots[id-c99a1ca9-6cdf-498d-9fdf-25832babef27,negative]
|
||||
# tempest.api.volume.admin.test_volume_snapshot_quotas_negative.VolumeSnapshotNegativeV1TestJSON.test_quota_volume_snapshots[id-02bbf63f-6c05-4357-9d98-2926a94064ff,negative]
|
||||
# tempest.api.volume.admin.test_volume_snapshot_quotas_negative.VolumeSnapshotQuotasNegativeV2TestJSON.test_quota_volume_gigabytes_snapshots[id-c99a1ca9-6cdf-498d-9fdf-25832babef27,negative]
|
||||
# tempest.api.volume.admin.test_volume_snapshot_quotas_negative.VolumeSnapshotQuotasNegativeV2TestJSON.test_quota_volume_snapshots[id-02bbf63f-6c05-4357-9d98-2926a94064ff,negative]
|
||||
# tempest.api.volume.admin.test_volume_types.VolumeTypesV1Test.test_volume_crud_with_volume_type_and_extra_specs[id-c03cc62c-f4e9-4623-91ec-64ce2f9c1260]
|
||||
# tempest.api.volume.admin.test_volume_types.VolumeTypesV1Test.test_volume_type_create_get_delete[id-4e955c3b-49db-4515-9590-0c99f8e471ad]
|
||||
# tempest.api.volume.admin.test_volume_types.VolumeTypesV1Test.test_volume_type_encryption_create_get_delete[id-7830abd0-ff99-4793-a265-405684a54d46]
|
||||
# tempest.api.volume.admin.test_volume_types.VolumeTypesV1Test.test_volume_type_list[id-9d9b28e3-1b2e-4483-a2cc-24aa0ea1de54]
|
||||
# tempest.api.volume.admin.test_volume_types.VolumeTypesV2Test.test_volume_crud_with_volume_type_and_extra_specs[id-c03cc62c-f4e9-4623-91ec-64ce2f9c1260]
|
||||
# tempest.api.volume.admin.test_volume_types.VolumeTypesV2Test.test_volume_type_create_get_delete[id-4e955c3b-49db-4515-9590-0c99f8e471ad]
|
||||
# tempest.api.volume.admin.test_volume_types.VolumeTypesV2Test.test_volume_type_encryption_create_get_delete[id-7830abd0-ff99-4793-a265-405684a54d46]
|
||||
# tempest.api.volume.admin.test_volume_types.VolumeTypesV2Test.test_volume_type_list[id-9d9b28e3-1b2e-4483-a2cc-24aa0ea1de54]
|
||||
# tempest.api.volume.admin.test_volume_types_extra_specs.VolumeTypesExtraSpecsV1Test.test_volume_type_extra_spec_create_get_delete[id-d4772798-601f-408a-b2a5-29e8a59d1220]
|
||||
# tempest.api.volume.admin.test_volume_types_extra_specs.VolumeTypesExtraSpecsV1Test.test_volume_type_extra_specs_list[id-b42923e9-0452-4945-be5b-d362ae533e60]
|
||||
# tempest.api.volume.admin.test_volume_types_extra_specs.VolumeTypesExtraSpecsV1Test.test_volume_type_extra_specs_update[id-0806db36-b4a0-47a1-b6f3-c2e7f194d017]
|
||||
# tempest.api.volume.admin.test_volume_types_extra_specs.VolumeTypesExtraSpecsV2Test.test_volume_type_extra_spec_create_get_delete[id-d4772798-601f-408a-b2a5-29e8a59d1220]
|
||||
# tempest.api.volume.admin.test_volume_types_extra_specs.VolumeTypesExtraSpecsV2Test.test_volume_type_extra_specs_list[id-b42923e9-0452-4945-be5b-d362ae533e60]
|
||||
# tempest.api.volume.admin.test_volume_types_extra_specs.VolumeTypesExtraSpecsV2Test.test_volume_type_extra_specs_update[id-0806db36-b4a0-47a1-b6f3-c2e7f194d017]
|
||||
# tempest.api.volume.admin.test_volume_types_extra_specs_negative.ExtraSpecsNegativeV1Test.test_create_invalid_body[id-bc772c71-1ed4-4716-b945-8b5ed0f15e87]
|
||||
# tempest.api.volume.admin.test_volume_types_extra_specs_negative.ExtraSpecsNegativeV1Test.test_create_none_body[id-c821bdc8-43a4-4bf4-86c8-82f3858d5f7d]
|
||||
# tempest.api.volume.admin.test_volume_types_extra_specs_negative.ExtraSpecsNegativeV1Test.test_create_nonexistent_type_id[id-49d5472c-a53d-4eab-a4d3-450c4db1c545]
|
||||
# tempest.api.volume.admin.test_volume_types_extra_specs_negative.ExtraSpecsNegativeV1Test.test_delete_nonexistent_volume_type_id[id-031cda8b-7d23-4246-8bf6-bbe73fd67074]
|
||||
# tempest.api.volume.admin.test_volume_types_extra_specs_negative.ExtraSpecsNegativeV1Test.test_get_nonexistent_extra_spec_id[id-c881797d-12ff-4f1a-b09d-9f6212159753]
|
||||
# tempest.api.volume.admin.test_volume_types_extra_specs_negative.ExtraSpecsNegativeV1Test.test_get_nonexistent_volume_type_id[id-9f402cbd-1838-4eb4-9554-126a6b1908c9]
|
||||
# tempest.api.volume.admin.test_volume_types_extra_specs_negative.ExtraSpecsNegativeV1Test.test_list_nonexistent_volume_type_id[id-dee5cf0c-cdd6-4353-b70c-e847050d71fb]
|
||||
# tempest.api.volume.admin.test_volume_types_extra_specs_negative.ExtraSpecsNegativeV1Test.test_update_multiple_extra_spec[id-a77dfda2-9100-448e-9076-ed1711f4bdfc]
|
||||
# tempest.api.volume.admin.test_volume_types_extra_specs_negative.ExtraSpecsNegativeV1Test.test_update_no_body[id-08961d20-5cbb-4910-ac0f-89ad6dbb2da1]
|
||||
# tempest.api.volume.admin.test_volume_types_extra_specs_negative.ExtraSpecsNegativeV1Test.test_update_none_extra_spec_id[id-9bf7a657-b011-4aec-866d-81c496fbe5c8]
|
||||
# tempest.api.volume.admin.test_volume_types_extra_specs_negative.ExtraSpecsNegativeV1Test.test_update_nonexistent_extra_spec_id[id-25e5a0ee-89b3-4c53-8310-236f76c75365]
|
||||
# tempest.api.volume.admin.test_volume_types_extra_specs_negative.ExtraSpecsNegativeV2Test.test_create_invalid_body[id-bc772c71-1ed4-4716-b945-8b5ed0f15e87]
|
||||
# tempest.api.volume.admin.test_volume_types_extra_specs_negative.ExtraSpecsNegativeV2Test.test_create_none_body[id-c821bdc8-43a4-4bf4-86c8-82f3858d5f7d]
|
||||
# tempest.api.volume.admin.test_volume_types_extra_specs_negative.ExtraSpecsNegativeV2Test.test_create_nonexistent_type_id[id-49d5472c-a53d-4eab-a4d3-450c4db1c545]
|
||||
# tempest.api.volume.admin.test_volume_types_extra_specs_negative.ExtraSpecsNegativeV2Test.test_delete_nonexistent_volume_type_id[id-031cda8b-7d23-4246-8bf6-bbe73fd67074]
|
||||
# tempest.api.volume.admin.test_volume_types_extra_specs_negative.ExtraSpecsNegativeV2Test.test_get_nonexistent_extra_spec_id[id-c881797d-12ff-4f1a-b09d-9f6212159753]
|
||||
# tempest.api.volume.admin.test_volume_types_extra_specs_negative.ExtraSpecsNegativeV2Test.test_get_nonexistent_volume_type_id[id-9f402cbd-1838-4eb4-9554-126a6b1908c9]
|
||||
# tempest.api.volume.admin.test_volume_types_extra_specs_negative.ExtraSpecsNegativeV2Test.test_list_nonexistent_volume_type_id[id-dee5cf0c-cdd6-4353-b70c-e847050d71fb]
|
||||
# tempest.api.volume.admin.test_volume_types_extra_specs_negative.ExtraSpecsNegativeV2Test.test_update_multiple_extra_spec[id-a77dfda2-9100-448e-9076-ed1711f4bdfc]
|
||||
# tempest.api.volume.admin.test_volume_types_extra_specs_negative.ExtraSpecsNegativeV2Test.test_update_no_body[id-08961d20-5cbb-4910-ac0f-89ad6dbb2da1]
|
||||
# tempest.api.volume.admin.test_volume_types_extra_specs_negative.ExtraSpecsNegativeV2Test.test_update_none_extra_spec_id[id-9bf7a657-b011-4aec-866d-81c496fbe5c8]
|
||||
# tempest.api.volume.admin.test_volume_types_extra_specs_negative.ExtraSpecsNegativeV2Test.test_update_nonexistent_extra_spec_id[id-25e5a0ee-89b3-4c53-8310-236f76c75365]
|
||||
# tempest.api.volume.admin.test_volume_types_negative.VolumeTypesNegativeV1Test.test_create_with_empty_name[id-878b4e57-faa2-4659-b0d1-ce740a06ae81]
|
||||
# tempest.api.volume.admin.test_volume_types_negative.VolumeTypesNegativeV1Test.test_create_with_nonexistent_volume_type[id-b48c98f2-e662-4885-9b71-032256906314]
|
||||
# tempest.api.volume.admin.test_volume_types_negative.VolumeTypesNegativeV1Test.test_delete_nonexistent_type_id[id-6b3926d2-7d73-4896-bc3d-e42dfd11a9f6]
|
||||
# tempest.api.volume.admin.test_volume_types_negative.VolumeTypesNegativeV1Test.test_get_nonexistent_type_id[id-994610d6-0476-4018-a644-a2602ef5d4aa]
|
||||
# tempest.api.volume.admin.test_volume_types_negative.VolumeTypesNegativeV2Test.test_create_with_empty_name[id-878b4e57-faa2-4659-b0d1-ce740a06ae81]
|
||||
# tempest.api.volume.admin.test_volume_types_negative.VolumeTypesNegativeV2Test.test_create_with_nonexistent_volume_type[id-b48c98f2-e662-4885-9b71-032256906314]
|
||||
# tempest.api.volume.admin.test_volume_types_negative.VolumeTypesNegativeV2Test.test_delete_nonexistent_type_id[id-6b3926d2-7d73-4896-bc3d-e42dfd11a9f6]
|
||||
# tempest.api.volume.admin.test_volume_types_negative.VolumeTypesNegativeV2Test.test_get_nonexistent_type_id[id-994610d6-0476-4018-a644-a2602ef5d4aa]
|
||||
# tempest.api.volume.admin.test_volumes_actions.VolumesActionsV1Test.test_volume_force_delete_when_volume_is_attaching[id-db8d607a-aa2e-4beb-b51d-d4005c232011]
|
||||
# tempest.api.volume.admin.test_volumes_actions.VolumesActionsV1Test.test_volume_force_delete_when_volume_is_creating[id-21737d5a-92f2-46d7-b009-a0cc0ee7a570]
|
||||
# tempest.api.volume.admin.test_volumes_actions.VolumesActionsV1Test.test_volume_force_delete_when_volume_is_error[id-3e33a8a8-afd4-4d64-a86b-c27a185c5a4a]
|
||||
# tempest.api.volume.admin.test_volumes_actions.VolumesActionsV1Test.test_volume_reset_status[id-d063f96e-a2e0-4f34-8b8a-395c42de1845]
|
||||
# tempest.api.volume.admin.test_volumes_actions.VolumesActionsV2Test.test_volume_force_delete_when_volume_is_attaching[id-db8d607a-aa2e-4beb-b51d-d4005c232011]
|
||||
# tempest.api.volume.admin.test_volumes_actions.VolumesActionsV2Test.test_volume_force_delete_when_volume_is_creating[id-21737d5a-92f2-46d7-b009-a0cc0ee7a570]
|
||||
# tempest.api.volume.admin.test_volumes_actions.VolumesActionsV2Test.test_volume_force_delete_when_volume_is_error[id-3e33a8a8-afd4-4d64-a86b-c27a185c5a4a]
|
||||
# tempest.api.volume.admin.test_volumes_actions.VolumesActionsV2Test.test_volume_reset_status[id-d063f96e-a2e0-4f34-8b8a-395c42de1845]
|
||||
# tempest.api.volume.admin.test_volumes_backup.VolumesBackupsV1Test.test_volume_backup_create_get_detailed_list_restore_delete[id-a66eb488-8ee1-47d4-8e9f-575a095728c6]
|
||||
# tempest.api.volume.admin.test_volumes_backup.VolumesBackupsV1Test.test_volume_backup_export_import[id-a99c54a1-dd80-4724-8a13-13bf58d4068d]
|
||||
# tempest.api.volume.admin.test_volumes_backup.VolumesBackupsV2Test.test_volume_backup_create_get_detailed_list_restore_delete[id-a66eb488-8ee1-47d4-8e9f-575a095728c6]
|
||||
# tempest.api.volume.admin.test_volumes_backup.VolumesBackupsV2Test.test_volume_backup_export_import[id-a99c54a1-dd80-4724-8a13-13bf58d4068d]
|
||||
# tempest.api.volume.test_availability_zone.AvailabilityZoneV1TestJSON.test_get_availability_zone_list[id-01f1ae88-eba9-4c6b-a011-6f7ace06b725]
|
||||
# tempest.api.volume.test_availability_zone.AvailabilityZoneV2TestJSON.test_get_availability_zone_list[id-01f1ae88-eba9-4c6b-a011-6f7ace06b725]
|
||||
# tempest.api.volume.test_extensions.ExtensionsV1TestJSON.test_list_extensions[id-94607eb0-43a5-47ca-82aa-736b41bd2e2c]
|
||||
# tempest.api.volume.test_extensions.ExtensionsV2TestJSON.test_list_extensions[id-94607eb0-43a5-47ca-82aa-736b41bd2e2c]
|
||||
# tempest.api.volume.test_qos.QosSpecsV1TestJSON.test_associate_disassociate_qos[id-1dd93c76-6420-485d-a771-874044c416ac]
|
||||
# tempest.api.volume.test_qos.QosSpecsV1TestJSON.test_create_delete_qos_with_back_end_consumer[id-b115cded-8f58-4ee4-aab5-9192cfada08f]
|
||||
# tempest.api.volume.test_qos.QosSpecsV1TestJSON.test_create_delete_qos_with_both_consumer[id-f88d65eb-ea0d-487d-af8d-71f4011575a4]
|
||||
# tempest.api.volume.test_qos.QosSpecsV1TestJSON.test_create_delete_qos_with_front_end_consumer[id-7e15f883-4bef-49a9-95eb-f94209a1ced1]
|
||||
# tempest.api.volume.test_qos.QosSpecsV1TestJSON.test_get_qos[id-7aa214cc-ac1a-4397-931f-3bb2e83bb0fd]
|
||||
# tempest.api.volume.test_qos.QosSpecsV1TestJSON.test_list_qos[id-75e04226-bcf7-4595-a34b-fdf0736f38fc]
|
||||
# tempest.api.volume.test_qos.QosSpecsV1TestJSON.test_set_unset_qos_key[id-ed00fd85-4494-45f2-8ceb-9e2048919aed]
|
||||
# tempest.api.volume.test_qos.QosSpecsV2TestJSON.test_associate_disassociate_qos[id-1dd93c76-6420-485d-a771-874044c416ac]
|
||||
# tempest.api.volume.test_qos.QosSpecsV2TestJSON.test_create_delete_qos_with_back_end_consumer[id-b115cded-8f58-4ee4-aab5-9192cfada08f]
|
||||
# tempest.api.volume.test_qos.QosSpecsV2TestJSON.test_create_delete_qos_with_both_consumer[id-f88d65eb-ea0d-487d-af8d-71f4011575a4]
|
||||
# tempest.api.volume.test_qos.QosSpecsV2TestJSON.test_create_delete_qos_with_front_end_consumer[id-7e15f883-4bef-49a9-95eb-f94209a1ced1]
|
||||
# tempest.api.volume.test_qos.QosSpecsV2TestJSON.test_get_qos[id-7aa214cc-ac1a-4397-931f-3bb2e83bb0fd]
|
||||
# tempest.api.volume.test_qos.QosSpecsV2TestJSON.test_list_qos[id-75e04226-bcf7-4595-a34b-fdf0736f38fc]
|
||||
# tempest.api.volume.test_qos.QosSpecsV2TestJSON.test_set_unset_qos_key[id-ed00fd85-4494-45f2-8ceb-9e2048919aed]
|
||||
# tempest.api.volume.test_snapshot_metadata.SnapshotV1MetadataTestJSON.test_create_get_delete_snapshot_metadata[id-a2f20f99-e363-4584-be97-bc33afb1a56c]
|
||||
# tempest.api.volume.test_snapshot_metadata.SnapshotV1MetadataTestJSON.test_update_snapshot_metadata[id-bd2363bc-de92-48a4-bc98-28943c6e4be1]
|
||||
# tempest.api.volume.test_snapshot_metadata.SnapshotV1MetadataTestJSON.test_update_snapshot_metadata_item[id-e8ff85c5-8f97-477f-806a-3ac364a949ed]
|
||||
# tempest.api.volume.test_snapshot_metadata.SnapshotV2MetadataTestJSON.test_create_get_delete_snapshot_metadata[id-a2f20f99-e363-4584-be97-bc33afb1a56c]
|
||||
# tempest.api.volume.test_snapshot_metadata.SnapshotV2MetadataTestJSON.test_update_snapshot_metadata[id-bd2363bc-de92-48a4-bc98-28943c6e4be1]
|
||||
# tempest.api.volume.test_snapshot_metadata.SnapshotV2MetadataTestJSON.test_update_snapshot_metadata_item[id-e8ff85c5-8f97-477f-806a-3ac364a949ed]
|
||||
# tempest.api.volume.test_volume_metadata.VolumesV1MetadataTest.test_create_get_delete_volume_metadata[id-6f5b125b-f664-44bf-910f-751591fe5769]
|
||||
# tempest.api.volume.test_volume_metadata.VolumesV1MetadataTest.test_update_volume_metadata[id-774d2918-9beb-4f30-b3d1-2a4e8179ec0a]
|
||||
# tempest.api.volume.test_volume_metadata.VolumesV1MetadataTest.test_update_volume_metadata_item[id-862261c5-8df4-475a-8c21-946e50e36a20]
|
||||
# tempest.api.volume.test_volume_metadata.VolumesV2MetadataTest.test_create_get_delete_volume_metadata[id-6f5b125b-f664-44bf-910f-751591fe5769]
|
||||
# tempest.api.volume.test_volume_metadata.VolumesV2MetadataTest.test_update_volume_metadata[id-774d2918-9beb-4f30-b3d1-2a4e8179ec0a]
|
||||
# tempest.api.volume.test_volume_metadata.VolumesV2MetadataTest.test_update_volume_metadata_item[id-862261c5-8df4-475a-8c21-946e50e36a20]
|
||||
# tempest.api.volume.test_volume_transfers.VolumesV1TransfersTest.test_create_get_list_accept_volume_transfer[id-4d75b645-a478-48b1-97c8-503f64242f1a]
|
||||
# tempest.api.volume.test_volume_transfers.VolumesV1TransfersTest.test_create_list_delete_volume_transfer[id-ab526943-b725-4c07-b875-8e8ef87a2c30]
|
||||
# tempest.api.volume.test_volume_transfers.VolumesV2TransfersTest.test_create_get_list_accept_volume_transfer[id-4d75b645-a478-48b1-97c8-503f64242f1a]
|
||||
# tempest.api.volume.test_volume_transfers.VolumesV2TransfersTest.test_create_list_delete_volume_transfer[id-ab526943-b725-4c07-b875-8e8ef87a2c30]
|
||||
# tempest.api.volume.test_volumes_actions.VolumesV1ActionsTest.test_attach_detach_volume_to_instance[compute,id-fff42874-7db5-4487-a8e1-ddda5fb5288d,smoke,stress]
|
||||
# tempest.api.volume.test_volumes_actions.VolumesV1ActionsTest.test_get_volume_attachment[compute,id-9516a2c8-9135-488c-8dd6-5677a7e5f371,stress]
|
||||
# tempest.api.volume.test_volumes_actions.VolumesV1ActionsTest.test_reserve_unreserve_volume[id-92c4ef64-51b2-40c0-9f7e-4749fbaaba33]
|
||||
# tempest.api.volume.test_volumes_actions.VolumesV1ActionsTest.test_volume_bootable[id-63e21b4c-0a0c-41f6-bfc3-7c2816815599]
|
||||
# tempest.api.volume.test_volumes_actions.VolumesV1ActionsTest.test_volume_readonly_update[id-fff74e1e-5bd3-4b33-9ea9-24c103bc3f59]
|
||||
# tempest.api.volume.test_volumes_actions.VolumesV1ActionsTest.test_volume_upload[id-d8f1ca95-3d5b-44a3-b8ca-909691c9532d,image]
|
||||
# tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_attach_detach_volume_to_instance[compute,id-fff42874-7db5-4487-a8e1-ddda5fb5288d,smoke,stress]
|
||||
# tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_get_volume_attachment[compute,id-9516a2c8-9135-488c-8dd6-5677a7e5f371,stress]
|
||||
# tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_reserve_unreserve_volume[id-92c4ef64-51b2-40c0-9f7e-4749fbaaba33]
|
||||
# tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_volume_bootable[id-63e21b4c-0a0c-41f6-bfc3-7c2816815599]
|
||||
# tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_volume_readonly_update[id-fff74e1e-5bd3-4b33-9ea9-24c103bc3f59]
|
||||
# tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_volume_upload[id-d8f1ca95-3d5b-44a3-b8ca-909691c9532d,image]
|
||||
# tempest.api.volume.test_volumes_extend.VolumesV1ExtendTest.test_volume_extend[id-9a36df71-a257-43a5-9555-dc7c88e66e0e]
|
||||
# tempest.api.volume.test_volumes_extend.VolumesV2ExtendTest.test_volume_extend[id-9a36df71-a257-43a5-9555-dc7c88e66e0e]
|
||||
# **SKIP** tempest.api.volume.test_volumes_get.VolumesV1GetTest.test_volume_create_get_update_delete[id-27fb0e9f-fb64-41dd-8bdb-1ffa762f0d51,smoke]
|
||||
# **SKIP** tempest.api.volume.test_volumes_get.VolumesV1GetTest.test_volume_create_get_update_delete_as_clone[id-3f591b4a-7dc6-444c-bd51-77469506b3a1]
|
||||
# **SKIP** tempest.api.volume.test_volumes_get.VolumesV1GetTest.test_volume_create_get_update_delete_from_image[id-54a01030-c7fc-447c-86ee-c1182beae638,image,smoke]
|
||||
# **DONE** tempest.api.volume.test_volumes_get.VolumesV2GetTest.test_volume_create_get_update_delete[id-27fb0e9f-fb64-41dd-8bdb-1ffa762f0d51,smoke]
|
||||
# **DONE** tempest.api.volume.test_volumes_get.VolumesV2GetTest.test_volume_create_get_update_delete_as_clone[id-3f591b4a-7dc6-444c-bd51-77469506b3a1]
|
||||
# **DONE** tempest.api.volume.test_volumes_get.VolumesV2GetTest.test_volume_create_get_update_delete_from_image[id-54a01030-c7fc-447c-86ee-c1182beae638,image,smoke]
|
||||
# **SKIP** tempest.api.volume.test_volumes_list.VolumesV1ListTestJSON.test_volume_list[id-0b6ddd39-b948-471f-8038-4787978747c4,smoke]
|
||||
# **SKIP** tempest.api.volume.test_volumes_list.VolumesV1ListTestJSON.test_volume_list_by_name[id-a28e8da4-0b56-472f-87a8-0f4d3f819c02]
|
||||
# **SKIP** tempest.api.volume.test_volumes_list.VolumesV1ListTestJSON.test_volume_list_details_by_name[id-2de3a6d4-12aa-403b-a8f2-fdeb42a89623]
|
||||
# **SKIP** tempest.api.volume.test_volumes_list.VolumesV1ListTestJSON.test_volume_list_param_display_name_and_status[id-777c87c1-2fc4-4883-8b8e-5c0b951d1ec8]
|
||||
# **SKIP** tempest.api.volume.test_volumes_list.VolumesV1ListTestJSON.test_volume_list_with_detail_param_display_name_and_status[id-856ab8ca-6009-4c37-b691-be1065528ad4]
|
||||
# **SKIP** tempest.api.volume.test_volumes_list.VolumesV1ListTestJSON.test_volume_list_with_detail_param_metadata[id-1ca92d3c-4a8e-4b43-93f5-e4c7fb3b291d]
|
||||
# **SKIP** tempest.api.volume.test_volumes_list.VolumesV1ListTestJSON.test_volume_list_with_details[id-adcbb5a7-5ad8-4b61-bd10-5380e111a877]
|
||||
# **SKIP** tempest.api.volume.test_volumes_list.VolumesV1ListTestJSON.test_volume_list_with_param_metadata[id-b5ebea1b-0603-40a0-bb41-15fcd0a53214]
|
||||
# **SKIP** tempest.api.volume.test_volumes_list.VolumesV1ListTestJSON.test_volumes_list_by_availability_zone[id-c0cfa863-3020-40d7-b587-e35f597d5d87]
|
||||
# **SKIP** tempest.api.volume.test_volumes_list.VolumesV1ListTestJSON.test_volumes_list_by_status[id-39654e13-734c-4dab-95ce-7613bf8407ce]
|
||||
# **SKIP** tempest.api.volume.test_volumes_list.VolumesV1ListTestJSON.test_volumes_list_details_by_availability_zone[id-e1b80d13-94f0-4ba2-a40e-386af29f8db1]
|
||||
# **SKIP** tempest.api.volume.test_volumes_list.VolumesV1ListTestJSON.test_volumes_list_details_by_status[id-2943f712-71ec-482a-bf49-d5ca06216b9f]
|
||||
# **DONE** tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list[id-0b6ddd39-b948-471f-8038-4787978747c4,smoke]
|
||||
# **DONE** tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_by_name[id-a28e8da4-0b56-472f-87a8-0f4d3f819c02]
|
||||
# **DONE** tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_details_by_name[id-2de3a6d4-12aa-403b-a8f2-fdeb42a89623]
|
||||
# **DONE** tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_param_display_name_and_status[id-777c87c1-2fc4-4883-8b8e-5c0b951d1ec8]
|
||||
# **DONE** tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_with_detail_param_display_name_and_status[id-856ab8ca-6009-4c37-b691-be1065528ad4]
|
||||
# **DONE** tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_with_detail_param_metadata[id-1ca92d3c-4a8e-4b43-93f5-e4c7fb3b291d]
|
||||
# **DONE** tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_with_details[id-adcbb5a7-5ad8-4b61-bd10-5380e111a877]
|
||||
# **DONE** tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_with_param_metadata[id-b5ebea1b-0603-40a0-bb41-15fcd0a53214]
|
||||
# **DONE** tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volumes_list_by_availability_zone[id-c0cfa863-3020-40d7-b587-e35f597d5d87]
|
||||
# **DONE** tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volumes_list_by_status[id-39654e13-734c-4dab-95ce-7613bf8407ce]
|
||||
# **DONE** tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volumes_list_details_by_availability_zone[id-e1b80d13-94f0-4ba2-a40e-386af29f8db1]
|
||||
# **DONE** tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volumes_list_details_by_status[id-2943f712-71ec-482a-bf49-d5ca06216b9f]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV1NegativeTest.test_attach_volumes_with_nonexistent_volume_id[compute,id-f5e56b0a-5d02-43c1-a2a7-c9b792c2e3f6,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV1NegativeTest.test_create_volume_with_invalid_size[id-1ed83a8a-682d-4dfb-a30e-ee63ffd6c049,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV1NegativeTest.test_create_volume_with_nonexistent_snapshot_id[id-0c36f6ae-4604-4017-b0a9-34fdc63096f9,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV1NegativeTest.test_create_volume_with_nonexistent_source_volid[id-47c73e08-4be8-45bb-bfdf-0c4e79b88344,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV1NegativeTest.test_create_volume_with_nonexistent_volume_type[id-10254ed8-3849-454e-862e-3ab8e6aa01d2,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV1NegativeTest.test_create_volume_with_out_passing_size[id-9387686f-334f-4d31-a439-33494b9e2683,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV1NegativeTest.test_create_volume_with_size_negative[id-8b472729-9eba-446e-a83b-916bdb34bef7,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV1NegativeTest.test_create_volume_with_size_zero[id-41331caa-eaf4-4001-869d-bc18c1869360,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV1NegativeTest.test_delete_invalid_volume_id[id-1f035827-7c32-4019-9240-b4ec2dbd9dfd,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV1NegativeTest.test_delete_volume_without_passing_volume_id[id-441a1550-5d44-4b30-af0f-a6d402f52026,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV1NegativeTest.test_detach_volumes_with_invalid_volume_id[id-9f9c24e4-011d-46b5-b992-952140ce237a,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV1NegativeTest.test_get_invalid_volume_id[id-30799cfd-7ee4-446c-b66c-45b383ed211b,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV1NegativeTest.test_get_volume_without_passing_volume_id[id-c6c3db06-29ad-4e91-beb0-2ab195fe49e3,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV1NegativeTest.test_list_volumes_detail_with_invalid_status[id-ba94b27b-be3f-496c-a00e-0283b373fa75,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV1NegativeTest.test_list_volumes_detail_with_nonexistent_name[id-9ca17820-a0e7-4cbd-a7fa-f4468735e359,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV1NegativeTest.test_list_volumes_with_invalid_status[id-143b279b-7522-466b-81be-34a87d564a7c,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV1NegativeTest.test_list_volumes_with_nonexistent_name[id-0f4aa809-8c7b-418f-8fb3-84c7a5dfc52f,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV1NegativeTest.test_reserve_volume_with_negative_volume_status[id-449c4ed2-ecdd-47bb-98dc-072aeccf158c,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV1NegativeTest.test_reserve_volume_with_nonexistent_volume_id[id-ac6084c0-0546-45f9-b284-38a367e0e0e2,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV1NegativeTest.test_unreserve_volume_with_nonexistent_volume_id[id-eb467654-3dc1-4a72-9b46-47c29d22654c,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV1NegativeTest.test_update_volume_with_empty_volume_id[id-72aeca85-57a5-4c1f-9057-f320f9ea575b,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV1NegativeTest.test_update_volume_with_invalid_volume_id[id-e66e40d6-65e6-4e75-bdc7-636792fa152d,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV1NegativeTest.test_update_volume_with_nonexistent_volume_id[id-0186422c-999a-480e-a026-6a665744c30c,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV1NegativeTest.test_volume_delete_nonexistent_volume_id[id-555efa6e-efcd-44ef-8a3b-4a7ca4837a29,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV1NegativeTest.test_volume_extend_with_None_size[id-355218f1-8991-400a-a6bb-971239287d92,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV1NegativeTest.test_volume_extend_with_non_number_size[id-5d0b480d-e833-439f-8a5a-96ad2ed6f22f,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV1NegativeTest.test_volume_extend_with_nonexistent_volume_id[id-8f05a943-013c-4063-ac71-7baf561e82eb,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV1NegativeTest.test_volume_extend_with_size_smaller_than_original_size[id-e0c75c74-ee34-41a9-9288-2a2051452854,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV1NegativeTest.test_volume_extend_without_passing_volume_id[id-aff8ba64-6d6f-4f2e-bc33-41a08ee9f115,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV1NegativeTest.test_volume_get_nonexistent_volume_id[id-f131c586-9448-44a4-a8b0-54ca838aa43e,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_attach_volumes_with_nonexistent_volume_id[compute,id-f5e56b0a-5d02-43c1-a2a7-c9b792c2e3f6,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_invalid_size[id-1ed83a8a-682d-4dfb-a30e-ee63ffd6c049,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_nonexistent_snapshot_id[id-0c36f6ae-4604-4017-b0a9-34fdc63096f9,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_nonexistent_source_volid[id-47c73e08-4be8-45bb-bfdf-0c4e79b88344,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_nonexistent_volume_type[id-10254ed8-3849-454e-862e-3ab8e6aa01d2,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_out_passing_size[id-9387686f-334f-4d31-a439-33494b9e2683,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_size_negative[id-8b472729-9eba-446e-a83b-916bdb34bef7,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_size_zero[id-41331caa-eaf4-4001-869d-bc18c1869360,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_delete_invalid_volume_id[id-1f035827-7c32-4019-9240-b4ec2dbd9dfd,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_delete_volume_without_passing_volume_id[id-441a1550-5d44-4b30-af0f-a6d402f52026,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_detach_volumes_with_invalid_volume_id[id-9f9c24e4-011d-46b5-b992-952140ce237a,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_get_invalid_volume_id[id-30799cfd-7ee4-446c-b66c-45b383ed211b,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_get_volume_without_passing_volume_id[id-c6c3db06-29ad-4e91-beb0-2ab195fe49e3,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_list_volumes_detail_with_invalid_status[id-ba94b27b-be3f-496c-a00e-0283b373fa75,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_list_volumes_detail_with_nonexistent_name[id-9ca17820-a0e7-4cbd-a7fa-f4468735e359,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_list_volumes_with_invalid_status[id-143b279b-7522-466b-81be-34a87d564a7c,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_list_volumes_with_nonexistent_name[id-0f4aa809-8c7b-418f-8fb3-84c7a5dfc52f,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_reserve_volume_with_negative_volume_status[id-449c4ed2-ecdd-47bb-98dc-072aeccf158c,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_reserve_volume_with_nonexistent_volume_id[id-ac6084c0-0546-45f9-b284-38a367e0e0e2,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_unreserve_volume_with_nonexistent_volume_id[id-eb467654-3dc1-4a72-9b46-47c29d22654c,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_update_volume_with_empty_volume_id[id-72aeca85-57a5-4c1f-9057-f320f9ea575b,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_update_volume_with_invalid_volume_id[id-e66e40d6-65e6-4e75-bdc7-636792fa152d,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_update_volume_with_nonexistent_volume_id[id-0186422c-999a-480e-a026-6a665744c30c,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_volume_delete_nonexistent_volume_id[id-555efa6e-efcd-44ef-8a3b-4a7ca4837a29,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_volume_extend_with_None_size[id-355218f1-8991-400a-a6bb-971239287d92,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_volume_extend_with_non_number_size[id-5d0b480d-e833-439f-8a5a-96ad2ed6f22f,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_volume_extend_with_nonexistent_volume_id[id-8f05a943-013c-4063-ac71-7baf561e82eb,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_volume_extend_with_size_smaller_than_original_size[id-e0c75c74-ee34-41a9-9288-2a2051452854,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_volume_extend_without_passing_volume_id[id-aff8ba64-6d6f-4f2e-bc33-41a08ee9f115,negative]
|
||||
# tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_volume_get_nonexistent_volume_id[id-f131c586-9448-44a4-a8b0-54ca838aa43e,negative]
|
||||
# tempest.api.volume.test_volumes_snapshots.VolumesV1SnapshotTestJSON.test_snapshot_create_get_list_update_delete[id-2a8abbe4-d871-46db-b049-c41f5af8216e]
|
||||
# tempest.api.volume.test_volumes_snapshots.VolumesV1SnapshotTestJSON.test_snapshot_create_with_volume_in_use[compute,id-b467b54c-07a4-446d-a1cf-651dedcc3ff1]
|
||||
# tempest.api.volume.test_volumes_snapshots.VolumesV1SnapshotTestJSON.test_snapshot_list_param_limit[id-db4d8e0a-7a2e-41cc-a712-961f6844e896]
|
||||
# tempest.api.volume.test_volumes_snapshots.VolumesV1SnapshotTestJSON.test_snapshot_list_param_limit_equals_infinite[id-a1427f61-420e-48a5-b6e3-0b394fa95400]
|
||||
# tempest.api.volume.test_volumes_snapshots.VolumesV1SnapshotTestJSON.test_snapshot_list_param_limit_equals_zero[id-e3b44b7f-ae87-45b5-8a8c-66110eb24d0a]
|
||||
# tempest.api.volume.test_volumes_snapshots.VolumesV1SnapshotTestJSON.test_snapshots_list_details_with_params[id-220a1022-1fcd-4a74-a7bd-6b859156cda2]
|
||||
# tempest.api.volume.test_volumes_snapshots.VolumesV1SnapshotTestJSON.test_snapshots_list_with_params[id-59f41f43-aebf-48a9-ab5d-d76340fab32b]
|
||||
# tempest.api.volume.test_volumes_snapshots.VolumesV1SnapshotTestJSON.test_volume_from_snapshot[id-677863d1-3142-456d-b6ac-9924f667a7f4]
|
||||
# tempest.api.volume.test_volumes_snapshots.VolumesV2SnapshotTestJSON.test_snapshot_create_get_list_update_delete[id-2a8abbe4-d871-46db-b049-c41f5af8216e]
|
||||
# tempest.api.volume.test_volumes_snapshots.VolumesV2SnapshotTestJSON.test_snapshot_create_with_volume_in_use[compute,id-b467b54c-07a4-446d-a1cf-651dedcc3ff1]
|
||||
# tempest.api.volume.test_volumes_snapshots.VolumesV2SnapshotTestJSON.test_snapshot_list_param_limit[id-db4d8e0a-7a2e-41cc-a712-961f6844e896]
|
||||
# tempest.api.volume.test_volumes_snapshots.VolumesV2SnapshotTestJSON.test_snapshot_list_param_limit_equals_infinite[id-a1427f61-420e-48a5-b6e3-0b394fa95400]
|
||||
# tempest.api.volume.test_volumes_snapshots.VolumesV2SnapshotTestJSON.test_snapshot_list_param_limit_equals_zero[id-e3b44b7f-ae87-45b5-8a8c-66110eb24d0a]
|
||||
# tempest.api.volume.test_volumes_snapshots.VolumesV2SnapshotTestJSON.test_snapshots_list_details_with_params[id-220a1022-1fcd-4a74-a7bd-6b859156cda2]
|
||||
# tempest.api.volume.test_volumes_snapshots.VolumesV2SnapshotTestJSON.test_snapshots_list_with_params[id-59f41f43-aebf-48a9-ab5d-d76340fab32b]
|
||||
# tempest.api.volume.test_volumes_snapshots.VolumesV2SnapshotTestJSON.test_volume_from_snapshot[id-677863d1-3142-456d-b6ac-9924f667a7f4]
|
||||
# tempest.api.volume.test_volumes_snapshots_negative.VolumesV1SnapshotNegativeTestJSON.test_create_snapshot_with_nonexistent_volume_id[id-e3e466af-70ab-4f4b-a967-ab04e3532ea7,negative]
|
||||
# tempest.api.volume.test_volumes_snapshots_negative.VolumesV1SnapshotNegativeTestJSON.test_create_snapshot_without_passing_volume_id[id-bb9da53e-d335-4309-9c15-7e76fd5e4d6d,negative]
|
||||
# tempest.api.volume.test_volumes_snapshots_negative.VolumesV2SnapshotNegativeTestJSON.test_create_snapshot_with_nonexistent_volume_id[id-e3e466af-70ab-4f4b-a967-ab04e3532ea7,negative]
|
||||
# tempest.api.volume.test_volumes_snapshots_negative.VolumesV2SnapshotNegativeTestJSON.test_create_snapshot_without_passing_volume_id[id-bb9da53e-d335-4309-9c15-7e76fd5e4d6d,negative]
|
||||
# tempest.api.volume.v2.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_details_pagination[id-e9138a2c-f67b-4796-8efa-635c196d01de]
|
||||
# tempest.api.volume.v2.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_details_with_multiple_params[id-2a7064eb-b9c3-429b-b888-33928fc5edd3]
|
||||
# tempest.api.volume.v2.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_pagination[id-af55e775-8e4b-4feb-8719-215c43b0238c]
|
||||
# tempest.api.volume.v2.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_with_detail_param_marker[id-46eff077-100b-427f-914e-3db2abcdb7e2]
|
@ -1,172 +0,0 @@
|
||||
# Copyright (c) 2015 Huawei Technologies Co., Ltd.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import pecan
|
||||
from pecan.configuration import set_config
|
||||
from pecan.testing import load_test_app
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_config import fixture as fixture_config
|
||||
from oslo_serialization import jsonutils
|
||||
from oslo_utils import uuidutils
|
||||
|
||||
from tricircle.cinder_apigw import app
|
||||
from tricircle.tests import base
|
||||
|
||||
|
||||
OPT_GROUP_NAME = 'keystone_authtoken'
|
||||
cfg.CONF.import_group(OPT_GROUP_NAME, "keystonemiddleware.auth_token")
|
||||
|
||||
|
||||
class Cinder_API_GW_FunctionalTest(base.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(Cinder_API_GW_FunctionalTest, self).setUp()
|
||||
|
||||
self.addCleanup(set_config, {}, overwrite=True)
|
||||
|
||||
cfg.CONF.register_opts(app.common_opts)
|
||||
|
||||
self.CONF = self.useFixture(fixture_config.Config()).conf
|
||||
|
||||
self.CONF.set_override('auth_strategy', 'noauth')
|
||||
|
||||
self.app = self._make_app()
|
||||
|
||||
def _make_app(self, enable_acl=False):
|
||||
self.config = {
|
||||
'app': {
|
||||
'root':
|
||||
'tricircle.cinder_apigw.controllers.root.RootController',
|
||||
'modules': ['tricircle.cinder_apigw'],
|
||||
'enable_acl': enable_acl,
|
||||
'errors': {
|
||||
400: '/error',
|
||||
'__force_dict__': True
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
return load_test_app(self.config)
|
||||
|
||||
def tearDown(self):
|
||||
super(Cinder_API_GW_FunctionalTest, self).tearDown()
|
||||
cfg.CONF.unregister_opts(app.common_opts)
|
||||
pecan.set_config({}, overwrite=True)
|
||||
|
||||
|
||||
class TestRootController(Cinder_API_GW_FunctionalTest):
|
||||
"""Test version listing on root URI."""
|
||||
|
||||
def test_get(self):
|
||||
response = self.app.get('/')
|
||||
self.assertEqual(response.status_int, 200)
|
||||
json_body = jsonutils.loads(response.body)
|
||||
versions = json_body.get('versions')
|
||||
self.assertEqual(1, len(versions))
|
||||
self.assertEqual(versions[0]["id"], "v2.0")
|
||||
|
||||
def _test_method_returns_405(self, method):
|
||||
api_method = getattr(self.app, method)
|
||||
response = api_method('/', expect_errors=True)
|
||||
self.assertEqual(response.status_int, 405)
|
||||
|
||||
def test_post(self):
|
||||
self._test_method_returns_405('post')
|
||||
|
||||
def test_put(self):
|
||||
self._test_method_returns_405('put')
|
||||
|
||||
def test_patch(self):
|
||||
self._test_method_returns_405('patch')
|
||||
|
||||
def test_delete(self):
|
||||
self._test_method_returns_405('delete')
|
||||
|
||||
def test_head(self):
|
||||
self._test_method_returns_405('head')
|
||||
|
||||
|
||||
class TestV2Controller(Cinder_API_GW_FunctionalTest):
|
||||
|
||||
def test_get(self):
|
||||
response = self.app.get('/v2/')
|
||||
self.assertEqual(response.status_int, 200)
|
||||
json_body = jsonutils.loads(response.body)
|
||||
version = json_body.get('version')
|
||||
self.assertEqual(version["id"], "v2.0")
|
||||
|
||||
def _test_method_returns_405(self, method):
|
||||
api_method = getattr(self.app, method)
|
||||
response = api_method('/v2/', expect_errors=True)
|
||||
self.assertEqual(response.status_int, 405)
|
||||
|
||||
def test_post(self):
|
||||
self._test_method_returns_405('post')
|
||||
|
||||
def test_put(self):
|
||||
self._test_method_returns_405('put')
|
||||
|
||||
def test_patch(self):
|
||||
self._test_method_returns_405('patch')
|
||||
|
||||
def test_delete(self):
|
||||
self._test_method_returns_405('delete')
|
||||
|
||||
def test_head(self):
|
||||
self._test_method_returns_405('head')
|
||||
|
||||
|
||||
class TestErrors(Cinder_API_GW_FunctionalTest):
|
||||
|
||||
def test_404(self):
|
||||
response = self.app.get('/assert_called_once', expect_errors=True)
|
||||
self.assertEqual(response.status_int, 404)
|
||||
|
||||
def test_bad_method(self):
|
||||
response = self.app.patch('/v2/123',
|
||||
expect_errors=True)
|
||||
self.assertEqual(response.status_int, 404)
|
||||
|
||||
|
||||
class TestRequestID(Cinder_API_GW_FunctionalTest):
|
||||
|
||||
def test_request_id(self):
|
||||
response = self.app.get('/')
|
||||
self.assertIn('x-openstack-request-id', response.headers)
|
||||
self.assertTrue(
|
||||
response.headers['x-openstack-request-id'].startswith('req-'))
|
||||
id_part = response.headers['x-openstack-request-id'].split('req-')[1]
|
||||
self.assertTrue(uuidutils.is_uuid_like(id_part))
|
||||
|
||||
|
||||
class TestKeystoneAuth(Cinder_API_GW_FunctionalTest):
|
||||
|
||||
def setUp(self):
|
||||
super(Cinder_API_GW_FunctionalTest, self).setUp()
|
||||
|
||||
self.addCleanup(set_config, {}, overwrite=True)
|
||||
|
||||
cfg.CONF.register_opts(app.common_opts)
|
||||
|
||||
self.CONF = self.useFixture(fixture_config.Config()).conf
|
||||
|
||||
cfg.CONF.set_override('auth_strategy', 'keystone')
|
||||
|
||||
self.app = self._make_app()
|
||||
|
||||
def test_auth_enforced(self):
|
||||
response = self.app.get('/', expect_errors=True)
|
||||
self.assertEqual(response.status_int, 401)
|
@ -1,629 +0,0 @@
|
||||
# Copyright (c) 2015 Huawei Technologies Co., Ltd.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from mock import patch
|
||||
import urlparse
|
||||
|
||||
import pecan
|
||||
from pecan.configuration import set_config
|
||||
from pecan.testing import load_test_app
|
||||
|
||||
from requests import Response
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_config import fixture as fixture_config
|
||||
from oslo_serialization import jsonutils
|
||||
from oslo_utils import uuidutils
|
||||
|
||||
from tricircle.cinder_apigw import app
|
||||
|
||||
from tricircle.common import constants as cons
|
||||
from tricircle.common import context
|
||||
from tricircle.common import httpclient as hclient
|
||||
|
||||
from tricircle.db import api as db_api
|
||||
from tricircle.db import core
|
||||
|
||||
from tricircle.tests import base
|
||||
|
||||
|
||||
OPT_GROUP_NAME = 'keystone_authtoken'
|
||||
cfg.CONF.import_group(OPT_GROUP_NAME, "keystonemiddleware.auth_token")
|
||||
|
||||
FAKE_AZ = 'fake_az'
|
||||
fake_volumes = []
|
||||
|
||||
|
||||
def fake_volumes_forward_req(ctx, action, b_header, b_url, b_req_body):
|
||||
resp = Response()
|
||||
resp.status_code = 404
|
||||
|
||||
parse = urlparse.urlsplit(b_url)
|
||||
if action == 'POST':
|
||||
b_body = jsonutils.loads(b_req_body)
|
||||
if b_body.get('volume'):
|
||||
vol = b_body['volume']
|
||||
vol['id'] = uuidutils.generate_uuid()
|
||||
stored_vol = {
|
||||
'volume': vol,
|
||||
'host': parse.hostname
|
||||
}
|
||||
fake_volumes.append(stored_vol)
|
||||
resp.status_code = 202
|
||||
vol_dict = {'volume': vol}
|
||||
|
||||
resp._content = jsonutils.dumps(vol_dict)
|
||||
# resp.json = vol_dict
|
||||
return resp
|
||||
|
||||
b_path = parse.path
|
||||
pos = b_path.rfind('/volumes')
|
||||
op = ''
|
||||
if pos > 0:
|
||||
op = b_path[pos:]
|
||||
op = op[len('/volumes'):]
|
||||
|
||||
if action == 'GET':
|
||||
if op == '' or op == '/detail':
|
||||
tenant_id = b_path[:pos]
|
||||
pos2 = tenant_id.rfind('/')
|
||||
if pos2 > 0:
|
||||
tenant_id = tenant_id[(pos2 + 1):]
|
||||
else:
|
||||
resp.status_code = 404
|
||||
return resp
|
||||
ret_vols = []
|
||||
cmp_host = parse.hostname
|
||||
for temp_vol in fake_volumes:
|
||||
if temp_vol['host'] != cmp_host:
|
||||
continue
|
||||
|
||||
if temp_vol['volume']['project_id'] == tenant_id:
|
||||
ret_vols.append(temp_vol['volume'])
|
||||
|
||||
vol_dicts = {'volumes': ret_vols}
|
||||
resp._content = jsonutils.dumps(vol_dicts)
|
||||
resp.status_code = 200
|
||||
return resp
|
||||
elif op != '':
|
||||
if op[0] == '/':
|
||||
_id = op[1:]
|
||||
for vol in fake_volumes:
|
||||
if vol['volume']['id'] == _id:
|
||||
vol_dict = {'volume': vol['volume']}
|
||||
resp._content = jsonutils.dumps(vol_dict)
|
||||
resp.status_code = 200
|
||||
return resp
|
||||
if action == 'DELETE':
|
||||
if op != '':
|
||||
if op[0] == '/':
|
||||
_id = op[1:]
|
||||
for vol in fake_volumes:
|
||||
if vol['volume']['id'] == _id:
|
||||
fake_volumes.remove(vol)
|
||||
resp.status_code = 202
|
||||
return resp
|
||||
if action == 'PUT':
|
||||
b_body = jsonutils.loads(b_req_body)
|
||||
update_vol = b_body.get('volume', {})
|
||||
if op != '':
|
||||
if op[0] == '/':
|
||||
_id = op[1:]
|
||||
for vol in fake_volumes:
|
||||
if vol['volume']['id'] == _id:
|
||||
vol['volume'].update(update_vol)
|
||||
vol_dict = {'volume': vol['volume']}
|
||||
resp._content = jsonutils.dumps(vol_dict)
|
||||
resp.status_code = 200
|
||||
return resp
|
||||
else:
|
||||
resp.status_code = 404
|
||||
|
||||
return resp
|
||||
|
||||
|
||||
class CinderVolumeFunctionalTest(base.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(CinderVolumeFunctionalTest, self).setUp()
|
||||
|
||||
self.addCleanup(set_config, {}, overwrite=True)
|
||||
|
||||
cfg.CONF.register_opts(app.common_opts)
|
||||
|
||||
self.CONF = self.useFixture(fixture_config.Config()).conf
|
||||
|
||||
self.CONF.set_override('auth_strategy', 'noauth')
|
||||
|
||||
self.app = self._make_app()
|
||||
|
||||
self._init_db()
|
||||
|
||||
def _make_app(self, enable_acl=False):
|
||||
self.config = {
|
||||
'app': {
|
||||
'root':
|
||||
'tricircle.cinder_apigw.controllers.root.RootController',
|
||||
'modules': ['tricircle.cinder_apigw'],
|
||||
'enable_acl': enable_acl,
|
||||
'errors': {
|
||||
400: '/error',
|
||||
'__force_dict__': True
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
return load_test_app(self.config)
|
||||
|
||||
def _init_db(self):
|
||||
core.initialize()
|
||||
core.ModelBase.metadata.create_all(core.get_engine())
|
||||
# enforce foreign key constraint for sqlite
|
||||
core.get_engine().execute('pragma foreign_keys=on')
|
||||
self.context = context.Context()
|
||||
|
||||
pod_dict = {
|
||||
'pod_id': 'fake_pod_id',
|
||||
'pod_name': 'fake_pod_name',
|
||||
'az_name': FAKE_AZ
|
||||
}
|
||||
|
||||
config_dict = {
|
||||
'service_id': 'fake_service_id',
|
||||
'pod_id': 'fake_pod_id',
|
||||
'service_type': cons.ST_CINDER,
|
||||
'service_url': 'http://127.0.0.1:8774/v2/$(tenant_id)s'
|
||||
}
|
||||
|
||||
pod_dict2 = {
|
||||
'pod_id': 'fake_pod_id' + '2',
|
||||
'pod_name': 'fake_pod_name' + '2',
|
||||
'az_name': FAKE_AZ + '2'
|
||||
}
|
||||
|
||||
config_dict2 = {
|
||||
'service_id': 'fake_service_id' + '2',
|
||||
'pod_id': 'fake_pod_id' + '2',
|
||||
'service_type': cons.ST_CINDER,
|
||||
'service_url': 'http://10.0.0.2:8774/v2/$(tenant_id)s'
|
||||
}
|
||||
|
||||
top_pod = {
|
||||
'pod_id': 'fake_top_pod_id',
|
||||
'pod_name': 'RegionOne',
|
||||
'az_name': ''
|
||||
}
|
||||
|
||||
top_config = {
|
||||
'service_id': 'fake_top_service_id',
|
||||
'pod_id': 'fake_top_pod_id',
|
||||
'service_type': cons.ST_CINDER,
|
||||
'service_url': 'http://127.0.0.1:19998/v2/$(tenant_id)s'
|
||||
}
|
||||
|
||||
db_api.create_pod(self.context, pod_dict)
|
||||
db_api.create_pod(self.context, pod_dict2)
|
||||
db_api.create_pod(self.context, top_pod)
|
||||
db_api.create_pod_service_configuration(self.context, config_dict)
|
||||
db_api.create_pod_service_configuration(self.context, config_dict2)
|
||||
db_api.create_pod_service_configuration(self.context, top_config)
|
||||
|
||||
def tearDown(self):
|
||||
super(CinderVolumeFunctionalTest, self).tearDown()
|
||||
cfg.CONF.unregister_opts(app.common_opts)
|
||||
pecan.set_config({}, overwrite=True)
|
||||
core.ModelBase.metadata.drop_all(core.get_engine())
|
||||
del fake_volumes[:]
|
||||
|
||||
|
||||
class TestVolumeController(CinderVolumeFunctionalTest):
|
||||
|
||||
@patch.object(hclient, 'forward_req',
|
||||
new=fake_volumes_forward_req)
|
||||
def test_post_error_case(self):
|
||||
|
||||
volumes = [
|
||||
{
|
||||
"volume_xxx":
|
||||
{
|
||||
"name": 'vol_1',
|
||||
"size": 10,
|
||||
"project_id": 'my_tenant_id',
|
||||
"metadata": {}
|
||||
},
|
||||
"expected_error": 400
|
||||
},
|
||||
|
||||
# no AZ parameter
|
||||
{
|
||||
"volume":
|
||||
{
|
||||
"name": 'vol_1',
|
||||
"size": 10,
|
||||
"project_id": 'my_tenant_id',
|
||||
"metadata": {}
|
||||
},
|
||||
"expected_error": 202
|
||||
},
|
||||
|
||||
# incorrect AZ parameter
|
||||
{
|
||||
"volume":
|
||||
{
|
||||
"name": 'vol_1',
|
||||
"availability_zone": FAKE_AZ + FAKE_AZ,
|
||||
"size": 10,
|
||||
"project_id": 'my_tenant_id',
|
||||
"metadata": {}
|
||||
},
|
||||
"expected_error": 500
|
||||
},
|
||||
|
||||
]
|
||||
|
||||
self._test_and_check(volumes, 'my_tenant_id')
|
||||
|
||||
def fake_create_resource(context, ag_name, az_name):
|
||||
raise Exception
|
||||
|
||||
@patch.object(hclient, 'forward_req',
|
||||
new=fake_volumes_forward_req)
|
||||
@patch.object(core, 'create_resource',
|
||||
new=fake_create_resource)
|
||||
def test_post_exception(self):
|
||||
volumes = [
|
||||
# no 'volume' parameter
|
||||
{
|
||||
"volume":
|
||||
{
|
||||
"name": 'vol_1',
|
||||
"availability_zone": FAKE_AZ,
|
||||
"attach_status": "detached",
|
||||
"volume_type": '',
|
||||
"project_id": 'my_tenant_id',
|
||||
"metadata": {}
|
||||
},
|
||||
"expected_error": 500
|
||||
}
|
||||
]
|
||||
|
||||
self._test_and_check(volumes, 'my_tenant_id')
|
||||
|
||||
@patch.object(hclient, 'forward_req',
|
||||
new=fake_volumes_forward_req)
|
||||
def test_post_one_and_get_one(self):
|
||||
|
||||
tenant1_volumes = [
|
||||
# normal volume with correct parameter
|
||||
{
|
||||
"volume":
|
||||
{
|
||||
"name": 'vol_1',
|
||||
"availability_zone": FAKE_AZ,
|
||||
"source_volid": '',
|
||||
"consistencygroup_id": '',
|
||||
"snapshot_id": '',
|
||||
"source_replica": '',
|
||||
"size": 10,
|
||||
"user_id": '',
|
||||
"imageRef": '',
|
||||
"attach_status": "detached",
|
||||
"volume_type": '',
|
||||
"project_id": 'my_tenant_id',
|
||||
"metadata": {}
|
||||
},
|
||||
"expected_error": 202
|
||||
},
|
||||
|
||||
# same tenant, multiple volumes
|
||||
{
|
||||
"volume":
|
||||
{
|
||||
"name": 'vol_2',
|
||||
"availability_zone": FAKE_AZ,
|
||||
"source_volid": '',
|
||||
"consistencygroup_id": '',
|
||||
"snapshot_id": '',
|
||||
"source_replica": '',
|
||||
"size": 20,
|
||||
"user_id": '',
|
||||
"imageRef": '',
|
||||
"attach_status": "detached",
|
||||
"volume_type": '',
|
||||
"project_id": 'my_tenant_id',
|
||||
"metadata": {}
|
||||
},
|
||||
"expected_error": 202
|
||||
},
|
||||
|
||||
# same tenant, different az
|
||||
{
|
||||
"volume":
|
||||
{
|
||||
"name": 'vol_3',
|
||||
"availability_zone": FAKE_AZ + '2',
|
||||
"source_volid": '',
|
||||
"consistencygroup_id": '',
|
||||
"snapshot_id": '',
|
||||
"source_replica": '',
|
||||
"size": 20,
|
||||
"user_id": '',
|
||||
"imageRef": '',
|
||||
"attach_status": "detached",
|
||||
"volume_type": '',
|
||||
"project_id": 'my_tenant_id',
|
||||
"metadata": {}
|
||||
},
|
||||
"expected_error": 202
|
||||
},
|
||||
]
|
||||
|
||||
tenant2_volumes = [
|
||||
# different tenant, same az
|
||||
{
|
||||
"volume":
|
||||
{
|
||||
"name": 'vol_4',
|
||||
"availability_zone": FAKE_AZ,
|
||||
"source_volid": '',
|
||||
"consistencygroup_id": '',
|
||||
"snapshot_id": '',
|
||||
"source_replica": '',
|
||||
"size": 20,
|
||||
"user_id": '',
|
||||
"imageRef": '',
|
||||
"attach_status": "detached",
|
||||
"volume_type": '',
|
||||
"project_id": 'my_tenant_id_2',
|
||||
"metadata": {}
|
||||
},
|
||||
"expected_error": 202
|
||||
},
|
||||
]
|
||||
|
||||
self._test_and_check(tenant1_volumes, 'my_tenant_id')
|
||||
self._test_and_check(tenant2_volumes, 'my_tenant_id_2')
|
||||
|
||||
self._test_detail_check('my_tenant_id', 3)
|
||||
self._test_detail_check('my_tenant_id_2', 1)
|
||||
|
||||
@patch.object(hclient, 'forward_req',
|
||||
new=fake_volumes_forward_req)
|
||||
def test_post_one_and_delete_one(self):
|
||||
|
||||
volumes = [
|
||||
# normal volume with correct parameter
|
||||
{
|
||||
"volume":
|
||||
{
|
||||
"name": 'vol_1',
|
||||
"availability_zone": FAKE_AZ,
|
||||
"source_volid": '',
|
||||
"consistencygroup_id": '',
|
||||
"snapshot_id": '',
|
||||
"source_replica": '',
|
||||
"size": 10,
|
||||
"user_id": '',
|
||||
"imageRef": '',
|
||||
"attach_status": "detached",
|
||||
"volume_type": '',
|
||||
"project_id": 'my_tenant_id',
|
||||
"metadata": {}
|
||||
},
|
||||
"expected_error": 202
|
||||
},
|
||||
]
|
||||
|
||||
self._test_and_check_delete(volumes, 'my_tenant_id')
|
||||
|
||||
@patch.object(hclient, 'forward_req',
|
||||
new=fake_volumes_forward_req)
|
||||
def test_get(self):
|
||||
response = self.app.get('/v2/my_tenant_id/volumes')
|
||||
self.assertEqual(response.status_int, 200)
|
||||
json_body = jsonutils.loads(response.body)
|
||||
vols = json_body.get('volumes')
|
||||
self.assertEqual(0, len(vols))
|
||||
|
||||
@patch.object(hclient, 'forward_req',
|
||||
new=fake_volumes_forward_req)
|
||||
def test_get_all(self):
|
||||
update_dict = {'pod_az_name': 'fake_pod_az2'}
|
||||
# update pod2 to set pod_az_name
|
||||
db_api.update_pod(self.context, 'fake_pod_id2', update_dict)
|
||||
|
||||
volumes = [
|
||||
# normal volume with correct parameter
|
||||
{
|
||||
"volume":
|
||||
{
|
||||
"name": 'vol_1',
|
||||
"availability_zone": FAKE_AZ,
|
||||
"source_volid": '',
|
||||
"consistencygroup_id": '',
|
||||
"snapshot_id": '',
|
||||
"source_replica": '',
|
||||
"size": 10,
|
||||
"user_id": '',
|
||||
"imageRef": '',
|
||||
"attach_status": "detached",
|
||||
"volume_type": '',
|
||||
"project_id": 'my_tenant_id',
|
||||
"metadata": {}
|
||||
},
|
||||
"expected_error": 202
|
||||
},
|
||||
|
||||
# same tenant, multiple volumes
|
||||
{
|
||||
"volume":
|
||||
{
|
||||
"name": 'vol_2',
|
||||
"availability_zone": FAKE_AZ,
|
||||
"source_volid": '',
|
||||
"consistencygroup_id": '',
|
||||
"snapshot_id": '',
|
||||
"source_replica": '',
|
||||
"size": 20,
|
||||
"user_id": '',
|
||||
"imageRef": '',
|
||||
"attach_status": "detached",
|
||||
"volume_type": '',
|
||||
"project_id": 'my_tenant_id',
|
||||
"metadata": {}
|
||||
},
|
||||
"expected_error": 202
|
||||
},
|
||||
|
||||
# same tenant, different az
|
||||
{
|
||||
"volume":
|
||||
{
|
||||
"name": 'vol_3',
|
||||
"availability_zone": FAKE_AZ + '2',
|
||||
"source_volid": '',
|
||||
"consistencygroup_id": '',
|
||||
"snapshot_id": '',
|
||||
"source_replica": '',
|
||||
"size": 20,
|
||||
"user_id": '',
|
||||
"imageRef": '',
|
||||
"attach_status": "detached",
|
||||
"volume_type": '',
|
||||
"project_id": 'my_tenant_id',
|
||||
"metadata": {}
|
||||
},
|
||||
"expected_error": 202
|
||||
},
|
||||
]
|
||||
tenant_id = 'my_tenant_id'
|
||||
for volume in volumes:
|
||||
self.app.post_json('/v2/' + tenant_id + '/volumes',
|
||||
dict(volume=volume['volume']),
|
||||
expect_errors=True)
|
||||
query_string = '?availability_zone=' + FAKE_AZ
|
||||
resp = self.app.get('/v2/' + tenant_id + '/volumes' + query_string)
|
||||
self.assertEqual(resp.status_int, 200)
|
||||
json_body = jsonutils.loads(resp.body)
|
||||
ret_vols = json_body.get('volumes')
|
||||
self.assertEqual(len(ret_vols), 2)
|
||||
|
||||
query_string = '?availability_zone=' + FAKE_AZ + '2'
|
||||
resp = self.app.get('/v2/' + tenant_id + '/volumes' + query_string)
|
||||
self.assertEqual(resp.status_int, 200)
|
||||
json_body = jsonutils.loads(resp.body)
|
||||
ret_vols = json_body.get('volumes')
|
||||
self.assertEqual(len(ret_vols), 1)
|
||||
|
||||
@patch.object(hclient, 'forward_req',
|
||||
new=fake_volumes_forward_req)
|
||||
def test_put(self):
|
||||
volume = {
|
||||
"volume":
|
||||
{
|
||||
"name": 'vol_1',
|
||||
"availability_zone": FAKE_AZ,
|
||||
"source_volid": '',
|
||||
"consistencygroup_id": '',
|
||||
"snapshot_id": '',
|
||||
"source_replica": '',
|
||||
"size": 10,
|
||||
"user_id": '',
|
||||
"imageRef": '',
|
||||
"attach_status": "detached",
|
||||
"volume_type": '',
|
||||
"project_id": 'my_tenant_id',
|
||||
"metadata": {}
|
||||
},
|
||||
"expected_error": 202
|
||||
}
|
||||
|
||||
tenant_id = 'my_tenant_id'
|
||||
resp = self.app.post_json('/v2/' + tenant_id + '/volumes',
|
||||
dict(volume=volume['volume']),
|
||||
expect_errors=True)
|
||||
volume_dict = jsonutils.loads(resp.body)
|
||||
volume_id = volume_dict['volume']['id']
|
||||
|
||||
update_dict = {"volume": {"name": 'vol_2'}}
|
||||
resp = self.app.put_json('/v2/' + tenant_id + '/volumes/' + volume_id,
|
||||
dict(volume=update_dict['volume']),
|
||||
expect_errors=True)
|
||||
volume_dict = jsonutils.loads(resp.body)
|
||||
self.assertEqual(resp.status_int, 200)
|
||||
self.assertEqual(volume_dict['volume']['name'], 'vol_2')
|
||||
|
||||
def _test_and_check(self, volumes, tenant_id):
|
||||
for test_vol in volumes:
|
||||
if test_vol.get('volume'):
|
||||
response = self.app.post_json(
|
||||
'/v2/' + tenant_id + '/volumes',
|
||||
dict(volume=test_vol['volume']),
|
||||
expect_errors=True)
|
||||
elif test_vol.get('volume_xxx'):
|
||||
response = self.app.post_json(
|
||||
'/v2/' + tenant_id + '/volumes',
|
||||
dict(volume_xxx=test_vol['volume_xxx']),
|
||||
expect_errors=True)
|
||||
else:
|
||||
return
|
||||
|
||||
self.assertEqual(response.status_int,
|
||||
test_vol['expected_error'])
|
||||
|
||||
if response.status_int == 202:
|
||||
json_body = jsonutils.loads(response.body)
|
||||
res_vol = json_body.get('volume')
|
||||
query_resp = self.app.get(
|
||||
'/v2/' + tenant_id + '/volumes/' + res_vol['id'])
|
||||
self.assertEqual(query_resp.status_int, 200)
|
||||
json_body = jsonutils.loads(query_resp.body)
|
||||
query_vol = json_body.get('volume')
|
||||
|
||||
self.assertEqual(res_vol['id'], query_vol['id'])
|
||||
self.assertEqual(res_vol['name'], query_vol['name'])
|
||||
self.assertEqual(res_vol['availability_zone'],
|
||||
query_vol['availability_zone'])
|
||||
self.assertIn(res_vol['availability_zone'],
|
||||
[FAKE_AZ, FAKE_AZ + '2'])
|
||||
|
||||
def _test_and_check_delete(self, volumes, tenant_id):
|
||||
for test_vol in volumes:
|
||||
if test_vol.get('volume'):
|
||||
response = self.app.post_json(
|
||||
'/v2/' + tenant_id + '/volumes',
|
||||
dict(volume=test_vol['volume']),
|
||||
expect_errors=True)
|
||||
self.assertEqual(response.status_int,
|
||||
test_vol['expected_error'])
|
||||
if response.status_int == 202:
|
||||
json_body = jsonutils.loads(response.body)
|
||||
_id = json_body.get('volume')['id']
|
||||
query_resp = self.app.get(
|
||||
'/v2/' + tenant_id + '/volumes/' + _id)
|
||||
self.assertEqual(query_resp.status_int, 200)
|
||||
|
||||
delete_resp = self.app.delete(
|
||||
'/v2/' + tenant_id + '/volumes/' + _id)
|
||||
self.assertEqual(delete_resp.status_int, 202)
|
||||
|
||||
def _test_detail_check(self, tenant_id, vol_size):
|
||||
resp = self.app.get(
|
||||
'/v2/' + tenant_id + '/volumes' + '/detail',
|
||||
expect_errors=True)
|
||||
self.assertEqual(resp.status_int, 200)
|
||||
json_body = jsonutils.loads(resp.body)
|
||||
ret_vols = json_body.get('volumes')
|
||||
self.assertEqual(len(ret_vols), vol_size)
|
@ -1,447 +0,0 @@
|
||||
# Copyright (c) 2015 Huawei Tech. Co., Ltd.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import mock
|
||||
from novaclient import api_versions
|
||||
from novaclient.v2 import client as n_client
|
||||
import pecan
|
||||
from pecan.configuration import set_config
|
||||
from pecan.testing import load_test_app
|
||||
|
||||
from tricircle.common import constants
|
||||
from tricircle.common import constants as cons
|
||||
from tricircle.common import context
|
||||
from tricircle.common import resource_handle
|
||||
from tricircle.db import api as db_api
|
||||
from tricircle.db import core
|
||||
from tricircle.nova_apigw import app
|
||||
from tricircle.nova_apigw.controllers import server
|
||||
from tricircle.tests import base
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_config import fixture as fixture_config
|
||||
|
||||
FAKE_AZ = 'fake_az'
|
||||
|
||||
|
||||
def get_tricircle_client(self, pod):
|
||||
return FakeTricircleClient()
|
||||
|
||||
|
||||
class FakeTricircleClient(object):
|
||||
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
def list_servers(self, cxt, filters=None):
|
||||
handle = FakeNovaAPIGWResourceHandle()
|
||||
return handle.handle_list(cxt, 'server', filters)
|
||||
|
||||
|
||||
class FakeNovaAPIGWResourceHandle(resource_handle.NovaResourceHandle):
|
||||
def __init__(self):
|
||||
self.auth_url = 'auth_url'
|
||||
self.endpoint_url = 'endpoint_url'
|
||||
|
||||
def handle_list(self, cxt, resource, filters):
|
||||
super(FakeNovaAPIGWResourceHandle, self).handle_list(
|
||||
cxt, resource, filters)
|
||||
return []
|
||||
|
||||
|
||||
class FakeNovaClient(object):
|
||||
def __init__(self):
|
||||
self.servers = FakeNovaServer()
|
||||
|
||||
def set_management_url(self, url):
|
||||
pass
|
||||
|
||||
|
||||
class FakeNovaServer(object):
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
def list(self, detailed=True, search_opts=None, marker=None, limit=None,
|
||||
sort_keys=None, sort_dirs=None):
|
||||
return []
|
||||
|
||||
|
||||
class MicroVersionFunctionTest(base.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(MicroVersionFunctionTest, self).setUp()
|
||||
|
||||
self.addCleanup(set_config, {}, overwrite=True)
|
||||
|
||||
cfg.CONF.register_opts(app.common_opts)
|
||||
|
||||
self.CONF = self.useFixture(fixture_config.Config()).conf
|
||||
|
||||
self.CONF.set_override('auth_strategy', 'noauth')
|
||||
self.CONF.set_override('tricircle_db_connection', 'sqlite:///:memory:')
|
||||
|
||||
core.initialize()
|
||||
core.ModelBase.metadata.create_all(core.get_engine())
|
||||
|
||||
self.app = self._make_app()
|
||||
|
||||
self._init_db()
|
||||
|
||||
def _make_app(self, enable_acl=False):
|
||||
self.config = {
|
||||
'app': {
|
||||
'root': 'tricircle.nova_apigw.controllers.root.RootController',
|
||||
'modules': ['tricircle.nova_apigw'],
|
||||
'enable_acl': enable_acl,
|
||||
'errors': {
|
||||
400: '/error',
|
||||
'__force_dict__': True
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
return load_test_app(self.config)
|
||||
|
||||
def _init_db(self):
|
||||
core.initialize()
|
||||
core.ModelBase.metadata.create_all(core.get_engine())
|
||||
# enforce foreign key constraint for sqlite
|
||||
core.get_engine().execute('pragma foreign_keys=on')
|
||||
self.context = context.Context()
|
||||
|
||||
pod_dict = {
|
||||
'pod_id': 'fake_pod_id',
|
||||
'pod_name': 'fake_pod_name',
|
||||
'az_name': FAKE_AZ
|
||||
}
|
||||
|
||||
config_dict = {
|
||||
'service_id': 'fake_service_id',
|
||||
'pod_id': 'fake_pod_id',
|
||||
'service_type': cons.ST_NOVA,
|
||||
'service_url': 'http://127.0.0.1:8774/v2/$(tenant_id)s'
|
||||
}
|
||||
|
||||
pod_dict2 = {
|
||||
'pod_id': 'fake_pod_id' + '2',
|
||||
'pod_name': 'fake_pod_name' + '2',
|
||||
'az_name': FAKE_AZ + '2'
|
||||
}
|
||||
|
||||
config_dict2 = {
|
||||
'service_id': 'fake_service_id' + '2',
|
||||
'pod_id': 'fake_pod_id' + '2',
|
||||
'service_type': cons.ST_CINDER,
|
||||
'service_url': 'http://10.0.0.2:8774/v2/$(tenant_id)s'
|
||||
}
|
||||
|
||||
top_pod = {
|
||||
'pod_id': 'fake_top_pod_id',
|
||||
'pod_name': 'RegionOne',
|
||||
'az_name': ''
|
||||
}
|
||||
|
||||
top_config = {
|
||||
'service_id': 'fake_top_service_id',
|
||||
'pod_id': 'fake_top_pod_id',
|
||||
'service_type': cons.ST_CINDER,
|
||||
'service_url': 'http://127.0.0.1:19998/v2/$(tenant_id)s'
|
||||
}
|
||||
|
||||
db_api.create_pod(self.context, pod_dict)
|
||||
db_api.create_pod(self.context, pod_dict2)
|
||||
db_api.create_pod(self.context, top_pod)
|
||||
db_api.create_pod_service_configuration(self.context, config_dict)
|
||||
db_api.create_pod_service_configuration(self.context, config_dict2)
|
||||
db_api.create_pod_service_configuration(self.context, top_config)
|
||||
|
||||
def tearDown(self):
|
||||
super(MicroVersionFunctionTest, self).tearDown()
|
||||
cfg.CONF.unregister_opts(app.common_opts)
|
||||
pecan.set_config({}, overwrite=True)
|
||||
core.ModelBase.metadata.drop_all(core.get_engine())
|
||||
|
||||
|
||||
class MicroversionsTest(MicroVersionFunctionTest):
|
||||
|
||||
min_version = constants.NOVA_APIGW_MIN_VERSION
|
||||
max_version = 'compute %s' % constants.NOVA_APIGW_MAX_VERSION
|
||||
lower_boundary = str(float(constants.NOVA_APIGW_MIN_VERSION) - 0.1)
|
||||
upper_boundary = 'compute %s' % str(
|
||||
float(constants.NOVA_APIGW_MAX_VERSION) + 0.1)
|
||||
vaild_version = 'compute 2.30'
|
||||
vaild_leagcy_version = '2.5'
|
||||
invaild_major = 'compute a.2'
|
||||
invaild_minor = 'compute 2.a'
|
||||
latest_version = 'compute 2.latest'
|
||||
invaild_compute_format = 'compute2.30'
|
||||
only_major = '2'
|
||||
invaild_major2 = '1.5'
|
||||
invaild_major3 = 'compute 3.2'
|
||||
invaild_version = '2.30'
|
||||
invaild_leagecy_version = 'compute 2.5'
|
||||
invaild_version2 = 'aa 2.30'
|
||||
invaild_version3 = 'compute 2.30 2.31'
|
||||
invaild_version4 = 'acompute 2.30'
|
||||
|
||||
tenant_id = 'tenant_id'
|
||||
|
||||
def _make_headers(self, version, type='current'):
|
||||
headers = {}
|
||||
headers['X_TENANT_ID'] = self.tenant_id
|
||||
if version is None:
|
||||
type = 'leagecy'
|
||||
version = constants.NOVA_APIGW_MIN_VERSION
|
||||
|
||||
if type == 'both':
|
||||
headers[constants.NOVA_API_VERSION_REQUEST_HEADER] = version
|
||||
headers[constants.LEGACY_NOVA_API_VERSION_REQUEST_HEADER] = '2.5'
|
||||
elif type == 'current':
|
||||
headers[constants.NOVA_API_VERSION_REQUEST_HEADER] = version
|
||||
else:
|
||||
headers[constants.LEGACY_NOVA_API_VERSION_REQUEST_HEADER] = version
|
||||
|
||||
return headers
|
||||
|
||||
@mock.patch.object(server.ServerController, '_get_client',
|
||||
new=get_tricircle_client)
|
||||
@mock.patch.object(n_client, 'Client')
|
||||
def test_microversions_no_header(self, mock_client):
|
||||
headers = self._make_headers(None)
|
||||
url = '/v2.1/' + self.tenant_id + '/servers/detail'
|
||||
mock_client.return_value = FakeNovaClient()
|
||||
self.app.get(url, headers=headers)
|
||||
mock_client.assert_called_with(
|
||||
api_version=api_versions.APIVersion(
|
||||
constants.NOVA_APIGW_MIN_VERSION),
|
||||
auth_token=None, auth_url='auth_url',
|
||||
direct_use=False, project_id=None,
|
||||
timeout=60, username=None, api_key=None)
|
||||
|
||||
@mock.patch.object(server.ServerController, '_get_client',
|
||||
new=get_tricircle_client)
|
||||
@mock.patch.object(n_client, 'Client')
|
||||
def test_microversions_vaild_version(self, mock_client):
|
||||
headers = self._make_headers(self.vaild_version)
|
||||
url = '/v2.1/' + self.tenant_id + '/servers/detail'
|
||||
mock_client.return_value = FakeNovaClient()
|
||||
self.app.get(url, headers=headers)
|
||||
mock_client.assert_called_with(
|
||||
api_version=api_versions.APIVersion(self.vaild_version.split()[1]),
|
||||
auth_token=None, auth_url='auth_url',
|
||||
direct_use=False, project_id=None,
|
||||
timeout=60, username=None, api_key=None)
|
||||
|
||||
@mock.patch.object(server.ServerController, '_get_client',
|
||||
new=get_tricircle_client)
|
||||
@mock.patch.object(n_client, 'Client')
|
||||
def test_microversions_vaild_leagcy_version(self, mock_client):
|
||||
headers = self._make_headers(self.vaild_leagcy_version, 'leagcy')
|
||||
url = '/v2.1/' + self.tenant_id + '/servers/detail'
|
||||
mock_client.return_value = FakeNovaClient()
|
||||
self.app.get(url, headers=headers)
|
||||
mock_client.assert_called_with(
|
||||
api_version=api_versions.APIVersion(self.vaild_leagcy_version),
|
||||
auth_token=None, auth_url='auth_url',
|
||||
direct_use=False, project_id=None,
|
||||
timeout=60, username=None, api_key=None)
|
||||
|
||||
@mock.patch.object(server.ServerController, '_get_client',
|
||||
new=get_tricircle_client)
|
||||
@mock.patch.object(n_client, 'Client')
|
||||
def test_microversions_latest_version(self, mock_client):
|
||||
headers = self._make_headers(self.latest_version)
|
||||
url = '/v2.1/' + self.tenant_id + '/servers/detail'
|
||||
mock_client.return_value = FakeNovaClient()
|
||||
self.app.get(url, headers=headers)
|
||||
mock_client.assert_called_with(
|
||||
api_version=api_versions.APIVersion(
|
||||
constants.NOVA_APIGW_MAX_VERSION),
|
||||
auth_token=None, auth_url='auth_url',
|
||||
direct_use=False, project_id=None,
|
||||
timeout=60, username=None, api_key=None)
|
||||
|
||||
@mock.patch.object(server.ServerController, '_get_client',
|
||||
new=get_tricircle_client)
|
||||
@mock.patch.object(n_client, 'Client')
|
||||
def test_microversions_min_version(self, mock_client):
|
||||
headers = self._make_headers(self.min_version, 'leagecy')
|
||||
url = '/v2.1/' + self.tenant_id + '/servers/detail'
|
||||
mock_client.return_value = FakeNovaClient()
|
||||
self.app.get(url, headers=headers)
|
||||
mock_client.assert_called_with(
|
||||
api_version=api_versions.APIVersion(self.min_version),
|
||||
auth_token=None, auth_url='auth_url',
|
||||
direct_use=False, project_id=None,
|
||||
timeout=60, username=None, api_key=None)
|
||||
|
||||
@mock.patch.object(server.ServerController, '_get_client',
|
||||
new=get_tricircle_client)
|
||||
@mock.patch.object(n_client, 'Client')
|
||||
def test_microversions_max_version(self, mock_client):
|
||||
headers = self._make_headers(self.max_version)
|
||||
url = '/v2.1/' + self.tenant_id + '/servers/detail'
|
||||
mock_client.return_value = FakeNovaClient()
|
||||
self.app.get(url, headers=headers)
|
||||
mock_client.assert_called_with(
|
||||
api_version=api_versions.APIVersion(self.max_version.split()[1]),
|
||||
auth_token=None, auth_url='auth_url',
|
||||
direct_use=False, project_id=None,
|
||||
timeout=60, username=None, api_key=None)
|
||||
|
||||
@mock.patch.object(server.ServerController, '_get_client',
|
||||
new=get_tricircle_client)
|
||||
@mock.patch.object(n_client, 'Client')
|
||||
def test_microversions_invaild_major(self, mock_client):
|
||||
headers = self._make_headers(self.invaild_major)
|
||||
url = '/v2.1/' + self.tenant_id + '/servers/detail'
|
||||
mock_client.return_value = FakeNovaClient()
|
||||
res = self.app.get(url, headers=headers, expect_errors=True)
|
||||
self.assertEqual(406, res.status_int)
|
||||
|
||||
@mock.patch.object(server.ServerController, '_get_client',
|
||||
new=get_tricircle_client)
|
||||
@mock.patch.object(n_client, 'Client')
|
||||
def test_microversions_invaild_major2(self, mock_client):
|
||||
headers = self._make_headers(self.invaild_major2, 'leagecy')
|
||||
url = '/v2.1/' + self.tenant_id + '/servers/detail'
|
||||
mock_client.return_value = FakeNovaClient()
|
||||
res = self.app.get(url, headers=headers, expect_errors=True)
|
||||
self.assertEqual(406, res.status_int)
|
||||
|
||||
@mock.patch.object(server.ServerController, '_get_client',
|
||||
new=get_tricircle_client)
|
||||
@mock.patch.object(n_client, 'Client')
|
||||
def test_microversions_invaild_major3(self, mock_client):
|
||||
headers = self._make_headers(self.invaild_major3)
|
||||
url = '/v2.1/' + self.tenant_id + '/servers/detail'
|
||||
mock_client.return_value = FakeNovaClient()
|
||||
res = self.app.get(url, headers=headers, expect_errors=True)
|
||||
self.assertEqual(406, res.status_int)
|
||||
|
||||
@mock.patch.object(server.ServerController, '_get_client',
|
||||
new=get_tricircle_client)
|
||||
@mock.patch.object(n_client, 'Client')
|
||||
def test_microversions_invaild_minor(self, mock_client):
|
||||
headers = self._make_headers(self.invaild_minor)
|
||||
url = '/v2.1/' + self.tenant_id + '/servers/detail'
|
||||
mock_client.return_value = FakeNovaClient()
|
||||
res = self.app.get(url, headers=headers, expect_errors=True)
|
||||
self.assertEqual(406, res.status_int)
|
||||
|
||||
@mock.patch.object(server.ServerController, '_get_client',
|
||||
new=get_tricircle_client)
|
||||
@mock.patch.object(n_client, 'Client')
|
||||
def test_microversions_lower_boundary(self, mock_client):
|
||||
headers = self._make_headers(self.lower_boundary)
|
||||
url = '/v2.1/' + self.tenant_id + '/servers/detail'
|
||||
mock_client.return_value = FakeNovaClient()
|
||||
res = self.app.get(url, headers=headers, expect_errors=True)
|
||||
self.assertEqual(406, res.status_int)
|
||||
|
||||
@mock.patch.object(server.ServerController, '_get_client',
|
||||
new=get_tricircle_client)
|
||||
@mock.patch.object(n_client, 'Client')
|
||||
def test_microversions_upper_boundary(self, mock_client):
|
||||
headers = self._make_headers(self.upper_boundary)
|
||||
url = '/v2.1/' + self.tenant_id + '/servers/detail'
|
||||
mock_client.return_value = FakeNovaClient()
|
||||
res = self.app.get(url, headers=headers, expect_errors=True)
|
||||
self.assertEqual(406, res.status_int)
|
||||
|
||||
@mock.patch.object(server.ServerController, '_get_client',
|
||||
new=get_tricircle_client)
|
||||
@mock.patch.object(n_client, 'Client')
|
||||
def test_microversions_invaild_compute_format(self, mock_client):
|
||||
headers = self._make_headers(self.invaild_compute_format)
|
||||
url = '/v2.1/' + self.tenant_id + '/servers/detail'
|
||||
mock_client.return_value = FakeNovaClient()
|
||||
res = self.app.get(url, headers=headers, expect_errors=True)
|
||||
self.assertEqual(406, res.status_int)
|
||||
|
||||
@mock.patch.object(server.ServerController, '_get_client',
|
||||
new=get_tricircle_client)
|
||||
@mock.patch.object(n_client, 'Client')
|
||||
def test_microversions_only_major(self, mock_client):
|
||||
headers = self._make_headers(self.only_major, 'leagecy')
|
||||
url = '/v2.1/' + self.tenant_id + '/servers/detail'
|
||||
mock_client.return_value = FakeNovaClient()
|
||||
res = self.app.get(url, headers=headers, expect_errors=True)
|
||||
self.assertEqual(406, res.status_int)
|
||||
|
||||
@mock.patch.object(server.ServerController, '_get_client',
|
||||
new=get_tricircle_client)
|
||||
@mock.patch.object(n_client, 'Client')
|
||||
def test_microversions_invaild_version(self, mock_client):
|
||||
headers = self._make_headers(self.invaild_version)
|
||||
url = '/v2.1/' + self.tenant_id + '/servers/detail'
|
||||
mock_client.return_value = FakeNovaClient()
|
||||
res = self.app.get(url, headers=headers, expect_errors=True)
|
||||
self.assertEqual(406, res.status_int)
|
||||
|
||||
@mock.patch.object(server.ServerController, '_get_client',
|
||||
new=get_tricircle_client)
|
||||
@mock.patch.object(n_client, 'Client')
|
||||
def test_microversions_invaild_leagecy_version(self, mock_client):
|
||||
headers = self._make_headers(self.invaild_leagecy_version, 'leagecy')
|
||||
url = '/v2.1/' + self.tenant_id + '/servers/detail'
|
||||
mock_client.return_value = FakeNovaClient()
|
||||
res = self.app.get(url, headers=headers, expect_errors=True)
|
||||
self.assertEqual(406, res.status_int)
|
||||
|
||||
@mock.patch.object(server.ServerController, '_get_client',
|
||||
new=get_tricircle_client)
|
||||
@mock.patch.object(n_client, 'Client')
|
||||
def test_microversions_both_version(self, mock_client):
|
||||
headers = self._make_headers(self.vaild_version, 'both')
|
||||
url = '/v2.1/' + self.tenant_id + '/servers/detail'
|
||||
mock_client.return_value = FakeNovaClient()
|
||||
self.app.get(url, headers=headers, expect_errors=True)
|
||||
# The new format microversion priority to leagecy
|
||||
mock_client.assert_called_with(
|
||||
api_version=api_versions.APIVersion(self.vaild_version.split()[1]),
|
||||
auth_token=None, auth_url='auth_url',
|
||||
direct_use=False, project_id=None,
|
||||
timeout=60, username=None, api_key=None)
|
||||
|
||||
@mock.patch.object(server.ServerController, '_get_client',
|
||||
new=get_tricircle_client)
|
||||
@mock.patch.object(n_client, 'Client')
|
||||
def test_microversions_invaild_version2(self, mock_client):
|
||||
headers = self._make_headers(self.invaild_version2)
|
||||
url = '/v2.1/' + self.tenant_id + '/servers/detail'
|
||||
mock_client.return_value = FakeNovaClient()
|
||||
res = self.app.get(url, headers=headers, expect_errors=True)
|
||||
self.assertEqual(406, res.status_int)
|
||||
|
||||
@mock.patch.object(server.ServerController, '_get_client',
|
||||
new=get_tricircle_client)
|
||||
@mock.patch.object(n_client, 'Client')
|
||||
def test_microversions_invaild_version3(self, mock_client):
|
||||
headers = self._make_headers(self.invaild_version3)
|
||||
url = '/v2.1/' + self.tenant_id + '/servers/detail'
|
||||
mock_client.return_value = FakeNovaClient()
|
||||
res = self.app.get(url, headers=headers, expect_errors=True)
|
||||
self.assertEqual(406, res.status_int)
|
||||
|
||||
@mock.patch.object(server.ServerController, '_get_client',
|
||||
new=get_tricircle_client)
|
||||
@mock.patch.object(n_client, 'Client')
|
||||
def test_microversions_invaild_version4(self, mock_client):
|
||||
headers = self._make_headers(self.invaild_version4)
|
||||
url = '/v2.1/' + self.tenant_id + '/servers/detail'
|
||||
mock_client.return_value = FakeNovaClient()
|
||||
res = self.app.get(url, headers=headers, expect_errors=True)
|
||||
self.assertEqual(406, res.status_int)
|
@ -1,479 +0,0 @@
|
||||
# Copyright (c) 2015 Huawei Technologies Co., Ltd.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import copy
|
||||
import mock
|
||||
import pecan
|
||||
from pecan.configuration import set_config
|
||||
from pecan.testing import load_test_app
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_config import fixture as fixture_config
|
||||
from oslo_serialization import jsonutils
|
||||
|
||||
from tricircle.nova_apigw import app
|
||||
from tricircle.nova_apigw.controllers import quota_sets
|
||||
|
||||
from tricircle.common import context
|
||||
from tricircle.common import exceptions as t_exceptions
|
||||
from tricircle.common import quota
|
||||
from tricircle.db import core
|
||||
|
||||
from tricircle.tests.unit.common import test_quota
|
||||
|
||||
QUOTAS = quota.QUOTAS
|
||||
|
||||
|
||||
def _make_body(tenant_id='foo', root=True, **kw):
|
||||
resources = copy.copy(kw)
|
||||
if tenant_id:
|
||||
resources['id'] = tenant_id
|
||||
if root:
|
||||
result = {'quota_set': resources}
|
||||
else:
|
||||
result = resources
|
||||
return result
|
||||
|
||||
|
||||
def _update_body(src_body, root=True, **kw):
|
||||
for k, v in kw.iteritems():
|
||||
if root:
|
||||
src_body['quota_set'][k] = v
|
||||
else:
|
||||
src_body[k] = v
|
||||
return src_body
|
||||
|
||||
|
||||
def _update_subproject_body(src_body, root=True, **kw):
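    # like _update_body, but resources not given in kw are zeroed (subproject defaults)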
|
||||
for k, v in kw.iteritems():
|
||||
if root:
|
||||
src_body['quota_set'][k] = v
|
||||
else:
|
||||
src_body[k] = v
|
||||
|
||||
if root:
|
||||
for k, v in src_body['quota_set'].iteritems():
|
||||
if not kw.get(k):
|
||||
src_body['quota_set'][k] = 0
|
||||
|
||||
else:
|
||||
for k, v in src_body.iteritems():
|
||||
if not kw.get(k) and k != 'id':
|
||||
src_body[k] = 0
|
||||
|
||||
return src_body
|
||||
|
||||
|
||||
def _make_subproject_body(tenant_id='foo', root=True, **kw):
|
||||
return _make_body(tenant_id=tenant_id, root=root, **kw)
|
||||
|
||||
|
||||
class QuotaControllerTest(test_quota.QuotaSetsOperationTest):
|
||||
|
||||
def setUp(self):
|
||||
super(QuotaControllerTest, self).setUp()
|
||||
|
||||
self.addCleanup(set_config, {}, overwrite=True)
|
||||
|
||||
cfg.CONF.register_opts(app.common_opts)
|
||||
self.CONF = self.useFixture(fixture_config.Config()).conf
|
||||
self.CONF.set_override('auth_strategy', 'noauth')
|
||||
|
||||
self.exception_string = 'NotFound'
|
||||
self.test_exception = [
|
||||
{'exception_raise': 'NotFound',
|
||||
'expected_error': 404},
|
||||
{'exception_raise': 'AdminRequired',
|
||||
'expected_error': 403},
|
||||
{'exception_raise': 'NotAuthorized',
|
||||
'expected_error': 403},
|
||||
{'exception_raise': 'HTTPForbiddenError',
|
||||
'expected_error': 403},
|
||||
{'exception_raise': 'Conflict',
|
||||
'expected_error': 400}, ]
|
||||
|
||||
self._flags_rest(use_default_quota_class=True)
|
||||
|
||||
self.app = self._make_app()
|
||||
|
||||
def _make_app(self, enable_acl=False):
|
||||
self.config = {
|
||||
'app': {
|
||||
'root':
|
||||
'tricircle.nova_apigw.controllers.root.RootController',
|
||||
'modules': ['tricircle.nova_apigw'],
|
||||
'enable_acl': enable_acl,
|
||||
'errors': {
|
||||
400: '/error',
|
||||
'__force_dict__': True
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
return load_test_app(self.config)
|
||||
|
||||
def _override_config_rest(self, name, override, group=None):
|
||||
"""Cleanly override CONF variables."""
|
||||
self.CONF.set_override(name, override, group)
|
||||
self.addCleanup(self.CONF.clear_override, name, group)
|
||||
|
||||
def _flags_rest(self, **kw):
|
||||
"""Override CONF variables for a test."""
|
||||
for k, v in kw.items():
|
||||
self._override_config_rest(k, v, group='quota')
|
||||
|
||||
def tearDown(self):
|
||||
super(QuotaControllerTest, self).tearDown()
|
||||
pecan.set_config({}, overwrite=True)
|
||||
cfg.CONF.unregister_opts(app.common_opts)
|
||||
core.ModelBase.metadata.drop_all(core.get_engine())
|
||||
|
||||
def _get_mock_ctx(self):
|
||||
return self.ctx
|
||||
|
||||
def _mock_func_and_obj(self):
|
||||
quota.QuotaSetOperation._get_project = mock.Mock()
|
||||
quota.QuotaSetOperation._get_project.side_effect = self._get_project
|
||||
context.extract_context_from_environ = mock.Mock()
|
||||
context.extract_context_from_environ.side_effect = self._get_mock_ctx
|
||||
|
||||
def test_quota_set_update_show_defaults(self):
|
||||
self._mock_func_and_obj()
|
||||
|
||||
# show quota before update, should be equal to defaults
|
||||
url = self._url_for_quota_set(self.A.id, self.A.id)
|
||||
res = self.app.get(url, expect_errors=True)
|
||||
self.assertEqual(res.status_int, 200)
|
||||
json_body = jsonutils.loads(res.body)
|
||||
default_body = _make_body(tenant_id=self.A.id, root=True,
|
||||
**self.default_quota)
|
||||
result = self._DictIn(json_body['quota_set'],
|
||||
default_body['quota_set'])
|
||||
self.assertEqual(result, True)
|
||||
|
||||
# quota update with wrong parameter
|
||||
quota_a = dict(instances=5, cores=10, ram=25600)
|
||||
url = self._url_for_quota_set(self.A.id, self.A.id)
|
||||
res = self.app.put_json(url,
|
||||
{'quota_s': quota_a},
|
||||
expect_errors=True)
|
||||
self.assertIn(res.status_int, [400, 403, 404])
|
||||
|
||||
# quota update with non-admin
|
||||
self.ctx.is_admin = False
|
||||
quota_a = dict(instances=5, cores=10, ram=25600)
|
||||
url = self._url_for_quota_set(self.A.id, self.A.id)
|
||||
res = self.app.put_json(url,
|
||||
{'quota_set': quota_a},
|
||||
expect_errors=True)
|
||||
self.assertIn(res.status_int, [409])
|
||||
self.ctx.is_admin = True
|
||||
|
||||
        # update the quota and check the quota set returned by the update
|
||||
quota_a = dict(instances=5, cores=10, ram=25600)
|
||||
url = self._url_for_quota_set(self.A.id, self.A.id)
|
||||
res = self.app.put_json(url,
|
||||
{'quota_set': quota_a},
|
||||
expect_errors=True)
|
||||
json_body = jsonutils.loads(res.body)
|
||||
updated_a = _make_body(tenant_id=self.A.id, root=True,
|
||||
**self.default_quota)
|
||||
updated_a = _update_body(updated_a, root=True, **quota_a)
|
||||
result = self._DictIn(json_body['quota_set'], updated_a['quota_set'])
|
||||
self.assertEqual(result, True)
|
||||
|
||||
self.ctx.is_admin = False
|
||||
url = self._url_for_quota_set(self.A.id, self.A.id)
|
||||
res = self.app.get(url, expect_errors=True)
|
||||
self.assertIn(res.status_int, [400, 403, 404])
|
||||
self.ctx.is_admin = True
|
||||
|
||||
# show quota after update for child
|
||||
quota_b = dict(instances=3, cores=5, ram=12800)
|
||||
url = self._url_for_quota_set(self.A.id, self.B.id)
|
||||
res = self.app.put_json(url,
|
||||
{'quota_set': quota_b},
|
||||
expect_errors=True)
|
||||
json_body = jsonutils.loads(res.body)
|
||||
updated_b = _make_body(tenant_id=self.B.id, root=False,
|
||||
**self.default_quota)
|
||||
updated_b = _update_subproject_body(updated_b, root=False, **quota_b)
|
||||
result = self._DictIn(json_body['quota_set'], updated_b)
|
||||
self.assertEqual(result, True)
|
||||
|
||||
# show default quota after update
|
||||
url = self._url_for_quota_set(self.A.id, self.A.id)
|
||||
res = self.app.get(url + '/defaults', expect_errors=True)
|
||||
json_body = jsonutils.loads(res.body)
|
||||
result = self._DictIn(json_body['quota_set'],
|
||||
default_body['quota_set'])
|
||||
self.assertEqual(result, True)
|
||||
|
||||
# show default quota for child, should be all 0
|
||||
quota_c = {}
|
||||
url = self._url_for_quota_set(self.A.id, self.B.id)
|
||||
res = self.app.get(url + '/defaults', expect_errors=True)
|
||||
json_body = jsonutils.loads(res.body)
|
||||
updated_c = _make_body(tenant_id=self.B.id, root=False,
|
||||
**self.default_quota)
|
||||
updated_c = _update_subproject_body(updated_c, root=False, **quota_c)
|
||||
result = self._DictIn(json_body['quota_set'], updated_c)
|
||||
self.assertEqual(result, True)
|
||||
|
||||
# show quota after update
|
||||
url = self._url_for_quota_set(self.A.id, self.A.id)
|
||||
res = self.app.get(url, expect_errors=True)
|
||||
json_body = jsonutils.loads(res.body)
|
||||
result = self._DictIn(json_body['quota_set'], updated_a['quota_set'])
|
||||
self.assertEqual(result, True)
|
||||
|
||||
# show quota for child, should be equal to update_b
|
||||
url = self._url_for_quota_set(self.A.id, self.B.id)
|
||||
res = self.app.get(url, expect_errors=True)
|
||||
json_body = jsonutils.loads(res.body)
|
||||
result = self._DictIn(json_body['quota_set'], updated_b)
|
||||
self.assertEqual(result, True)
|
||||
|
||||
# delete with non-admin
|
||||
self.ctx.is_admin = False
|
||||
url = self._url_for_quota_set(self.A.id, self.A.id)
|
||||
res = self.app.delete(url, expect_errors=True)
|
||||
self.assertIn(res.status_int, [409])
|
||||
self.ctx.is_admin = True
|
||||
|
||||
# delete parent quota when child quota is not zero
|
||||
url = self._url_for_quota_set(self.A.id, self.A.id)
|
||||
res = self.app.delete(url, expect_errors=True)
|
||||
self.assertIn(res.status_int, [400, 403, 404])
|
||||
|
||||
# delete child quota
|
||||
url = self._url_for_quota_set(self.A.id, self.B.id)
|
||||
res = self.app.delete(url, expect_errors=True)
|
||||
self.assertEqual(res.status_int, 202)
|
||||
|
||||
# delete parent quota when child quota is deleted
|
||||
url = self._url_for_quota_set(self.A.id, self.A.id)
|
||||
res = self.app.delete(url, expect_errors=True)
|
||||
self.assertEqual(res.status_int, 202)
|
||||
|
||||
# show quota for parent after delete, equal to defaults
|
||||
url = self._url_for_quota_set(self.A.id, self.A.id)
|
||||
res = self.app.get(url, expect_errors=True)
|
||||
json_body = jsonutils.loads(res.body)
|
||||
result = self._DictIn(json_body['quota_set'],
|
||||
default_body['quota_set'])
|
||||
self.assertEqual(result, True)
|
||||
|
||||
# show quota for child after delete, should be all 0
|
||||
url = self._url_for_quota_set(self.A.id, self.B.id)
|
||||
res = self.app.get(url, expect_errors=True)
|
||||
json_body = jsonutils.loads(res.body)
|
||||
result = self._DictIn(json_body['quota_set'], updated_c)
|
||||
self.assertEqual(result, True)
|
||||
|
||||
def test_quota_detail_limits(self):
|
||||
self._mock_func_and_obj()
|
||||
|
||||
def _make_default_detail_body(tenant_id='foo'):
|
||||
resources = copy.copy(self.default_quota)
|
||||
|
||||
for k, v in self.default_quota.iteritems():
|
||||
resources[k] = {}
|
||||
resources[k]['limit'] = v
|
||||
resources[k]['reserved'] = 0
|
||||
resources[k]['in_use'] = 0
|
||||
|
||||
if tenant_id:
|
||||
resources['id'] = tenant_id
|
||||
|
||||
return resources
|
||||
|
||||
def _update_usage_in_default_detail(quota_item,
|
||||
reserved, in_use, **kw):
|
||||
kw[quota_item]['reserved'] = reserved
|
||||
kw[quota_item]['in_use'] = in_use
|
||||
return kw
|
||||
|
||||
# show quota usage before update, should be equal to defaults
|
||||
url = self._url_for_quota_set(self.A.id, self.A.id)
|
||||
res = self.app.get(url + '/detail', expect_errors=True)
|
||||
self.assertEqual(res.status_int, 200)
|
||||
json_body = jsonutils.loads(res.body)
|
||||
default_detail = _make_default_detail_body(self.A.id)
|
||||
result = self._DictIn(json_body['quota_set'], default_detail)
|
||||
self.assertEqual(result, True)
|
||||
|
||||
# show quota usage after reserve and in_use update
|
||||
inuse_opts = {'instances': 2, 'cores': 5}
|
||||
reserve_opts = {'instances': 3, 'cores': 3}
|
||||
self.ctx.project_id = self.A.id
|
||||
reservations = QUOTAS.reserve(self.ctx,
|
||||
project_id=self.A.id,
|
||||
**inuse_opts)
|
||||
QUOTAS.commit(self.ctx, reservations, self.A.id)
|
||||
QUOTAS.reserve(self.ctx, project_id=self.A.id, **reserve_opts)
|
||||
|
||||
url = self._url_for_quota_set(self.A.id, self.A.id)
|
||||
res = self.app.get(url + '/detail', expect_errors=True)
|
||||
self.assertEqual(res.status_int, 200)
|
||||
json_body = jsonutils.loads(res.body)
|
||||
default_detail = _make_default_detail_body(self.A.id)
|
||||
update_detail = _update_usage_in_default_detail(
|
||||
'instances',
|
||||
reserve_opts['instances'],
|
||||
inuse_opts['instances'],
|
||||
**default_detail)
|
||||
update_detail = _update_usage_in_default_detail(
|
||||
'cores',
|
||||
reserve_opts['cores'],
|
||||
inuse_opts['cores'],
|
||||
**update_detail)
|
||||
result = self._DictIn(json_body['quota_set'], update_detail)
|
||||
self.assertEqual(result, True)
|
||||
|
||||
# Wrong parameter
|
||||
url = '/v2.1/' + self.A.id + '/limits?_id=' + self.A.id
|
||||
res = self.app.get(url, expect_errors=True)
|
||||
self.assertIn(res.status_int, [400, 403, 404])
|
||||
|
||||
url = '/v2.1/' + self.A.id + '/limits'
|
||||
res = self.app.get(url, expect_errors=True)
|
||||
self.assertIn(res.status_int, [400, 403, 404])
|
||||
|
||||
self.ctx.is_admin = False
|
||||
url = '/v2.1/' + self.B.id + '/limits?tenant_id=' + self.C.id
|
||||
res = self.app.get(url, expect_errors=True)
|
||||
self.assertIn(res.status_int, [400, 403, 404])
|
||||
self.ctx.is_admin = True
|
||||
|
||||
# test absolute limits and usage
|
||||
url = '/v2.1/' + self.A.id + '/limits?tenant_id=' + self.A.id
|
||||
res = self.app.get(url, expect_errors=True)
|
||||
self.assertEqual(res.status_int, 200)
|
||||
json_body = jsonutils.loads(res.body)
|
||||
ret_limits = json_body['limits']['absolute']
|
||||
|
||||
absolute = {}
|
||||
absolute.update(quota_sets.build_absolute_limits(update_detail))
|
||||
absolute.update(quota_sets.build_used_limits(update_detail))
|
||||
|
||||
result = self._DictIn(absolute, ret_limits)
|
||||
self.assertEqual(result, True)
|
||||
|
||||
# test child limits, set child quota
|
||||
quota_b = dict(instances=3, cores=5)
|
||||
url = self._url_for_quota_set(self.A.id, self.B.id)
|
||||
res = self.app.put_json(url,
|
||||
{'quota_set': quota_b},
|
||||
expect_errors=True)
|
||||
json_body = jsonutils.loads(res.body)
|
||||
updated_b = _make_body(tenant_id=self.B.id, root=False,
|
||||
**self.default_quota)
|
||||
updated_b = _update_subproject_body(updated_b, root=False, **quota_b)
|
||||
result = self._DictIn(json_body['quota_set'], updated_b)
|
||||
self.assertEqual(result, True)
|
||||
|
||||
# test child limits, use and reserve child quota
|
||||
inuse_opts = {'instances': 1, 'cores': 1}
|
||||
reserve_opts = {'instances': 1, 'cores': 2}
|
||||
self.ctx.project_id = self.A.id
|
||||
reservations = QUOTAS.reserve(self.ctx,
|
||||
project_id=self.B.id,
|
||||
**inuse_opts)
|
||||
QUOTAS.commit(self.ctx, reservations, self.B.id)
|
||||
QUOTAS.reserve(self.ctx, project_id=self.B.id, **reserve_opts)
|
||||
url = self._url_for_quota_set(self.A.id, self.B.id)
|
||||
res = self.app.get(url + '/detail', expect_errors=True)
|
||||
self.assertEqual(res.status_int, 200)
|
||||
child_json_body = jsonutils.loads(res.body)
|
||||
|
||||
self.assertEqual(
|
||||
child_json_body['quota_set']['instances']['limit'],
|
||||
quota_b['instances'])
|
||||
self.assertEqual(
|
||||
child_json_body['quota_set']['instances']['in_use'],
|
||||
inuse_opts['instances'])
|
||||
self.assertEqual(
|
||||
child_json_body['quota_set']['instances']['reserved'],
|
||||
reserve_opts['instances'])
|
||||
|
||||
self.assertEqual(
|
||||
child_json_body['quota_set']['cores']['limit'],
|
||||
quota_b['cores'])
|
||||
self.assertEqual(
|
||||
child_json_body['quota_set']['cores']['in_use'],
|
||||
inuse_opts['cores'])
|
||||
self.assertEqual(
|
||||
child_json_body['quota_set']['cores']['reserved'],
|
||||
reserve_opts['cores'])
|
||||
|
||||
# test child limits, get child quota limits and compare
|
||||
url = '/v2.1/' + self.A.id + '/limits?tenant_id=' + self.B.id
|
||||
res = self.app.get(url, expect_errors=True)
|
||||
self.assertEqual(res.status_int, 200)
|
||||
json_body = jsonutils.loads(res.body)
|
||||
ret_limits = json_body['limits']['absolute']
|
||||
|
||||
self.assertEqual(
|
||||
ret_limits['maxTotalInstances'],
|
||||
quota_b['instances'])
|
||||
self.assertEqual(
|
||||
ret_limits['maxTotalCores'],
|
||||
quota_b['cores'])
|
||||
self.assertEqual(
|
||||
ret_limits['totalInstancesUsed'],
|
||||
inuse_opts['instances'] + reserve_opts['instances'])
|
||||
self.assertEqual(
|
||||
ret_limits['totalCoresUsed'],
|
||||
inuse_opts['cores'] + reserve_opts['cores'])
|
||||
|
||||
def _show_detail_exception(self, context, show_usage=False):
|
||||
for todo_exception in self.test_exception:
|
||||
if todo_exception['exception_raise'] == self.exception_string:
|
||||
e = getattr(t_exceptions, self.exception_string)
|
||||
raise e()
|
||||
|
||||
def test_quota_sets_exception_catch(self):
|
||||
|
||||
orig_show = quota.QuotaSetOperation.show_detail_quota
|
||||
quota.QuotaSetOperation.show_detail_quota = mock.Mock()
|
||||
quota.QuotaSetOperation.show_detail_quota.side_effect = \
|
||||
self._show_detail_exception
|
||||
|
||||
        # for each exception, check that the API maps it to the expected error code
|
||||
for todo_exception in self.test_exception:
|
||||
self.exception_string = todo_exception['exception_raise']
|
||||
|
||||
url = self._url_for_quota_set(self.A.id, self.A.id)
|
||||
|
||||
# exception raised in LimitsController
|
||||
res = self.app.get(url + '/detail', expect_errors=True)
|
||||
self.assertEqual(res.status_int, todo_exception['expected_error'])
|
||||
|
||||
# exception raised in QuotaSetController
|
||||
res = self.app.get(url, expect_errors=True)
|
||||
self.assertEqual(res.status_int, todo_exception['expected_error'])
|
||||
|
||||
quota.QuotaSetOperation.show_detail_quota = orig_show
|
||||
|
||||
def _url_for_quota_set(self, owner_tenant_id, target_tenant_id):
|
||||
return '/v2.1/' + owner_tenant_id + \
|
||||
'/os-quota-sets/' + target_tenant_id
|
||||
|
||||
def _DictIn(self, dict_small, dict_full):
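        # True only if every key of dict_small appears in dict_full with the same value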
|
||||
for k, v in dict_small.iteritems():
|
||||
if dict_full[k] != v:
|
||||
return False
|
||||
return True
|
@ -1,174 +0,0 @@
|
||||
# Copyright (c) 2015 Huawei Technologies Co., Ltd.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import pecan
|
||||
from pecan.configuration import set_config
|
||||
from pecan.testing import load_test_app
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_config import fixture as fixture_config
|
||||
from oslo_serialization import jsonutils
|
||||
from oslo_utils import uuidutils
|
||||
|
||||
from tricircle.nova_apigw import app
|
||||
from tricircle.tests import base
|
||||
|
||||
|
||||
OPT_GROUP_NAME = 'keystone_authtoken'
|
||||
cfg.CONF.import_group(OPT_GROUP_NAME, "keystonemiddleware.auth_token")
|
||||
|
||||
|
||||
class Nova_API_GW_FunctionalTest(base.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(Nova_API_GW_FunctionalTest, self).setUp()
|
||||
|
||||
self.addCleanup(set_config, {}, overwrite=True)
|
||||
|
||||
cfg.CONF.register_opts(app.common_opts)
|
||||
|
||||
self.CONF = self.useFixture(fixture_config.Config()).conf
|
||||
|
||||
self.CONF.set_override('auth_strategy', 'noauth')
|
||||
|
||||
self.app = self._make_app()
|
||||
|
||||
def _make_app(self, enable_acl=False):
|
||||
self.config = {
|
||||
'app': {
|
||||
'root': 'tricircle.nova_apigw.controllers.root.RootController',
|
||||
'modules': ['tricircle.nova_apigw'],
|
||||
'enable_acl': enable_acl,
|
||||
'errors': {
|
||||
400: '/error',
|
||||
'__force_dict__': True
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
return load_test_app(self.config)
|
||||
|
||||
def tearDown(self):
|
||||
super(Nova_API_GW_FunctionalTest, self).tearDown()
|
||||
cfg.CONF.unregister_opts(app.common_opts)
|
||||
pecan.set_config({}, overwrite=True)
|
||||
|
||||
|
||||
class TestRootController(Nova_API_GW_FunctionalTest):
|
||||
"""Test version listing on root URI."""
|
||||
|
||||
def test_get(self):
|
||||
response = self.app.get('/')
|
||||
self.assertEqual(response.status_int, 200)
|
||||
json_body = jsonutils.loads(response.body)
|
||||
versions = json_body.get('versions')
|
||||
self.assertEqual(2, len(versions))
|
||||
self.assertEqual(versions[0]["id"], "v2.0")
|
||||
self.assertEqual(versions[1]["min_version"], "2.1")
|
||||
self.assertEqual(versions[1]["id"], "v2.1")
|
||||
|
||||
def _test_method_returns_404(self, method):
|
||||
api_method = getattr(self.app, method)
|
||||
response = api_method('/', expect_errors=True)
|
||||
self.assertEqual(response.status_int, 404)
|
||||
|
||||
def test_post(self):
|
||||
self._test_method_returns_404('post')
|
||||
|
||||
def test_put(self):
|
||||
self._test_method_returns_404('put')
|
||||
|
||||
def test_patch(self):
|
||||
self._test_method_returns_404('patch')
|
||||
|
||||
def test_delete(self):
|
||||
self._test_method_returns_404('delete')
|
||||
|
||||
def test_head(self):
|
||||
self._test_method_returns_404('head')
|
||||
|
||||
|
||||
class TestV21Controller(Nova_API_GW_FunctionalTest):
|
||||
|
||||
def test_get(self):
|
||||
response = self.app.get('/v2.1/')
|
||||
self.assertEqual(response.status_int, 200)
|
||||
json_body = jsonutils.loads(response.body)
|
||||
version = json_body.get('version')
|
||||
self.assertEqual(version["min_version"], "2.1")
|
||||
self.assertEqual(version["id"], "v2.1")
|
||||
|
||||
def _test_method_returns_404(self, method):
|
||||
api_method = getattr(self.app, method)
|
||||
response = api_method('/', expect_errors=True)
|
||||
self.assertEqual(response.status_int, 404)
|
||||
|
||||
def test_post(self):
|
||||
self._test_method_returns_404('post')
|
||||
|
||||
def test_put(self):
|
||||
self._test_method_returns_404('put')
|
||||
|
||||
def test_patch(self):
|
||||
self._test_method_returns_404('patch')
|
||||
|
||||
def test_delete(self):
|
||||
self._test_method_returns_404('delete')
|
||||
|
||||
def test_head(self):
|
||||
self._test_method_returns_404('head')
|
||||
|
||||
|
||||
class TestErrors(Nova_API_GW_FunctionalTest):
|
||||
|
||||
def test_404(self):
|
||||
response = self.app.get('/assert_called_once', expect_errors=True)
|
||||
self.assertEqual(response.status_int, 404)
|
||||
|
||||
def test_bad_method(self):
|
||||
response = self.app.patch('/v2.1/123',
|
||||
expect_errors=True)
|
||||
self.assertEqual(response.status_int, 404)
|
||||
|
||||
|
||||
class TestRequestID(Nova_API_GW_FunctionalTest):
|
||||
|
||||
def test_request_id(self):
|
||||
response = self.app.get('/v2.1/')
|
||||
self.assertIn('x-openstack-request-id', response.headers)
|
||||
self.assertTrue(
|
||||
response.headers['x-openstack-request-id'].startswith('req-'))
|
||||
id_part = response.headers['x-openstack-request-id'].split('req-')[1]
|
||||
self.assertTrue(uuidutils.is_uuid_like(id_part))
|
||||
|
||||
|
||||
class TestKeystoneAuth(Nova_API_GW_FunctionalTest):
|
||||
|
||||
def setUp(self):
|
||||
super(Nova_API_GW_FunctionalTest, self).setUp()
|
||||
|
||||
self.addCleanup(set_config, {}, overwrite=True)
|
||||
|
||||
cfg.CONF.register_opts(app.common_opts)
|
||||
|
||||
self.CONF = self.useFixture(fixture_config.Config()).conf
|
||||
|
||||
cfg.CONF.set_override('auth_strategy', 'keystone')
|
||||
|
||||
self.app = self._make_app()
|
||||
|
||||
def test_auth_enforced(self):
|
||||
response = self.app.get('/v2.1/', expect_errors=True)
|
||||
self.assertEqual(response.status_int, 401)
|
@ -1,271 +0,0 @@
|
||||
# Copyright 2016 OpenStack Foundation.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from mock import patch
|
||||
import pecan
|
||||
import unittest
|
||||
|
||||
from tricircle.cinder_apigw.controllers import volume_type
|
||||
from tricircle.common import context
|
||||
from tricircle.db import api as db_api
|
||||
from tricircle.db import core
|
||||
|
||||
|
||||
class FakeResponse(object):
|
||||
def __new__(cls, code=500):
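        # record the status on the class so the object can serve as a fake response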
|
||||
cls.status = code
|
||||
cls.status_code = code
|
||||
return super(FakeResponse, cls).__new__(cls)
|
||||
|
||||
|
||||
class VolumeTypeTest(unittest.TestCase):
|
||||
def setUp(self):
|
||||
core.initialize()
|
||||
core.ModelBase.metadata.create_all(core.get_engine())
|
||||
self.context = context.get_admin_context()
|
||||
self.project_id = 'test_project'
|
||||
self.controller = volume_type.VolumeTypeController(self.project_id)
|
||||
|
||||
def _validate_error_code(self, res, code):
|
||||
self.assertEqual(code, res[res.keys()[0]]['code'])
|
||||
|
||||
@patch.object(pecan, 'response', new=FakeResponse)
|
||||
@patch.object(context, 'extract_context_from_environ')
|
||||
def test_post(self, mock_context):
|
||||
mock_context.return_value = self.context
|
||||
|
||||
body = {'volume_type': {'name': 'vol-type-001',
|
||||
'description': 'volume type 001',
|
||||
'os-volume-type-access:is_public': True,
|
||||
'extra_specs': {
|
||||
'capabilities': 'gpu',
|
||||
}}}
|
||||
self.controller.post(**body)
|
||||
res = db_api.volume_type_get_by_name(self.context, 'vol-type-001')
|
||||
|
||||
self.assertEqual('vol-type-001', res['name'])
|
||||
self.assertEqual('volume type 001', res['description'])
|
||||
capabilities = res['extra_specs']['capabilities']
|
||||
self.assertEqual('gpu', capabilities)
|
||||
|
||||
# failure case, only admin can create volume type
|
||||
self.context.is_admin = False
|
||||
res = self.controller.post(**body)
|
||||
self._validate_error_code(res, 403)
|
||||
|
||||
self.context.is_admin = True
|
||||
|
||||
# failure case, volume_type body is required
|
||||
body = {'name': 'vol-type-002'}
|
||||
res = self.controller.post(**body)
|
||||
self._validate_error_code(res, 400)
|
||||
|
||||
# failure case, volume type name is empty
|
||||
body = {'volume_type': {'name': '',
|
||||
'description': 'volume type 001',
|
||||
'os-volume-type-access:is_public': True,
|
||||
'extra_specs': {
|
||||
'capabilities': 'gpu',
|
||||
}}}
|
||||
res = self.controller.post(**body)
|
||||
self._validate_error_code(res, 400)
|
||||
|
||||
# failure case, volume type name has more than 255 characters
|
||||
body = {'volume_type': {'name': ('a' * 500),
|
||||
'description': 'volume type 001',
|
||||
'os-volume-type-access:is_public': True,
|
||||
'extra_specs': {
|
||||
'capabilities': 'gpu', }
|
||||
}
|
||||
}
|
||||
res = self.controller.post(**body)
|
||||
self._validate_error_code(res, 400)
|
||||
|
||||
# failure case, volume type description has more than 255 characters
|
||||
body = {'volume_type': {'name': 'vol-type-001',
|
||||
'description': ('a' * 500),
|
||||
'os-volume-type-access:is_public': True,
|
||||
'extra_specs': {
|
||||
'capabilities': 'gpu',
|
||||
}}}
|
||||
        res = self.controller.post(**body)
|
||||
self._validate_error_code(res, 400)
|
||||
|
||||
# failure case, is_public is invalid input
|
||||
body = {'volume_type': {'name': 'vol-type-001',
|
||||
'description': 'volume type 001',
|
||||
'os-volume-type-access:is_public': 'a',
|
||||
'extra_specs': {
|
||||
'capabilities': 'gpu',
|
||||
}}}
|
||||
res = self.controller.post(**body)
|
||||
self._validate_error_code(res, 400)
|
||||
|
||||
        # failure case, volume type name must be unique
|
||||
body = {'volume_type': {'name': 'vol-type-001',
|
||||
'description': 'volume type 001',
|
||||
'os-volume-type-access:is_public': True,
|
||||
'extra_specs': {
|
||||
'capabilities': 'gpu',
|
||||
}}}
|
||||
res = self.controller.post(**body)
|
||||
self._validate_error_code(res, 409)
|
||||
|
||||
@patch.object(pecan, 'response', new=FakeResponse)
|
||||
@patch.object(context, 'extract_context_from_environ')
|
||||
def test_get_one(self, mock_context):
|
||||
mock_context.return_value = self.context
|
||||
|
||||
body = {'volume_type': {'name': 'vol-type-001',
|
||||
'description': 'volume type 001',
|
||||
'os-volume-type-access:is_public': True,
|
||||
'extra_specs': {
|
||||
'capabilities': 'gpu',
|
||||
}}}
|
||||
self.controller.post(**body)
|
||||
vtype = db_api.volume_type_get_by_name(self.context, 'vol-type-001')
|
||||
res = self.controller.get_one(vtype['id'])['volume_type']
|
||||
|
||||
self.assertEqual('vol-type-001', res['name'])
|
||||
self.assertEqual('volume type 001', res['description'])
|
||||
capabilities = res['extra_specs']['capabilities']
|
||||
self.assertEqual('gpu', capabilities)
|
||||
|
||||
        # failure case, volume type does not exist
|
||||
fake_id = "Fake_ID"
|
||||
res = self.controller.get_one(fake_id)
|
||||
self._validate_error_code(res, 404)
|
||||
|
||||
# failure case, the volume type is private.
|
||||
body = {'volume_type': {'name': 'vol-type-002',
|
||||
'description': 'volume type 002',
|
||||
'os-volume-type-access:is_public': False,
|
||||
'extra_specs': {
|
||||
'capabilities': 'gpu',
|
||||
}}}
|
||||
self.controller.post(**body)
|
||||
vtype = db_api.volume_type_get_by_name(self.context, 'vol-type-002')
|
||||
self.context.is_admin = False
|
||||
res = self.controller.get_one(vtype['id'])
|
||||
self._validate_error_code(res, 404)
|
||||
|
||||
@patch.object(context, 'extract_context_from_environ')
|
||||
def test_get_all(self, mock_context):
|
||||
mock_context.return_value = self.context
|
||||
|
||||
volume_type_001 = {'volume_type': {'name': 'vol-type-001',
|
||||
'description': 'volume type 001',
|
||||
'os-volume-'
|
||||
'type-access:is_public': True,
|
||||
'extra_specs': {
|
||||
'capabilities': 'gpu',
|
||||
}}}
|
||||
volume_type_002 = {'volume_type': {'name': 'vol-type-002',
|
||||
'description': 'volume type 002',
|
||||
'os-volume-'
|
||||
'type-access:is_public': True,
|
||||
'extra_specs': {
|
||||
'capabilities': 'gpu',
|
||||
}}}
|
||||
self.controller.post(**volume_type_001)
|
||||
self.controller.post(**volume_type_002)
|
||||
volume_types = self.controller.get_all()['volume_types']
|
||||
|
||||
self.assertEqual('vol-type-001', volume_types[0]['name'])
|
||||
self.assertEqual('volume type 001', volume_types[0]['description'])
|
||||
capabilities_001 = volume_types[0]['extra_specs']['capabilities']
|
||||
self.assertEqual('gpu', capabilities_001)
|
||||
|
||||
self.assertEqual('vol-type-002', volume_types[1]['name'])
|
||||
self.assertEqual('volume type 002', volume_types[1]['description'])
|
||||
capabilities_002 = volume_types[1]['extra_specs']['capabilities']
|
||||
self.assertEqual('gpu', capabilities_002)
|
||||
|
||||
@patch.object(pecan, 'response', new=FakeResponse)
|
||||
@patch.object(context, 'extract_context_from_environ')
|
||||
def test_put(self, mock_context):
|
||||
mock_context.return_value = self.context
|
||||
|
||||
body = {'volume_type': {'name': 'vol-type-001',
|
||||
'description': 'volume type 001',
|
||||
'os-volume-type-access:is_public': True,
|
||||
'extra_specs': {
|
||||
'capabilities': 'gpu',
|
||||
}}}
|
||||
body_update = {'volume_type': {'name': 'vol-type-002',
|
||||
'description': 'volume type 002',
|
||||
'os-volume-'
|
||||
'type-access:is_public': True,
|
||||
'extra_specs': {
|
||||
'capabilities': 'gpu',
|
||||
}}}
|
||||
self.controller.post(**body)
|
||||
vtype = db_api.volume_type_get_by_name(self.context, 'vol-type-001')
|
||||
res = self.controller.put(vtype['id'], **body_update)['volume_type']
|
||||
|
||||
self.assertEqual('vol-type-002', res['name'])
|
||||
self.assertEqual('volume type 002', res['description'])
|
||||
capabilities = res['extra_specs']['capabilities']
|
||||
self.assertEqual('gpu', capabilities)
|
||||
|
||||
        # failure case, volume type name, description and is_public
        # must not all be None at the same time
|
||||
body = {'volume_type': {'extra_specs': {
|
||||
'capabilities': 'gpu',
|
||||
}}}
|
||||
res = self.controller.put(vtype['id'], **body)
|
||||
self._validate_error_code(res, 400)
|
||||
# failure case, name exists in db
|
||||
body = {'volume_type': {'name': 'vol-type-003',
|
||||
'description': 'volume type 003',
|
||||
'os-volume-type-access:is_public': True,
|
||||
'extra_specs': {
|
||||
'capabilities': 'gpu',
|
||||
}}}
|
||||
self.controller.post(**body)
|
||||
res = self.controller.put(vtype['id'], **body)
|
||||
self._validate_error_code(res, 500)
|
||||
|
||||
@patch.object(pecan, 'response', new=FakeResponse)
|
||||
@patch.object(db_api, 'volume_type_delete')
|
||||
@patch.object(context, 'extract_context_from_environ')
|
||||
def test_delete(self, mock_context, mock_delete):
|
||||
mock_context.return_value = self.context
|
||||
mock_delete.return_value = Exception()
|
||||
|
||||
body = {'volume_type': {'name': 'vol-type-001',
|
||||
'description': 'volume type 001',
|
||||
'os-volume-type-access:is_public': True,
|
||||
'extra_specs': {
|
||||
'capabilities': 'gpu',
|
||||
}}}
|
||||
self.controller.post(**body)
|
||||
vtype = db_api.volume_type_get_by_name(self.context, 'vol-type-001')
|
||||
|
||||
        # failure case, only admin can delete a volume type
|
||||
self.context.is_admin = False
|
||||
res = self.controller.delete(vtype['id'])
|
||||
self._validate_error_code(res, 403)
|
||||
|
||||
        # failure case, volume type not found when id is None
|
||||
self.context.is_admin = True
|
||||
res = self.controller.delete(_id=None)
|
||||
self._validate_error_code(res, 404)
|
||||
|
||||
res = self.controller.delete(vtype['id'])
|
||||
self.assertEqual(res.status, 202)
|
||||
|
||||
def tearDown(self):
|
||||
core.ModelBase.metadata.drop_all(core.get_engine())
|
@ -1,306 +0,0 @@
|
||||
# Copyright (c) 2015 Huawei Tech. Co., Ltd.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from mock import patch
|
||||
import pecan
|
||||
import unittest
|
||||
|
||||
from cinderclient.client import HTTPClient
|
||||
from oslo_utils import uuidutils
|
||||
|
||||
from tricircle.cinder_apigw.controllers import volume_actions as action
|
||||
from tricircle.common import constants
|
||||
from tricircle.common import context
|
||||
from tricircle.common import exceptions
|
||||
from tricircle.db import api
|
||||
from tricircle.db import core
|
||||
from tricircle.db import models
|
||||
|
||||
|
||||
class FakeResponse(object):
|
||||
def __new__(cls, code=500):
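        # record the status on the class so the object can serve as a fake response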
|
||||
cls.status = code
|
||||
cls.status_code = code
|
||||
return super(FakeResponse, cls).__new__(cls)
|
||||
|
||||
|
||||
class VolumeActionTest(unittest.TestCase):
|
||||
def setUp(self):
|
||||
core.initialize()
|
||||
core.ModelBase.metadata.create_all(core.get_engine())
|
||||
self.context = context.Context()
|
||||
self.project_id = 'test_project'
|
||||
self.context.tenant = self.project_id
|
||||
self.controller = action.VolumeActionController(self.project_id, '')
|
||||
|
||||
def _prepare_pod(self, bottom_pod_num=1):
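        # create one top pod and the requested number of bottom pods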
|
||||
t_pod = {'pod_id': 't_pod_uuid', 'pod_name': 't_region',
|
||||
'az_name': ''}
|
||||
api.create_pod(self.context, t_pod)
|
||||
b_pods = []
|
||||
if bottom_pod_num == 1:
|
||||
b_pod = {'pod_id': 'b_pod_uuid', 'pod_name': 'b_region',
|
||||
'az_name': 'b_az'}
|
||||
api.create_pod(self.context, b_pod)
|
||||
b_pods.append(b_pod)
|
||||
else:
|
||||
for i in xrange(1, bottom_pod_num + 1):
|
||||
b_pod = {'pod_id': 'b_pod_%d_uuid' % i,
|
||||
'pod_name': 'b_region_%d' % i,
|
||||
'az_name': 'b_az_%d' % i}
|
||||
api.create_pod(self.context, b_pod)
|
||||
b_pods.append(b_pod)
|
||||
return t_pod, b_pods
|
||||
|
||||
def _prepare_pod_service(self, pod_id, service):
|
||||
config_dict = {'service_id': uuidutils.generate_uuid(),
|
||||
'pod_id': pod_id,
|
||||
'service_type': service,
|
||||
'service_url': 'fake_pod_service'}
|
||||
api.create_pod_service_configuration(self.context, config_dict)
|
||||
|
||||
|
||||
def _prepare_volume(self, pod):
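        # route a new top volume id to an identical bottom id in the given pod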
|
||||
t_volume_id = uuidutils.generate_uuid()
|
||||
b_volume_id = t_volume_id
|
||||
with self.context.session.begin():
|
||||
core.create_resource(
|
||||
self.context, models.ResourceRouting,
|
||||
{'top_id': t_volume_id, 'bottom_id': b_volume_id,
|
||||
'pod_id': pod['pod_id'], 'project_id': self.project_id,
|
||||
'resource_type': constants.RT_VOLUME})
|
||||
return t_volume_id
|
||||
|
||||
def _prepare_server(self, pod):
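        # route a new top server id to an identical bottom id in the given pod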
|
||||
t_server_id = uuidutils.generate_uuid()
|
||||
b_server_id = t_server_id
|
||||
with self.context.session.begin():
|
||||
core.create_resource(
|
||||
self.context, models.ResourceRouting,
|
||||
{'top_id': t_server_id, 'bottom_id': b_server_id,
|
||||
'pod_id': pod['pod_id'], 'project_id': self.project_id,
|
||||
'resource_type': constants.RT_SERVER})
|
||||
return t_server_id
|
||||
|
||||
@patch.object(pecan, 'response', new=FakeResponse)
|
||||
@patch.object(context, 'extract_context_from_environ')
|
||||
def test_action_not_supported(self, mock_context):
|
||||
mock_context.return_value = self.context
|
||||
|
||||
body = {'unsupported_action': ''}
|
||||
res = self.controller.post(**body)
|
||||
self.assertEqual('Volume action not supported',
|
||||
res['badRequest']['message'])
|
||||
self.assertEqual(400, res['badRequest']['code'])
|
||||
|
||||
@patch.object(pecan, 'response', new=FakeResponse)
|
||||
@patch.object(context, 'extract_context_from_environ')
|
||||
def test_action_volume_not_found(self, mock_context):
|
||||
mock_context.return_value = self.context
|
||||
|
||||
body = {'os-extend': ''}
|
||||
self.controller.volume_id = 'Fake_volume_id'
|
||||
res = self.controller.post(**body)
|
||||
self.assertEqual(
|
||||
'Volume %(volume_id)s could not be found.' % {
|
||||
'volume_id': self.controller.volume_id},
|
||||
res['itemNotFound']['message'])
|
||||
self.assertEqual(404, res['itemNotFound']['code'])
|
||||
|
||||
@patch.object(pecan, 'response', new=FakeResponse)
|
||||
@patch.object(HTTPClient, 'post')
|
||||
@patch.object(context, 'extract_context_from_environ')
|
||||
def test_action_exception(self, mock_context, mock_action):
|
||||
mock_context.return_value = self.context
|
||||
t_pod, b_pods = self._prepare_pod()
|
||||
self._prepare_pod_service(b_pods[0]['pod_id'], constants.ST_CINDER)
|
||||
t_volume_id = self._prepare_volume(b_pods[0])
|
||||
self.controller.volume_id = t_volume_id
|
||||
|
||||
mock_action.side_effect = exceptions.HTTPForbiddenError(
|
||||
msg='Volume operation forbidden')
|
||||
body = {'os-extend': {'new_size': 2}}
|
||||
res = self.controller.post(**body)
|
||||
# this is the message of HTTPForbiddenError exception
|
||||
self.assertEqual('Volume operation forbidden',
|
||||
res['forbidden']['message'])
|
||||
# this is the code of HTTPForbiddenError exception
|
||||
self.assertEqual(403, res['forbidden']['code'])
|
||||
|
||||
mock_action.side_effect = exceptions.ServiceUnavailable
|
||||
body = {'os-extend': {'new_size': 2}}
|
||||
res = self.controller.post(**body)
|
||||
# this is the message of ServiceUnavailable exception
|
||||
self.assertEqual('The service is unavailable',
|
||||
res['internalServerError']['message'])
|
||||
# code is 500 by default
|
||||
self.assertEqual(500, res['internalServerError']['code'])
|
||||
|
||||
mock_action.side_effect = Exception
|
||||
body = {'os-extend': {'new_size': 2}}
|
||||
res = self.controller.post(**body)
|
||||
# use default message if exception's message is empty
|
||||
self.assertEqual('Action os-extend on volume %s fails' % t_volume_id,
|
||||
res['internalServerError']['message'])
|
||||
# code is 500 by default
|
||||
self.assertEqual(500, res['internalServerError']['code'])
|
||||
|
||||
@patch.object(pecan, 'response', new=FakeResponse)
|
||||
@patch.object(HTTPClient, 'post')
|
||||
@patch.object(context, 'extract_context_from_environ')
|
||||
def test_extend_action(self, mock_context, mock_action):
|
||||
mock_context.return_value = self.context
|
||||
mock_action.return_value = (FakeResponse(202), None)
|
||||
t_pod, b_pods = self._prepare_pod()
|
||||
self._prepare_pod_service(b_pods[0]['pod_id'], constants.ST_CINDER)
|
||||
t_volume_id = self._prepare_volume(b_pods[0])
|
||||
self.controller.volume_id = t_volume_id
|
||||
|
||||
body = {'os-extend': {'new_size': 2}}
|
||||
res = self.controller.post(**body)
|
||||
url = '/volumes/%s/action' % t_volume_id
|
||||
mock_action.assert_called_once_with(url, body=body)
|
||||
self.assertEqual(202, res.status)
|
||||
|
||||
@patch.object(pecan, 'response', new=FakeResponse)
|
||||
@patch.object(HTTPClient, 'post')
|
||||
@patch.object(context, 'extract_context_from_environ')
|
||||
def test_attach_action(self, mock_context, mock_action):
|
||||
mock_context.return_value = self.context
|
||||
mock_action.return_value = (FakeResponse(202), None)
|
||||
|
||||
t_pod, b_pods = self._prepare_pod()
|
||||
self._prepare_pod_service(b_pods[0]['pod_id'], constants.ST_CINDER)
|
||||
t_volume_id = self._prepare_volume(b_pods[0])
|
||||
t_server_id = self._prepare_server(b_pods[0])
|
||||
self.controller.volume_id = t_volume_id
|
||||
|
||||
body = {'os-attach': {
|
||||
'instance_uuid': t_server_id,
|
||||
'mountpoint': '/dev/vdc'
|
||||
}}
|
||||
res = self.controller.post(**body)
|
||||
url = '/volumes/%s/action' % t_volume_id
|
||||
mock_action.assert_called_once_with(url, body=body)
|
||||
self.assertEqual(202, res.status)
|
||||
|
||||
@patch.object(pecan, 'response', new=FakeResponse)
|
||||
@patch.object(HTTPClient, 'post')
|
||||
@patch.object(context, 'extract_context_from_environ')
|
||||
def test_force_detach_volume_action(self, mock_context, mock_action):
|
||||
mock_context.return_value = self.context
|
||||
mock_action.return_value = (FakeResponse(202), None)
|
||||
|
||||
t_pod, b_pods = self._prepare_pod()
|
||||
self._prepare_pod_service(b_pods[0]['pod_id'], constants.ST_CINDER)
|
||||
t_volume_id = self._prepare_volume(b_pods[0])
|
||||
t_server_id = self._prepare_server(b_pods[0])
|
||||
self.controller.volume_id = t_volume_id
|
||||
body = {"os-force_detach": {
|
||||
"attachment_id": t_server_id,
|
||||
"connector": {
|
||||
"initiator": "iqn.2012-07.org.fake:01"}}}
|
||||
res = self.controller.post(**body)
|
||||
url = '/volumes/%s/action' % t_volume_id
|
||||
mock_action.assert_called_once_with(url, body=body)
|
||||
self.assertEqual(202, res.status)
|
||||
|
||||
@patch.object(pecan, 'response', new=FakeResponse)
|
||||
@patch.object(HTTPClient, 'post')
|
||||
@patch.object(context, 'extract_context_from_environ')
|
||||
def test_reset_status_action(self, mock_context, mock_action):
|
||||
mock_context.return_value = self.context
|
||||
mock_action.return_value = (FakeResponse(202), None)
|
||||
|
||||
t_pod, b_pods = self._prepare_pod()
|
||||
self._prepare_pod_service(b_pods[0]['pod_id'], constants.ST_CINDER)
|
||||
t_volume_id = self._prepare_volume(b_pods[0])
|
||||
self.controller.volume_id = t_volume_id
|
||||
|
||||
body = {"os-reset_status": {
|
||||
"status": "available",
|
||||
"attach_status": "detached",
|
||||
"migration_status": "migrating"
|
||||
}}
|
||||
res = self.controller.post(**body)
|
||||
url = '/volumes/%s/action' % t_volume_id
|
||||
mock_action.assert_called_once_with(url, body=body)
|
||||
self.assertEqual(202, res.status)
|
||||
|
||||
@patch.object(pecan, 'response', new=FakeResponse)
|
||||
@patch.object(HTTPClient, 'post')
|
||||
@patch.object(context, 'extract_context_from_environ')
|
||||
def test_set_image_metadata_action(self, mock_context, mock_action):
|
||||
mock_context.return_value = self.context
|
||||
mock_action.return_value = (FakeResponse(202), None)
|
||||
|
||||
t_pod, b_pods = self._prepare_pod()
|
||||
self._prepare_pod_service(b_pods[0]['pod_id'], constants.ST_CINDER)
|
||||
t_volume_id = self._prepare_volume(b_pods[0])
|
||||
self.controller.volume_id = t_volume_id
|
||||
|
||||
body = {"os-set_image_metadata": {
|
||||
"metadata": {
|
||||
"image_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c",
|
||||
"image_name": "image",
|
||||
"kernel_id": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
|
||||
"ramdisk_id": "somedisk"
|
||||
}
|
||||
}}
|
||||
res = self.controller.post(**body)
|
||||
url = '/volumes/%s/action' % t_volume_id
|
||||
mock_action.assert_called_once_with(url, body=body)
|
||||
self.assertEqual(202, res.status)
|
||||
|
||||
@patch.object(pecan, 'response', new=FakeResponse)
|
||||
@patch.object(HTTPClient, 'post')
|
||||
@patch.object(context, 'extract_context_from_environ')
|
||||
def test_unset_image_metadata_action(self, mock_context, mock_action):
|
||||
mock_context.return_value = self.context
|
||||
mock_action.return_value = (FakeResponse(202), None)
|
||||
|
||||
t_pod, b_pods = self._prepare_pod()
|
||||
self._prepare_pod_service(b_pods[0]['pod_id'], constants.ST_CINDER)
|
||||
t_volume_id = self._prepare_volume(b_pods[0])
|
||||
self.controller.volume_id = t_volume_id
|
||||
|
||||
body = {"os-unset_image_metadata": {
|
||||
'key': 'image_name'
|
||||
}}
|
||||
res = self.controller.post(**body)
|
||||
url = '/volumes/%s/action' % t_volume_id
|
||||
mock_action.assert_called_once_with(url, body=body)
|
||||
self.assertEqual(202, res.status)
|
||||
|
||||
@patch.object(pecan, 'response', new=FakeResponse)
|
||||
@patch.object(HTTPClient, 'post')
|
||||
@patch.object(context, 'extract_context_from_environ')
|
||||
def test_show_image_metadata_action(self, mock_context, mock_action):
|
||||
mock_context.return_value = self.context
|
||||
mock_action.return_value = (FakeResponse(202), None)
|
||||
|
||||
t_pod, b_pods = self._prepare_pod()
|
||||
self._prepare_pod_service(b_pods[0]['pod_id'], constants.ST_CINDER)
|
||||
t_volume_id = self._prepare_volume(b_pods[0])
|
||||
self.controller.volume_id = t_volume_id
|
||||
|
||||
body = {"os-show_image_metadata": None}
|
||||
res = self.controller.post(**body)
|
||||
url = '/volumes/%s/action' % t_volume_id
|
||||
mock_action.assert_called_once_with(url, body=body)
|
||||
self.assertEqual(202, res.status)
|
||||
|
||||
def tearDown(self):
|
||||
core.ModelBase.metadata.drop_all(core.get_engine())
|
@ -1,141 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tricircle.common import context
|
||||
from tricircle.common.scheduler import filter_scheduler
|
||||
from tricircle.db import api
|
||||
from tricircle.db import core
|
||||
from tricircle.db import models
|
||||
import unittest
|
||||
|
||||
|
||||
class FilterSchedulerTest(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
core.initialize()
|
||||
core.ModelBase.metadata.create_all(core.get_engine())
|
||||
self.context = context.Context()
|
||||
self.project_id = 'test_fs_project'
|
||||
self.az_name_1 = 'b_az_fs_1'
|
||||
self.az_name_2 = 'b_az_fs_2'
|
||||
self.filter_scheduler = filter_scheduler.FilterScheduler()
|
||||
|
||||
def _prepare_binding(self, pod_id):
|
||||
binding = {'tenant_id': self.project_id,
|
||||
'pod_id': pod_id,
|
||||
'is_binding': True}
|
||||
api.create_pod_binding(self.context, self.project_id, pod_id)
|
||||
return binding
|
||||
|
||||
def test_select_destination(self):
|
||||
b_pod_1 = {'pod_id': 'b_pod_fs_uuid_1', 'pod_name': 'b_region_fs_1',
|
||||
'az_name': self.az_name_1}
|
||||
api.create_pod(self.context, b_pod_1)
|
||||
b_pod_2 = {'pod_id': 'b_pod_fs_uuid_2', 'pod_name': 'b_region_fs_2',
|
||||
'az_name': self.az_name_2}
|
||||
api.create_pod(self.context, b_pod_2)
|
||||
b_pod_3 = {'pod_id': 'b_pod_fs_uuid_3', 'pod_name': 'b_region_fs_3',
|
||||
'az_name': self.az_name_2}
|
||||
api.create_pod(self.context, b_pod_3)
|
||||
|
||||
t_pod = {'pod_id': 'b_pod_fs_uuid_t_pod',
|
||||
'pod_name': 'b_region_fs_t_pod',
|
||||
'az_name': ''}
|
||||
api.create_pod(self.context, t_pod)
|
||||
self._prepare_binding(b_pod_1['pod_id'])
|
||||
binding_q = core.query_resource(
|
||||
self.context, models.PodBinding, [{'key': 'tenant_id',
|
||||
'comparator': 'eq',
|
||||
'value': self.project_id}], [])
|
||||
self.assertEqual(binding_q[0]['pod_id'], b_pod_1['pod_id'])
|
||||
self.assertEqual(binding_q[0]['tenant_id'], self.project_id)
|
||||
self.assertEqual(binding_q[0]['is_binding'], True)
|
||||
|
||||
pod_1, _ = self.filter_scheduler.select_destination(
|
||||
self.context, '', self.project_id, pod_group='')
|
||||
self.assertEqual(pod_1['pod_id'], b_pod_1['pod_id'])
|
||||
binding_q = core.query_resource(
|
||||
self.context, models.PodBinding, [{'key': 'tenant_id',
|
||||
'comparator': 'eq',
|
||||
'value': self.project_id}], [])
|
||||
self.assertEqual(len(binding_q), 1)
|
||||
self.assertEqual(binding_q[0]['pod_id'], pod_1['pod_id'])
|
||||
self.assertEqual(binding_q[0]['tenant_id'], self.project_id)
|
||||
self.assertEqual(binding_q[0]['is_binding'], True)
|
||||
|
||||
pod_2, _ = self.filter_scheduler.select_destination(
|
||||
self.context, '', 'new_project', pod_group='')
|
||||
binding_q = core.query_resource(
|
||||
self.context, models.PodBinding, [{'key': 'tenant_id',
|
||||
'comparator': 'eq',
|
||||
'value': 'new_project'}], [])
|
||||
self.assertEqual(len(binding_q), 1)
|
||||
self.assertEqual(binding_q[0]['pod_id'], pod_2['pod_id'])
|
||||
self.assertEqual(binding_q[0]['tenant_id'], 'new_project')
|
||||
self.assertEqual(binding_q[0]['is_binding'], True)
|
||||
|
||||
pod_3, _ = self.filter_scheduler.select_destination(
|
||||
self.context, self.az_name_1, 'new_project', pod_group='')
|
||||
binding_q = core.query_resource(
|
||||
self.context, models.PodBinding, [{'key': 'tenant_id',
|
||||
'comparator': 'eq',
|
||||
'value': 'new_project'}], [])
|
||||
self.assertEqual(len(binding_q), 1)
|
||||
self.assertEqual(binding_q[0]['pod_id'], pod_3['pod_id'])
|
||||
self.assertEqual(binding_q[0]['tenant_id'], 'new_project')
|
||||
self.assertEqual(binding_q[0]['is_binding'], True)
|
||||
|
||||
pod_4, _ = self.filter_scheduler.select_destination(
|
||||
self.context, self.az_name_2, 'new_project', pod_group='')
|
||||
binding_q = core.query_resource(
|
||||
self.context, models.PodBinding, [{'key': 'tenant_id',
|
||||
'comparator': 'eq',
|
||||
'value': 'new_project'}], [])
|
||||
self.assertEqual(len(binding_q), 2)
|
||||
self.assertEqual(binding_q[1]['pod_id'], pod_4['pod_id'])
|
||||
self.assertEqual(binding_q[1]['tenant_id'], 'new_project')
|
||||
self.assertEqual(binding_q[1]['is_binding'], True)
|
||||
|
||||
pod_5, _ = self.filter_scheduler.select_destination(
|
||||
self.context, self.az_name_2, self.project_id, pod_group='')
|
||||
binding_q = core.query_resource(
|
||||
self.context, models.PodBinding, [{'key': 'tenant_id',
|
||||
'comparator': 'eq',
|
||||
'value': self.project_id}], [])
|
||||
self.assertEqual(len(binding_q), 2)
|
||||
self.assertEqual(pod_5['az_name'], self.az_name_2)
|
||||
self.assertEqual(binding_q[1]['pod_id'], pod_5['pod_id'])
|
||||
self.assertEqual(binding_q[1]['tenant_id'], self.project_id)
|
||||
self.assertEqual(binding_q[1]['is_binding'], True)
|
||||
|
||||
pod_6, _ = self.filter_scheduler.select_destination(
|
||||
self.context, self.az_name_1, self.project_id, pod_group='test')
|
||||
binding_q = core.query_resource(
|
||||
self.context, models.PodBinding, [{'key': 'tenant_id',
|
||||
'comparator': 'eq',
|
||||
'value': self.project_id}], [])
|
||||
self.assertEqual(len(binding_q), 2)
|
||||
self.assertEqual(pod_6, None)
|
||||
|
||||
pod_7, _ = self.filter_scheduler.select_destination(
|
||||
self.context, self.az_name_2, self.project_id, pod_group='test')
|
||||
binding_q = core.query_resource(
|
||||
self.context, models.PodBinding, [{'key': 'tenant_id',
|
||||
'comparator': 'eq',
|
||||
'value': self.project_id}], [])
|
||||
self.assertEqual(len(binding_q), 3)
|
||||
self.assertEqual(pod_7['az_name'], self.az_name_2)
|
||||
self.assertEqual(binding_q[1]['tenant_id'], self.project_id)
|
||||
self.assertEqual(binding_q[1]['is_binding'], False)
|
||||
self.assertEqual(binding_q[2]['pod_id'], pod_7['pod_id'])
|
||||
self.assertEqual(binding_q[2]['tenant_id'], self.project_id)
|
||||
self.assertEqual(binding_q[2]['is_binding'], True)
|
@ -1,141 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from stevedore import driver
|
||||
|
||||
from tricircle.common import context
|
||||
from tricircle.db import api
|
||||
from tricircle.db import core
|
||||
from tricircle.db import models
|
||||
|
||||
import unittest
|
||||
|
||||
|
||||
class PodManagerTest(unittest.TestCase):
    def setUp(self):
        core.initialize()
        core.ModelBase.metadata.create_all(core.get_engine())
        self.context = context.Context()
        self.project_id = 'test_pm_project'
        self.az_name_2 = 'b_az_pm_2'
        self.az_name_1 = 'b_az_pm_1'
        self.pod_manager = driver.DriverManager(
            namespace='tricircle.common.schedulers',
            name='pod_manager',
            invoke_on_load=True
        ).driver
        self.b_pod_1 = {'pod_id': 'b_pod_pm_uuid_1',
                        'pod_name': 'b_region_pm_1',
                        'az_name': self.az_name_1}

        self.b_pod_2 = {'pod_id': 'b_pod_pm_uuid_2',
                        'pod_name': 'b_region_pm_2',
                        'az_name': self.az_name_2}

        self.b_pod_3 = {'pod_id': 'b_pod_pm_uuid_3',
                        'pod_name': 'b_region_pm_3',
                        'az_name': self.az_name_2}

        self.b_pod_4 = {'pod_id': 'b_pod_pm_uuid_4',
                        'pod_name': 'b_region_pm_4',
                        'az_name': self.az_name_2}

    def test_get_current_binding_and_pod(self):
        api.create_pod(self.context, self.b_pod_1)
        api.create_pod_binding(
            self.context, self.project_id, self.b_pod_1['pod_id'])

        pod_b_1, pod_1 = self.pod_manager.get_current_binding_and_pod(
            self.context, self.az_name_1, self.project_id, pod_group='')
        binding_q = core.query_resource(
            self.context, models.PodBinding,
            [{'key': 'tenant_id',
              'comparator': 'eq',
              'value': self.project_id}], [])
        self.assertEqual(len(binding_q), 1)
        self.assertEqual(binding_q[0]['id'], pod_b_1['id'])

        pod_b_2, pod_2 = self.pod_manager.get_current_binding_and_pod(
            self.context, self.az_name_1, 'new_project_pm_1', pod_group='')
        binding_q = core.query_resource(
            self.context, models.PodBinding,
            [{'key': 'tenant_id',
              'comparator': 'eq',
              'value': 'new_project_pm_1'}], [])
        self.assertEqual(len(binding_q), 0)
        self.assertEqual(pod_b_2, None)
        self.assertEqual(pod_2, None)

        pod_b_3, pod_3 = self.pod_manager.get_current_binding_and_pod(
            self.context, 'unknown_az', self.project_id, pod_group='')
        binding_q = core.query_resource(
            self.context, models.PodBinding,
            [{'key': 'tenant_id',
              'comparator': 'eq',
              'value': self.project_id}], [])
        self.assertEqual(len(binding_q), 1)
        self.assertEqual(pod_b_3, None)
        self.assertEqual(pod_3, None)

        pod_b_4, pod_4 = self.pod_manager.get_current_binding_and_pod(
            self.context, self.az_name_1, self.project_id, pod_group='test')
        binding_q = core.query_resource(
            self.context, models.PodBinding,
            [{'key': 'tenant_id',
              'comparator': 'eq',
              'value': self.project_id}], [])
        self.assertEqual(len(binding_q), 1)
        self.assertEqual(pod_b_4['id'], binding_q[0]['id'])
        self.assertEqual(pod_4, None)

    def test_create_binding(self):
        api.create_pod(self.context, self.b_pod_2)
        flag = self.pod_manager.create_binding(
            self.context, 'new_project_pm_2', self.b_pod_2['pod_id'])
        self.assertEqual(flag, True)
        binding_q = core.query_resource(
            self.context, models.PodBinding,
            [{'key': 'tenant_id',
              'comparator': 'eq',
              'value': 'new_project_pm_2'}], [])
        self.assertEqual(len(binding_q), 1)
        self.assertEqual(binding_q[0]['pod_id'], self.b_pod_2['pod_id'])
        self.assertEqual(binding_q[0]['tenant_id'], 'new_project_pm_2')
        self.assertEqual(binding_q[0]['is_binding'], True)

    def test_update_binding(self):
        api.create_pod(self.context, self.b_pod_4)
        api.create_pod(self.context, self.b_pod_3)
        flag = self.pod_manager.create_binding(
            self.context, 'new_project_pm_3', self.b_pod_3['pod_id'])
        self.assertEqual(flag, True)
        current_binding = core.query_resource(
            self.context, models.PodBinding,
            [{'key': 'tenant_id',
              'comparator': 'eq',
              'value': 'new_project_pm_3'}], [])

        flag = self.pod_manager.update_binding(
            self.context, current_binding[0], self.b_pod_4['pod_id'])
        self.assertEqual(flag, True)
        binding_q = core.query_resource(
            self.context, models.PodBinding,
            [{'key': 'tenant_id',
              'comparator': 'eq',
              'value': 'new_project_pm_3'}], [])
        self.assertEqual(len(binding_q), 2)
        self.assertEqual(binding_q[0]['pod_id'], self.b_pod_3['pod_id'])
        self.assertEqual(binding_q[0]['tenant_id'], 'new_project_pm_3')
        self.assertEqual(binding_q[0]['is_binding'], False)
        self.assertEqual(binding_q[1]['pod_id'], self.b_pod_4['pod_id'])
        self.assertEqual(binding_q[1]['tenant_id'], 'new_project_pm_3')
        self.assertEqual(binding_q[1]['is_binding'], True)
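
Every test class removed in this commit uses the same fixture: setUp() creates the Tricircle schema on an in-memory engine and tearDown() drops it, so each test starts from an empty database; the pod-manager test additionally loads the scheduler driver through stevedore. Below is a minimal, self-contained sketch of that fixture pattern using plain SQLAlchemy. The table name and columns are illustrative stand-ins, not Tricircle's real models, and the stevedore-loaded pod_manager driver is left out.

import unittest

from sqlalchemy import (Boolean, Column, MetaData, String, Table,
                        create_engine, select)

metadata = MetaData()
pod_binding = Table(
    'pod_binding', metadata,
    Column('id', String(36), primary_key=True),
    Column('tenant_id', String(36)),
    Column('pod_id', String(36)),
    Column('is_binding', Boolean))


class FixtureSketchTest(unittest.TestCase):
    def setUp(self):
        # counterpart of core.initialize() plus
        # core.ModelBase.metadata.create_all(core.get_engine()):
        # a throwaway schema on an in-memory SQLite engine per test
        self.engine = create_engine('sqlite://')
        metadata.create_all(self.engine)

    def tearDown(self):
        # counterpart of core.ModelBase.metadata.drop_all(core.get_engine())
        metadata.drop_all(self.engine)

    def test_binding_roundtrip(self):
        with self.engine.begin() as conn:
            conn.execute(pod_binding.insert().values(
                id='binding-1', tenant_id='tenant-1',
                pod_id='pod-1', is_binding=True))
            rows = conn.execute(
                select(pod_binding).where(
                    pod_binding.c.tenant_id == 'tenant-1')).fetchall()
        self.assertEqual(1, len(rows))
        self.assertEqual('pod-1', rows[0].pod_id)


if __name__ == '__main__':
    unittest.main()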
@ -1,473 +0,0 @@
# Copyright (c) 2015 Huawei Tech. Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from mock import patch
from novaclient.client import HTTPClient
import pecan
import unittest

from oslo_utils import uuidutils

from tricircle.common import client
from tricircle.common import constants
from tricircle.common import context
from tricircle.common import exceptions
from tricircle.db import api
from tricircle.db import core
from tricircle.db import models
from tricircle.nova_apigw.controllers import action


class FakeResponse(object):
    def __new__(cls, code=500):
        cls.status = code
        cls.status_code = code
        return super(FakeResponse, cls).__new__(cls)


class ActionTest(unittest.TestCase):
    def setUp(self):
        core.initialize()
        core.ModelBase.metadata.create_all(core.get_engine())
        self.context = context.Context()
        self.project_id = 'test_project'
        self.context.tenant = self.project_id
        self.controller = action.ActionController(self.project_id, '')

    def _prepare_pod(self, bottom_pod_num=1):
        t_pod = {'pod_id': 't_pod_uuid', 'pod_name': 't_region',
                 'az_name': ''}
        api.create_pod(self.context, t_pod)
        b_pods = []
        if bottom_pod_num == 1:
            b_pod = {'pod_id': 'b_pod_uuid', 'pod_name': 'b_region',
                     'az_name': 'b_az'}
            api.create_pod(self.context, b_pod)
            b_pods.append(b_pod)
        else:
            for i in xrange(1, bottom_pod_num + 1):
                b_pod = {'pod_id': 'b_pod_%d_uuid' % i,
                         'pod_name': 'b_region_%d' % i,
                         'az_name': 'b_az_%d' % i}
                api.create_pod(self.context, b_pod)
                b_pods.append(b_pod)
        return t_pod, b_pods

    def _prepare_pod_service(self, pod_id, service):
        config_dict = {'service_id': uuidutils.generate_uuid(),
                       'pod_id': pod_id,
                       'service_type': service,
                       'service_url': 'fake_pod_service'}
        api.create_pod_service_configuration(self.context, config_dict)

    def _prepare_server(self, pod):
        t_server_id = uuidutils.generate_uuid()
        b_server_id = t_server_id
        with self.context.session.begin():
            core.create_resource(
                self.context, models.ResourceRouting,
                {'top_id': t_server_id, 'bottom_id': b_server_id,
                 'pod_id': pod['pod_id'], 'project_id': self.project_id,
                 'resource_type': constants.RT_SERVER})
        return t_server_id

    def _validate_error_code(self, res, code):
        self.assertEqual(code, res[res.keys()[0]]['code'])

    @patch.object(pecan, 'response', new=FakeResponse)
    @patch.object(context, 'extract_context_from_environ')
    def test_action_not_supported(self, mock_context):
        mock_context.return_value = self.context

        body = {'unsupported_action': ''}
        res = self.controller.post(**body)
        self._validate_error_code(res, 400)

    @patch.object(pecan, 'response', new=FakeResponse)
    @patch.object(context, 'extract_context_from_environ')
    def test_action_server_not_found(self, mock_context):
        mock_context.return_value = self.context

        body = {'os-start': ''}
        res = self.controller.post(**body)
        self._validate_error_code(res, 404)

    @patch.object(pecan, 'response', new=FakeResponse)
    @patch.object(client.Client, 'action_resources')
    @patch.object(context, 'extract_context_from_environ')
    def test_action_exception(self, mock_context, mock_action):
        mock_context.return_value = self.context

        t_pod, b_pods = self._prepare_pod()
        t_server_id = self._prepare_server(b_pods[0])
        self.controller.server_id = t_server_id

        mock_action.side_effect = exceptions.HTTPForbiddenError(
            msg='Server operation forbidden')
        body = {'os-start': ''}
        res = self.controller.post(**body)
        self._validate_error_code(res, 403)

        mock_action.side_effect = exceptions.ServiceUnavailable
        body = {'os-start': ''}
        res = self.controller.post(**body)
        self._validate_error_code(res, 500)

        mock_action.side_effect = Exception
        body = {'os-start': ''}
        res = self.controller.post(**body)
        self._validate_error_code(res, 500)

    @patch.object(pecan, 'response', new=FakeResponse)
    @patch.object(client.Client, 'action_resources')
    @patch.object(context, 'extract_context_from_environ')
    def test_start_action(self, mock_context, mock_action):
        mock_context.return_value = self.context
        mock_action.return_value = (FakeResponse(202), None)

        t_pod, b_pods = self._prepare_pod()
        t_server_id = self._prepare_server(b_pods[0])
        self.controller.server_id = t_server_id

        body = {'os-start': ''}
        res = self.controller.post(**body)
        mock_action.assert_called_once_with(
            'server', self.context, 'start', t_server_id)
        self.assertEqual(202, res.status)

    @patch.object(pecan, 'response', new=FakeResponse)
    @patch.object(client.Client, 'action_resources')
    @patch.object(context, 'extract_context_from_environ')
    def test_stop_action(self, mock_context, mock_action):
        mock_context.return_value = self.context
        mock_action.return_value = (FakeResponse(202), None)

        t_pod, b_pods = self._prepare_pod()
        t_server_id = self._prepare_server(b_pods[0])
        self.controller.server_id = t_server_id

        body = {'os-stop': ''}
        res = self.controller.post(**body)
        mock_action.assert_called_once_with(
            'server', self.context, 'stop', t_server_id)
        self.assertEqual(202, res.status)

    @patch.object(pecan, 'response', new=FakeResponse)
    @patch.object(client.Client, 'action_resources')
    @patch.object(context, 'extract_context_from_environ')
    def test_force_delete_action(self, mock_context, mock_action):
        mock_context.return_value = self.context
        mock_action.return_value = (FakeResponse(202), None)

        t_pod, b_pods = self._prepare_pod()
        t_server_id = self._prepare_server(b_pods[0])
        self.controller.server_id = t_server_id

        body = {'forceDelete': ''}
        res = self.controller.post(**body)
        mock_action.assert_called_once_with(
            'server', self.context, 'force_delete', t_server_id)
        self.assertEqual(202, res.status)

    @patch.object(pecan, 'response', new=FakeResponse)
    @patch.object(client.Client, 'action_resources')
    @patch.object(context, 'extract_context_from_environ')
    def test_lock_action(self, mock_context, mock_action):
        mock_context.return_value = self.context
        mock_action.return_value = (FakeResponse(202), None)

        t_pod, b_pods = self._prepare_pod()
        t_server_id = self._prepare_server(b_pods[0])
        self.controller.server_id = t_server_id

        body = {'lock': ''}
        res = self.controller.post(**body)
        mock_action.assert_called_once_with(
            'server', self.context, 'lock', t_server_id)
        self.assertEqual(202, res.status)

    @patch.object(pecan, 'response', new=FakeResponse)
    @patch.object(client.Client, 'action_resources')
    @patch.object(context, 'extract_context_from_environ')
    def test_unlock_action(self, mock_context, mock_action):
        mock_context.return_value = self.context
        mock_action.return_value = (FakeResponse(202), None)

        t_pod, b_pods = self._prepare_pod()
        t_server_id = self._prepare_server(b_pods[0])
        self.controller.server_id = t_server_id

        body = {'unlock': ''}
        res = self.controller.post(**body)
        mock_action.assert_called_once_with(
            'server', self.context, 'unlock', t_server_id)
        self.assertEqual(202, res.status)

    @patch.object(pecan, 'response', new=FakeResponse)
    @patch.object(client.Client, 'action_resources')
    @patch.object(context, 'extract_context_from_environ')
    def test_pause_action(self, mock_context, mock_action):
        mock_context.return_value = self.context
        mock_action.return_value = (FakeResponse(202), None)

        t_pod, b_pods = self._prepare_pod()
        t_server_id = self._prepare_server(b_pods[0])
        self.controller.server_id = t_server_id

        body = {'pause': ''}
        res = self.controller.post(**body)
        mock_action.assert_called_once_with(
            'server', self.context, 'pause', t_server_id)
        self.assertEqual(202, res.status)

    @patch.object(pecan, 'response', new=FakeResponse)
    @patch.object(client.Client, 'action_resources')
    @patch.object(context, 'extract_context_from_environ')
    def test_unpause_action(self, mock_context, mock_action):
        mock_context.return_value = self.context
        mock_action.return_value = (FakeResponse(202), None)

        t_pod, b_pods = self._prepare_pod()
        t_server_id = self._prepare_server(b_pods[0])
        self.controller.server_id = t_server_id

        body = {'unpause': ''}
        res = self.controller.post(**body)
        mock_action.assert_called_once_with(
            'server', self.context, 'unpause', t_server_id)
        self.assertEqual(202, res.status)

    @patch.object(pecan, 'response', new=FakeResponse)
    @patch.object(client.Client, 'action_resources')
    @patch.object(context, 'extract_context_from_environ')
    def test_suspend_action(self, mock_context, mock_action):
        mock_context.return_value = self.context
        mock_action.return_value = (FakeResponse(202), None)

        t_pod, b_pods = self._prepare_pod()
        t_server_id = self._prepare_server(b_pods[0])
        self.controller.server_id = t_server_id

        body = {'suspend': ''}
        res = self.controller.post(**body)
        mock_action.assert_called_once_with(
            'server', self.context, 'suspend', t_server_id)
        self.assertEqual(202, res.status)

    @patch.object(pecan, 'response', new=FakeResponse)
    @patch.object(client.Client, 'action_resources')
    @patch.object(context, 'extract_context_from_environ')
    def test_resume_action(self, mock_context, mock_action):
        mock_context.return_value = self.context
        mock_action.return_value = (FakeResponse(202), None)

        t_pod, b_pods = self._prepare_pod()
        t_server_id = self._prepare_server(b_pods[0])
        self.controller.server_id = t_server_id

        body = {'resume': ''}
        res = self.controller.post(**body)
        mock_action.assert_called_once_with(
            'server', self.context, 'resume', t_server_id)
        self.assertEqual(202, res.status)

    @patch.object(pecan, 'response', new=FakeResponse)
    @patch.object(client.Client, 'action_resources')
    @patch.object(context, 'extract_context_from_environ')
    def test_shelveOffload_action(self, mock_context, mock_action):
        mock_context.return_value = self.context
        mock_action.return_value = (FakeResponse(202), None)

        t_pod, b_pods = self._prepare_pod()
        t_server_id = self._prepare_server(b_pods[0])
        self.controller.server_id = t_server_id

        body = {'shelveOffload': ''}
        res = self.controller.post(**body)
        mock_action.assert_called_once_with(
            'server', self.context, 'shelve_offload', t_server_id)
        self.assertEqual(202, res.status)

    @patch.object(pecan, 'response', new=FakeResponse)
    @patch.object(client.Client, 'action_resources')
    @patch.object(context, 'extract_context_from_environ')
    def test_shelve_action(self, mock_context, mock_action):
        mock_context.return_value = self.context
        mock_action.return_value = (FakeResponse(202), None)

        t_pod, b_pods = self._prepare_pod()
        t_server_id = self._prepare_server(b_pods[0])
        self.controller.server_id = t_server_id

        body = {'shelve': ''}
        res = self.controller.post(**body)
        mock_action.assert_called_once_with(
            'server', self.context, 'shelve', t_server_id)
        self.assertEqual(202, res.status)

    @patch.object(pecan, 'response', new=FakeResponse)
    @patch.object(client.Client, 'action_resources')
    @patch.object(context, 'extract_context_from_environ')
    def test_unshelve_action(self, mock_context, mock_action):
        mock_context.return_value = self.context
        mock_action.return_value = (FakeResponse(202), None)

        t_pod, b_pods = self._prepare_pod()
        t_server_id = self._prepare_server(b_pods[0])
        self.controller.server_id = t_server_id

        body = {'unshelve': ''}
        res = self.controller.post(**body)
        mock_action.assert_called_once_with(
            'server', self.context, 'unshelve', t_server_id)
        self.assertEqual(202, res.status)

    @patch.object(pecan, 'response', new=FakeResponse)
    @patch.object(client.Client, 'action_resources')
    @patch.object(context, 'extract_context_from_environ')
    def test_trigger_crash_dump_action(self, mock_context, mock_action):
        mock_context.return_value = self.context
        mock_action.return_value = (FakeResponse(202), None)

        t_pod, b_pods = self._prepare_pod()
        t_server_id = self._prepare_server(b_pods[0])
        self.controller.server_id = t_server_id

        body = {'trigger_crash_dump': ''}
        res = self.controller.post(**body)
        mock_action.assert_called_once_with(
            'server', self.context, 'trigger_crash_dump', t_server_id)
        self.assertEqual(202, res.status)

    @patch.object(pecan, 'response', new=FakeResponse)
    @patch.object(client.Client, 'action_resources')
    @patch.object(context, 'extract_context_from_environ')
    def test_migrate_action(self, mock_context, mock_action):
        mock_context.return_value = self.context
        mock_action.return_value = (FakeResponse(202), None)

        t_pod, b_pods = self._prepare_pod()
        t_server_id = self._prepare_server(b_pods[0])
        self.controller.server_id = t_server_id

        body = {'migrate': ''}
        res = self.controller.post(**body)
        mock_action.assert_called_once_with(
            'server', self.context, 'migrate', t_server_id)
        self.assertEqual(202, res.status)

    @patch.object(pecan, 'response', new=FakeResponse)
    @patch.object(HTTPClient, 'post')
    @patch.object(context, 'extract_context_from_environ')
    def test_confirm_resize_action(self, mock_context, mock_action):
        mock_context.return_value = self.context
        mock_action.return_value = (FakeResponse(202), None)

        t_pod, b_pods = self._prepare_pod()
        self._prepare_pod_service(b_pods[0]['pod_id'], constants.ST_NOVA)
        t_server_id = self._prepare_server(b_pods[0])
        self.controller.server_id = t_server_id
        body = {"confirmResize": ''}
        res = self.controller.post(**body)
        url = '/servers/%s/action' % t_server_id
        mock_action.assert_called_once_with(url, body=body)
        self.assertEqual(202, res.status)

    @patch.object(pecan, 'response', new=FakeResponse)
    @patch.object(HTTPClient, 'post')
    @patch.object(context, 'extract_context_from_environ')
    def test_revert_resize_action(self, mock_context, mock_action):
        mock_context.return_value = self.context
        mock_action.return_value = (FakeResponse(202), None)

        t_pod, b_pods = self._prepare_pod()
        self._prepare_pod_service(b_pods[0]['pod_id'], constants.ST_NOVA)
        t_server_id = self._prepare_server(b_pods[0])
        self.controller.server_id = t_server_id
        body = {"revertResize": ''}
        res = self.controller.post(**body)
        url = '/servers/%s/action' % t_server_id
        mock_action.assert_called_once_with(url, body=body)
        self.assertEqual(202, res.status)

    @patch.object(pecan, 'response', new=FakeResponse)
    @patch.object(HTTPClient, 'post')
    @patch.object(context, 'extract_context_from_environ')
    def test_resize_action(self, mock_context, mock_action):
        mock_context.return_value = self.context
        mock_action.return_value = (FakeResponse(202), None)

        t_pod, b_pods = self._prepare_pod()
        self._prepare_pod_service(b_pods[0]['pod_id'], constants.ST_NOVA)
        t_server_id = self._prepare_server(b_pods[0])
        self.controller.server_id = t_server_id
        body = {"resize": {"flavorRef": "2"}}
        res = self.controller.post(**body)
        url = '/servers/%s/action' % t_server_id
        mock_action.assert_called_once_with(url, body=body)
        self.assertEqual(202, res.status)

    @patch.object(pecan, 'response', new=FakeResponse)
    @patch.object(HTTPClient, 'post')
    @patch.object(context, 'extract_context_from_environ')
    def test_reset_state_action(self, mock_context, mock_action):
        mock_context.return_value = self.context
        mock_action.return_value = (FakeResponse(202), None)

        t_pod, b_pods = self._prepare_pod()
        self._prepare_pod_service(b_pods[0]['pod_id'], constants.ST_NOVA)
        t_server_id = self._prepare_server(b_pods[0])
        self.controller.server_id = t_server_id
        body = {"os-resetState": {"state": "active"}}
        res = self.controller.post(**body)
        url = '/servers/%s/action' % t_server_id
        mock_action.assert_called_once_with(url, body=body)
        self.assertEqual(202, res.status)

    @patch.object(pecan, 'response', new=FakeResponse)
    @patch.object(HTTPClient, 'post')
    @patch.object(context, 'extract_context_from_environ')
    def test_soft_reboot_action(self, mock_context, mock_action):
        mock_context.return_value = self.context
        mock_action.return_value = (FakeResponse(202), None)

        t_pod, b_pods = self._prepare_pod()
        self._prepare_pod_service(b_pods[0]['pod_id'], constants.ST_NOVA)
        t_server_id = self._prepare_server(b_pods[0])
        self.controller.server_id = t_server_id
        body = {"reboot": {"type": "SOFT"}}
        res = self.controller.post(**body)
        url = '/servers/%s/action' % t_server_id
        mock_action.assert_called_once_with(url, body=body)
        self.assertEqual(202, res.status)

    @patch.object(pecan, 'response', new=FakeResponse)
    @patch.object(HTTPClient, 'post')
    @patch.object(context, 'extract_context_from_environ')
    def test_hard_reboot_action(self, mock_context, mock_action):
        mock_context.return_value = self.context
        mock_action.return_value = (FakeResponse(202), None)
        t_pod, b_pods = self._prepare_pod()
        self._prepare_pod_service(b_pods[0]['pod_id'], constants.ST_NOVA)
        t_server_id = self._prepare_server(b_pods[0])
        self.controller.server_id = t_server_id
        body = {"reboot": {"type": "HARD"}}
        res = self.controller.post(**body)
        url = '/servers/%s/action' % t_server_id
        mock_action.assert_called_once_with(url, body=body)
        self.assertEqual(202, res.status)

    def tearDown(self):
        core.ModelBase.metadata.drop_all(core.get_engine())
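
The action tests above all follow one pattern: pecan.response is replaced with FakeResponse, the proxy call (client.Client.action_resources or novaclient's HTTPClient.post) is mocked, the controller is invoked with a single-key body such as {'os-start': ''}, and the test asserts which action was forwarded. A self-contained sketch of that pattern follows; TinyActionController, FakeClient and the action_map are illustrative, not the real ActionController.

import unittest
from unittest import mock


class FakeClient(object):
    def action_resources(self, resource, ctx, action, server_id):
        raise NotImplementedError  # always patched in the tests below


class TinyActionController(object):
    # body key -> forwarded action name, mirroring 'os-start' -> 'start' etc.
    action_map = {'os-start': 'start', 'os-stop': 'stop', 'pause': 'pause'}

    def __init__(self, server_id, nova_client):
        self.server_id = server_id
        self.client = nova_client

    def post(self, **body):
        key = next(iter(body))
        if key not in self.action_map:
            return {'badRequest': {'code': 400}}
        self.client.action_resources('server', None,
                                     self.action_map[key], self.server_id)
        return {'code': 202}


class TinyActionTest(unittest.TestCase):
    @mock.patch.object(FakeClient, 'action_resources')
    def test_start_action(self, mock_action):
        controller = TinyActionController('server-uuid', FakeClient())
        res = controller.post(**{'os-start': ''})
        mock_action.assert_called_once_with(
            'server', None, 'start', 'server-uuid')
        self.assertEqual(202, res['code'])

    @mock.patch.object(FakeClient, 'action_resources')
    def test_action_not_supported(self, mock_action):
        controller = TinyActionController('server-uuid', FakeClient())
        res = controller.post(**{'unsupported_action': ''})
        self.assertEqual(400, res['badRequest']['code'])
        mock_action.assert_not_called()


if __name__ == '__main__':
    unittest.main()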
@ -1,62 +0,0 @@
# Copyright 2015 Huawei Technologies Co., Ltd.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from mock import patch
import unittest

from tricircle.common import context
from tricircle.db import core
from tricircle.nova_apigw.controllers import aggregate


class AggregateTest(unittest.TestCase):
    def setUp(self):
        core.initialize()
        core.ModelBase.metadata.create_all(core.get_engine())
        self.context = context.get_admin_context()
        self.project_id = 'test_project'
        self.controller = aggregate.AggregateController(self.project_id)

    def tearDown(self):
        core.ModelBase.metadata.drop_all(core.get_engine())

    @patch.object(context, 'extract_context_from_environ')
    def test_post(self, mock_context):
        mock_context.return_value = self.context

        body = {'aggregate': {'name': 'ag1',
                              'availability_zone': 'az1'}}
        aggregate_id = self.controller.post(**body)['aggregate']['id']
        aggregate_dict = self.controller.get_one(aggregate_id)['aggregate']
        self.assertEqual('ag1', aggregate_dict['name'])
        self.assertEqual('az1', aggregate_dict['availability_zone'])
        self.assertEqual('az1',
                         aggregate_dict['metadata']['availability_zone'])

    @patch.object(context, 'extract_context_from_environ')
    def test_post_action(self, mock_context):
        mock_context.return_value = self.context

        body = {'aggregate': {'name': 'ag1',
                              'availability_zone': 'az1'}}

        return_ag1 = self.controller.post(**body)['aggregate']
        action_controller = aggregate.AggregateActionController(
            self.project_id, return_ag1['id'])

        return_ag2 = action_controller.post(**body)['aggregate']

        self.assertEqual('ag1', return_ag2['name'])
        self.assertEqual('az1', return_ag2['availability_zone'])
@ -1,47 +0,0 @@
# Copyright 2015 Huawei Technologies Co., Ltd.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from mock import patch
import unittest

from tricircle.common import context
from tricircle.db import core
from tricircle.nova_apigw.controllers import flavor


class FlavorTest(unittest.TestCase):
    def setUp(self):
        core.initialize()
        core.ModelBase.metadata.create_all(core.get_engine())
        self.context = context.get_admin_context()
        self.project_id = 'test_project'
        self.controller = flavor.FlavorController(self.project_id)

    @patch.object(context, 'extract_context_from_environ')
    def test_post(self, mock_context):
        mock_context.return_value = self.context

        body = {'flavor': {'id': '1', 'name': 'test_flavor',
                           'ram': 1024, 'vcpus': 1, 'disk': 10}}
        self.controller.post(**body)
        flavor_dict = self.controller.get_one('1')['flavor']
        self.assertEqual('1', flavor_dict['id'])
        self.assertEqual('test_flavor', flavor_dict['name'])
        self.assertEqual(1024, flavor_dict['memory_mb'])
        self.assertEqual(1, flavor_dict['vcpus'])
        self.assertEqual(10, flavor_dict['root_gb'])

    def tearDown(self):
        core.ModelBase.metadata.drop_all(core.get_engine())
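
The flavor assertions above depend on the gateway translating the Nova-style request fields into the stored record: 'ram' becomes 'memory_mb' and 'disk' becomes 'root_gb', while 'vcpus' is kept as-is. A small illustrative translation is shown below; the helper name and the 'flavorid' key are assumptions, not the controller's actual code.

def flavor_body_to_record(body):
    # illustrative mapping from the API request body to the stored fields
    flavor = body['flavor']
    return {
        'flavorid': flavor['id'],
        'name': flavor['name'],
        'memory_mb': flavor['ram'],
        'vcpus': flavor['vcpus'],
        'root_gb': flavor['disk'],
    }


record = flavor_body_to_record(
    {'flavor': {'id': '1', 'name': 'test_flavor',
                'ram': 1024, 'vcpus': 1, 'disk': 10}})
assert record['memory_mb'] == 1024 and record['root_gb'] == 10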
File diff suppressed because it is too large
@ -1,103 +0,0 @@
# Copyright (c) 2015 Huawei Tech. Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from mock import patch
import pecan
import unittest

from oslo_utils import uuidutils

from novaclient import client

from tricircle.common import constants
from tricircle.common import context
from tricircle.db import api
from tricircle.db import core
from tricircle.db import models
from tricircle.nova_apigw.controllers import server_ips


class FakeResponse(object):
    def __new__(cls, code=500):
        cls.status = code
        cls.status_code = code
        return super(FakeResponse, cls).__new__(cls)


class ServerIpsTest(unittest.TestCase):
    def setUp(self):
        core.initialize()
        core.ModelBase.metadata.create_all(core.get_engine())
        self.context = context.Context()
        self.project_id = 'test_project'
        self.context.tenant = self.project_id
        self.context.user = 'test_user'
        self.controller = server_ips.ServerIpsController(self.project_id, '')

    def _prepare_pod(self, bottom_pod_num=1):
        t_pod = {'pod_id': 't_pod_uuid', 'pod_name': 't_region',
                 'az_name': ''}
        api.create_pod(self.context, t_pod)
        b_pods = []
        if bottom_pod_num == 1:
            b_pod = {'pod_id': 'b_pod_uuid', 'pod_name': 'b_region',
                     'az_name': 'b_az'}
            api.create_pod(self.context, b_pod)
            b_pods.append(b_pod)
        else:
            for i in xrange(1, bottom_pod_num + 1):
                b_pod = {'pod_id': 'b_pod_%d_uuid' % i,
                         'pod_name': 'b_region_%d' % i,
                         'az_name': 'b_az_%d' % i}
                api.create_pod(self.context, b_pod)
                b_pods.append(b_pod)
        return t_pod, b_pods

    def _prepare_server(self, pod):
        t_server_id = uuidutils.generate_uuid()
        b_server_id = t_server_id
        with self.context.session.begin():
            core.create_resource(
                self.context, models.ResourceRouting,
                {'top_id': t_server_id, 'bottom_id': b_server_id,
                 'pod_id': pod['pod_id'], 'project_id': self.project_id,
                 'resource_type': constants.RT_SERVER})
        return t_server_id

    def _prepare_pod_service(self, pod_id, service):
        config_dict = {'service_id': uuidutils.generate_uuid(),
                       'pod_id': pod_id,
                       'service_type': service,
                       'service_url': 'fake_pod_service'}
        api.create_pod_service_configuration(self.context, config_dict)

    @patch.object(pecan, 'response', new=FakeResponse)
    @patch.object(client.HTTPClient, 'get')
    @patch.object(context, 'extract_context_from_environ')
    def test_list_ips(self, mock_context, mock_get):
        mock_context.return_value = self.context
        mock_get.return_value = (FakeResponse(202), None)

        t_pod, b_pods = self._prepare_pod()
        self._prepare_pod_service(b_pods[0]['pod_id'], constants.ST_NOVA)
        t_server_id = self._prepare_server(b_pods[0])
        self.controller.server_id = t_server_id
        res = self.controller.get_all()
        url = '/servers/%s/ips' % t_server_id
        mock_get.assert_called_once_with(url)
        self.assertEqual(202, res.status)

    def tearDown(self):
        core.ModelBase.metadata.drop_all(core.get_engine())
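
Both the server-ips test and the action tests rely on the ResourceRouting entry created in _prepare_server(): before forwarding a request, the gateway looks up which bottom pod hosts a given top-level server ID and then builds the pod-local URL. The sketch below mirrors that lookup with an in-memory dict; the routing store, URL template, and helper names are illustrative, not Tricircle's real implementation.

# (top_id, resource_type) -> {'bottom_id': ..., 'pod_id': ...}
routing = {}


def register(top_id, bottom_id, pod_id, resource_type='server'):
    # analogue of inserting a models.ResourceRouting row
    routing[(top_id, resource_type)] = {'bottom_id': bottom_id,
                                        'pod_id': pod_id}


def bottom_ips_url(top_server_id, pod_services):
    # resolve the hosting pod, then build the pod-local '/servers/<id>/ips' URL
    entry = routing.get((top_server_id, 'server'))
    if entry is None:
        return None  # the controllers answer 404 in this case
    base = pod_services[entry['pod_id']]  # e.g. the pod's Nova endpoint
    return '%s/servers/%s/ips' % (base, entry['bottom_id'])


register('top-server-uuid', 'top-server-uuid', 'b_pod_uuid')
services = {'b_pod_uuid': 'http://fake_pod_service'}
assert bottom_ips_url('top-server-uuid', services) == \
    'http://fake_pod_service/servers/top-server-uuid/ips'
assert bottom_ips_url('unknown-server', services) is None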
@ -1,304 +0,0 @@
# Copyright (c) 2015 Huawei Tech. Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from mock import patch
import pecan
import unittest

from oslo_utils import uuidutils

from tricircle.common import client
from tricircle.common import constants
from tricircle.common import context
from tricircle.db import api
from tricircle.db import core
from tricircle.db import models
from tricircle.nova_apigw.controllers import volume


class FakeResponse(object):
    def __new__(cls, code=500):
        cls.status = code
        cls.status_code = code
        return super(FakeResponse, cls).__new__(cls)


class FakeVolume(object):
    def to_dict(self):
        pass


class FakeClient(object):
    def post(self, url, body):
        return FakeResponse(), FakeVolume()

    def get(self, url):
        return FakeResponse(), FakeVolume()

    def put(self, url, body):
        return FakeResponse(), FakeVolume()

    def delete(self, url):
        return FakeResponse(), None


class FakeApi(object):
    def __init__(self):
        self.client = FakeClient()


class VolumeTest(unittest.TestCase):
    def setUp(self):
        core.initialize()
        core.ModelBase.metadata.create_all(core.get_engine())
        self.context = context.get_admin_context()
        self.project_id = 'test_project'
        self.controller = volume.VolumeController(self.project_id, '')

    def _prepare_pod(self, bottom_pod_num=1):
        t_pod = {'pod_id': 't_pod_uuid', 'pod_name': 't_region',
                 'az_name': ''}
        api.create_pod(self.context, t_pod)
        if bottom_pod_num == 1:
            b_pod = {'pod_id': 'b_pod_uuid', 'pod_name': 'b_region',
                     'az_name': 'b_az'}
            api.create_pod(self.context, b_pod)
            return t_pod, b_pod
        b_pods = []
        for i in xrange(1, bottom_pod_num + 1):
            b_pod = {'pod_id': 'b_pod_%d_uuid' % i,
                     'pod_name': 'b_region_%d' % i,
                     'az_name': 'b_az_%d' % i}
            api.create_pod(self.context, b_pod)
            b_pods.append(b_pod)
        return t_pod, b_pods

    def _validate_error_code(self, res, code):
        self.assertEqual(code, res[res.keys()[0]]['code'])

    @patch.object(pecan, 'response')
    @patch.object(FakeClient, 'post')
    @patch.object(client.Client, 'get_native_client')
    @patch.object(context, 'extract_context_from_environ')
    def test_attach_volume(self, mock_context, mock_api, mock_post,
                           mock_response):
        mock_context.return_value = self.context
        mock_api.return_value = FakeApi()
        mock_response = FakeResponse()
        mock_response.status = 202
        t_pod, b_pods = self._prepare_pod(bottom_pod_num=2)
        b_pod1 = b_pods[0]
        b_pod2 = b_pods[1]
        t_server_id = uuidutils.generate_uuid()
        b_server_id = t_server_id
        with self.context.session.begin():
            core.create_resource(
                self.context, models.ResourceRouting,
                {'top_id': t_server_id, 'bottom_id': b_server_id,
                 'pod_id': b_pod1['pod_id'], 'project_id': self.project_id,
                 'resource_type': constants.RT_SERVER})

        t_volume1_id = uuidutils.generate_uuid()
        b_volume1_id = t_volume1_id
        t_volume2_id = uuidutils.generate_uuid()
        b_volume2_id = t_volume1_id
        with self.context.session.begin():
            core.create_resource(
                self.context, models.ResourceRouting,
                {'top_id': t_volume1_id, 'bottom_id': b_volume1_id,
                 'pod_id': b_pod1['pod_id'], 'project_id': self.project_id,
                 'resource_type': constants.RT_VOLUME})
            core.create_resource(
                self.context, models.ResourceRouting,
                {'top_id': t_volume2_id, 'bottom_id': b_volume2_id,
                 'pod_id': b_pod2['pod_id'], 'project_id': self.project_id,
                 'resource_type': constants.RT_VOLUME})

        # success case
        self.controller.server_id = t_server_id
        body = {'volumeAttachment': {'volumeId': t_volume1_id}}
        self.controller.post(**body)
        calls = [mock.call('server_volume', self.context)]
        mock_api.assert_has_calls(calls)
        url = "/servers/%s/os-volume_attachments" % t_server_id
        calls = [mock.call(url, body=body)]
        mock_post.assert_has_calls(calls)
        body = {'volumeAttachment': {'volumeId': t_volume1_id,
                                     'device': '/dev/vdb'}}
        self.controller.post(**body)
        calls = [mock.call('server_volume', self.context)]
        mock_api.assert_has_calls(calls)
        calls = [mock.call(url, body=body)]
        mock_post.assert_has_calls(calls)

        # failure case, bad request
        body = {'volumeAttachment': {'volumeId': t_volume2_id}}
        res = self.controller.post(**body)
        self._validate_error_code(res, 400)

        body = {'fakePara': ''}
        res = self.controller.post(**body)
        self._validate_error_code(res, 400)

        body = {'volumeAttachment': {}}
        res = self.controller.post(**body)
        self._validate_error_code(res, 400)

        # each part of path should not start with digit
        body = {'volumeAttachment': {'volumeId': t_volume1_id,
                                     'device': '/dev/001disk'}}
        res = self.controller.post(**body)
        self._validate_error_code(res, 400)

        # the first part should be "dev", and only two parts are allowed
        body = {'volumeAttachment': {'volumeId': t_volume1_id,
                                     'device': '/dev/vdb/disk'}}
        res = self.controller.post(**body)
        self._validate_error_code(res, 400)

        body = {'volumeAttachment': {'volumeId': t_volume1_id,
                                     'device': '/disk/vdb'}}
        res = self.controller.post(**body)
        self._validate_error_code(res, 400)

        # failure case, resource not found
        body = {'volumeAttachment': {'volumeId': 'fake_volume_id'}}
        res = self.controller.post(**body)
        self._validate_error_code(res, 404)

        self.controller.server_id = 'fake_server_id'
        body = {'volumeAttachment': {'volumeId': t_volume1_id}}

        res = self.controller.post(**body)
        self._validate_error_code(res, 404)

    @patch.object(pecan, 'response')
    @patch.object(FakeClient, 'delete')
    @patch.object(client.Client, 'get_native_client')
    @patch.object(context, 'extract_context_from_environ')
    def test_detach_volume(self, mock_context, mock_api, mock_delete,
                           mock_response):
        mock_context.return_value = self.context
        mock_api.return_value = FakeApi()
        mock_response = FakeResponse()
        mock_response.status = 202
        t_pod, b_pods = self._prepare_pod(bottom_pod_num=1)
        b_pod1 = b_pods
        t_server_id = uuidutils.generate_uuid()
        b_server_id = t_server_id
        with self.context.session.begin():
            core.create_resource(
                self.context, models.ResourceRouting,
                {'top_id': t_server_id, 'bottom_id': b_server_id,
                 'pod_id': b_pod1['pod_id'], 'project_id': self.project_id,
                 'resource_type': constants.RT_SERVER})

        t_volume1_id = uuidutils.generate_uuid()
        b_volume1_id = t_volume1_id
        with self.context.session.begin():
            core.create_resource(
                self.context, models.ResourceRouting,
                {'top_id': t_volume1_id, 'bottom_id': b_volume1_id,
                 'pod_id': b_pod1['pod_id'], 'project_id': self.project_id,
                 'resource_type': constants.RT_VOLUME})

        # success case
        self.controller.server_id = t_server_id
        body = {'volumeAttachment': {'volumeId': t_volume1_id}}
        self.controller.post(**body)
        self.controller.delete(t_volume1_id)
        calls = [mock.call('server_volume', self.context),
                 mock.call('server_volume', self.context)]
        mock_api.assert_has_calls(calls)
        url = "/servers/%s/os-volume_attachments/%s" % (
            t_server_id, t_volume1_id)
        calls = [mock.call(url)]
        mock_delete.assert_has_calls(calls)

        # failure case, resource not found
        body = {'volumeAttachment': {'volumeId': t_volume1_id}}
        self.controller.post(**body)
        self.controller.server_id = 'fake_server_id'
        res = self.controller.delete(t_volume1_id)
        self._validate_error_code(res, 404)

    @patch.object(pecan, 'response')
    @patch.object(FakeClient, 'get')
    @patch.object(client.Client, 'get_native_client')
    @patch.object(context, 'extract_context_from_environ')
    def test_get_volume_attachments(self, mock_context, mock_api,
                                    mock_get, mock_response):
        mock_context.return_value = self.context
        mock_api.return_value = FakeApi()
        mock_response = FakeResponse()
        mock_response.status = 202
        t_pod, b_pods = self._prepare_pod(bottom_pod_num=1)
        b_pod1 = b_pods
        t_server_id = uuidutils.generate_uuid()
        b_server_id = t_server_id
        with self.context.session.begin():
            core.create_resource(
                self.context, models.ResourceRouting,
                {'top_id': t_server_id, 'bottom_id': b_server_id,
                 'pod_id': b_pod1['pod_id'], 'project_id': self.project_id,
                 'resource_type': constants.RT_SERVER})

        t_volume1_id = uuidutils.generate_uuid()
        b_volume1_id = t_volume1_id
        t_volume2_id = uuidutils.generate_uuid()
        b_volume2_id = t_volume2_id
        with self.context.session.begin():
            core.create_resource(
                self.context, models.ResourceRouting,
                {'top_id': t_volume1_id, 'bottom_id': b_volume1_id,
                 'pod_id': b_pod1['pod_id'], 'project_id': self.project_id,
                 'resource_type': constants.RT_VOLUME})
            core.create_resource(
                self.context, models.ResourceRouting,
                {'top_id': t_volume2_id, 'bottom_id': b_volume2_id,
                 'pod_id': b_pod1['pod_id'], 'project_id': self.project_id,
                 'resource_type': constants.RT_VOLUME})

        # success case
        self.controller.server_id = t_server_id
        body = {'volumeAttachment': {'volumeId': t_volume1_id}}
        self.controller.post(**body)
        body = {'volumeAttachment': {'volumeId': t_volume2_id}}
        self.controller.post(**body)
        self.controller.get_one(t_volume1_id)
        url = "/servers/%s/os-volume_attachments/%s" % (
            t_server_id, t_volume1_id)
        calls = [mock.call(url)]
        mock_get.asset_has_calls(calls)
        self.controller.get_all()
        url = "/servers/%s/os-volume_attachments" % t_server_id
        calls = [mock.call(calls)]
        mock_get.asset_has_calls(calls)
        calls = [mock.call('server_volume', self.context),
                 mock.call('server_volume', self.context),
                 mock.call('server_volume', self.context),
                 mock.call('server_volume', self.context)]
        mock_api.assert_has_calls(calls)

        # failure case, resource not found
        self.controller.server_id = 'fake_server_id'
        res = self.controller.get_one(t_volume1_id)
        self._validate_error_code(res, 404)
        res = self.controller.get_all()
        self._validate_error_code(res, 404)

    def tearDown(self):
        core.ModelBase.metadata.drop_all(core.get_engine())
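
The bad-request cases in test_attach_volume encode the device-path rules the gateway enforces, as spelled out in the inline comments: at most two path components, the first must be "dev", and no component may start with a digit. Below is a hedged re-implementation of that check (not the controller's actual code) that reproduces exactly the accepted and rejected paths exercised by the test.

import re

# a path component: letters, underscores, digits, but not starting with a digit
_PART = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$')


def is_valid_device_path(device):
    parts = device.lstrip('/').split('/')
    # only two path components are allowed and the first must be "dev"
    if len(parts) != 2 or parts[0] != 'dev':
        return False
    # no component may start with a digit
    return all(_PART.match(part) for part in parts)


assert is_valid_device_path('/dev/vdb')
assert not is_valid_device_path('/dev/001disk')
assert not is_valid_device_path('/dev/vdb/disk')
assert not is_valid_device_path('/disk/vdb')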