Add quotas for Server Groups (quota checks)
This is the third change in a sequence that adds new quota values for server groups; it implements the quota checks. Co-authored-by: Cyril Roelandt <cyril.roelandt@enovance.com> Implements: blueprint server-group-quotas DocImpact Change-Id: I535c4ac2475d0b0ca6e14081f92e1c7d111792bd
This commit is contained in:
parent
9a4d7ab534
commit
0e9093c30f
|
@ -24,9 +24,14 @@ from nova.api.openstack import wsgi
|
|||
from nova.api.openstack import xmlutil
|
||||
import nova.exception
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _LE
|
||||
from nova import objects
|
||||
from nova.openstack.common import log as logging
|
||||
from nova import utils
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# NOTE(russellb) There is one other policy, 'legacy', but we don't allow that
|
||||
# being set via the API. It's only used when a group gets automatically
|
||||
# created to support the legacy behavior of the 'group' scheduler hint.
|
||||
|
@ -128,6 +133,9 @@ class ServerGroupXMLDeserializer(wsgi.MetadataXMLDeserializer):
|
|||
class ServerGroupController(wsgi.Controller):
|
||||
"""The Server group API controller for the OpenStack API."""
|
||||
|
||||
def __init__(self, ext_mgr):
|
||||
self.ext_mgr = ext_mgr
|
||||
|
||||
def _format_server_group(self, context, group):
|
||||
# the id field has its value as the uuid of the server group
|
||||
# There is no 'uuid' key in server_group seen by clients.
|
||||
|
@ -220,9 +228,34 @@ class ServerGroupController(wsgi.Controller):
|
|||
context = _authorize_context(req)
|
||||
try:
|
||||
sg = objects.InstanceGroup.get_by_uuid(context, id)
|
||||
sg.destroy(context)
|
||||
except nova.exception.InstanceGroupNotFound as e:
|
||||
raise webob.exc.HTTPNotFound(explanation=e.format_message())
|
||||
|
||||
quotas = None
|
||||
if self.ext_mgr.is_loaded('os-server-group-quotas'):
|
||||
quotas = objects.Quotas()
|
||||
project_id, user_id = objects.quotas.ids_from_server_group(context,
|
||||
sg)
|
||||
try:
|
||||
# We have to add the quota back to the user that created
|
||||
# the server group
|
||||
quotas.reserve(context, project_id=project_id,
|
||||
user_id=user_id, server_groups=-1)
|
||||
except Exception:
|
||||
quotas = None
|
||||
LOG.exception(_LE("Failed to update usages deallocating "
|
||||
"server group"))
|
||||
|
||||
try:
|
||||
sg.destroy(context)
|
||||
except nova.exception.InstanceGroupNotFound as e:
|
||||
if quotas:
|
||||
quotas.rollback()
|
||||
raise webob.exc.HTTPNotFound(explanation=e.format_message())
|
||||
|
||||
if quotas:
|
||||
quotas.commit()
|
||||
|
||||
return webob.Response(status_int=204)
|
||||
|
||||
@wsgi.serializers(xml=ServerGroupsTemplate)
|
||||
|
@ -251,6 +284,16 @@ class ServerGroupController(wsgi.Controller):
|
|||
except nova.exception.InvalidInput as e:
|
||||
raise exc.HTTPBadRequest(explanation=e.format_message())
|
||||
|
||||
quotas = None
|
||||
if self.ext_mgr.is_loaded('os-server-group-quotas'):
|
||||
quotas = objects.Quotas()
|
||||
try:
|
||||
quotas.reserve(context, project_id=context.project_id,
|
||||
user_id=context.user_id, server_groups=1)
|
||||
except nova.exception.OverQuota:
|
||||
msg = _("Quota exceeded, too many server groups.")
|
||||
raise exc.HTTPForbidden(explanation=msg)
|
||||
|
||||
vals = body['server_group']
|
||||
sg = objects.InstanceGroup(context)
|
||||
sg.project_id = context.project_id
|
||||
|
@ -260,8 +303,13 @@ class ServerGroupController(wsgi.Controller):
|
|||
sg.policies = vals.get('policies')
|
||||
sg.create()
|
||||
except ValueError as e:
|
||||
if quotas:
|
||||
quotas.rollback()
|
||||
raise exc.HTTPBadRequest(explanation=e)
|
||||
|
||||
if quotas:
|
||||
quotas.commit()
|
||||
|
||||
return {'server_group': self._format_server_group(context, sg)}
|
||||
|
||||
|
||||
|
@ -283,7 +331,7 @@ class Server_groups(extensions.ExtensionDescriptor):
|
|||
|
||||
res = extensions.ResourceExtension(
|
||||
'os-server-groups',
|
||||
controller=ServerGroupController(),
|
||||
controller=ServerGroupController(self.ext_mgr),
|
||||
member_actions={"action": "POST", })
|
||||
|
||||
resources.append(res)
|
||||
|
|
|
@ -23,9 +23,13 @@ from nova.api.openstack import extensions
|
|||
from nova.api.openstack import wsgi
|
||||
import nova.exception
|
||||
from nova.i18n import _
|
||||
from nova.i18n import _LE
|
||||
from nova import objects
|
||||
from nova.openstack.common import log as logging
|
||||
from nova import utils
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
ALIAS = "os-server-groups"
|
||||
|
||||
# NOTE(russellb) There is one other policy, 'legacy', but we don't allow that
|
||||
|
@ -138,9 +142,31 @@ class ServerGroupController(wsgi.Controller):
|
|||
context = _authorize_context(req)
|
||||
try:
|
||||
sg = objects.InstanceGroup.get_by_uuid(context, id)
|
||||
sg.destroy(context)
|
||||
except nova.exception.InstanceGroupNotFound as e:
|
||||
raise webob.exc.HTTPNotFound(explanation=e.format_message())
|
||||
|
||||
quotas = objects.Quotas()
|
||||
project_id, user_id = objects.quotas.ids_from_server_group(context, sg)
|
||||
try:
|
||||
# We have to add the quota back to the user that created
|
||||
# the server group
|
||||
quotas.reserve(context, project_id=project_id,
|
||||
user_id=user_id, server_groups=-1)
|
||||
except Exception:
|
||||
quotas = None
|
||||
LOG.exception(_LE("Failed to update usages deallocating "
|
||||
"server group"))
|
||||
|
||||
try:
|
||||
sg.destroy(context)
|
||||
except nova.exception.InstanceGroupNotFound as e:
|
||||
if quotas:
|
||||
quotas.rollback()
|
||||
raise webob.exc.HTTPNotFound(explanation=e.format_message())
|
||||
|
||||
if quotas:
|
||||
quotas.commit()
|
||||
|
||||
return webob.Response(status_int=204)
|
||||
|
||||
@extensions.expected_errors(())
|
||||
|
@ -158,7 +184,7 @@ class ServerGroupController(wsgi.Controller):
|
|||
for group in limited_list]
|
||||
return {'server_groups': result}
|
||||
|
||||
@extensions.expected_errors(400)
|
||||
@extensions.expected_errors((400, 403))
|
||||
def create(self, req, body):
|
||||
"""Creates a new server group."""
|
||||
context = _authorize_context(req)
|
||||
|
@ -168,6 +194,14 @@ class ServerGroupController(wsgi.Controller):
|
|||
except nova.exception.InvalidInput as e:
|
||||
raise exc.HTTPBadRequest(explanation=e.format_message())
|
||||
|
||||
quotas = objects.Quotas()
|
||||
try:
|
||||
quotas.reserve(context, project_id=context.project_id,
|
||||
user_id=context.user_id, server_groups=1)
|
||||
except nova.exception.OverQuota:
|
||||
msg = _("Quota exceeded, too many server groups.")
|
||||
raise exc.HTTPForbidden(explanation=msg)
|
||||
|
||||
vals = body['server_group']
|
||||
sg = objects.InstanceGroup(context)
|
||||
sg.project_id = context.project_id
|
||||
|
@ -177,8 +211,11 @@ class ServerGroupController(wsgi.Controller):
|
|||
sg.policies = vals.get('policies')
|
||||
sg.create()
|
||||
except ValueError as e:
|
||||
quotas.rollback()
|
||||
raise exc.HTTPBadRequest(explanation=e)
|
||||
|
||||
quotas.commit()
|
||||
|
||||
return {'server_group': self._format_server_group(context, sg)}
|
||||
|
||||
|
||||
|
|
|
@ -511,6 +511,7 @@ class ServersController(wsgi.Controller):
|
|||
metadata=server_dict.get('metadata', {}),
|
||||
admin_password=password,
|
||||
requested_networks=requested_networks,
|
||||
check_server_group_quota=True,
|
||||
**create_kwargs)
|
||||
except (exception.QuotaError,
|
||||
exception.PortLimitExceeded) as error:
|
||||
|
|
|
@ -925,33 +925,37 @@ class Controller(wsgi.Controller):
|
|||
if self.ext_mgr.is_loaded('OS-SCH-HNT'):
|
||||
scheduler_hints = server_dict.get('scheduler_hints', {})
|
||||
|
||||
check_server_group_quota = \
|
||||
self.ext_mgr.is_loaded('os-server-group-quotas')
|
||||
|
||||
try:
|
||||
_get_inst_type = flavors.get_flavor_by_flavor_id
|
||||
inst_type = _get_inst_type(flavor_id, ctxt=context,
|
||||
read_deleted="no")
|
||||
|
||||
(instances, resv_id) = self.compute_api.create(context,
|
||||
inst_type,
|
||||
image_uuid,
|
||||
display_name=name,
|
||||
display_description=name,
|
||||
key_name=key_name,
|
||||
metadata=server_dict.get('metadata', {}),
|
||||
access_ip_v4=access_ip_v4,
|
||||
access_ip_v6=access_ip_v6,
|
||||
injected_files=injected_files,
|
||||
admin_password=password,
|
||||
min_count=min_count,
|
||||
max_count=max_count,
|
||||
requested_networks=requested_networks,
|
||||
security_group=sg_names,
|
||||
user_data=user_data,
|
||||
availability_zone=availability_zone,
|
||||
config_drive=config_drive,
|
||||
block_device_mapping=block_device_mapping,
|
||||
auto_disk_config=auto_disk_config,
|
||||
scheduler_hints=scheduler_hints,
|
||||
legacy_bdm=legacy_bdm)
|
||||
inst_type,
|
||||
image_uuid,
|
||||
display_name=name,
|
||||
display_description=name,
|
||||
key_name=key_name,
|
||||
metadata=server_dict.get('metadata', {}),
|
||||
access_ip_v4=access_ip_v4,
|
||||
access_ip_v6=access_ip_v6,
|
||||
injected_files=injected_files,
|
||||
admin_password=password,
|
||||
min_count=min_count,
|
||||
max_count=max_count,
|
||||
requested_networks=requested_networks,
|
||||
security_group=sg_names,
|
||||
user_data=user_data,
|
||||
availability_zone=availability_zone,
|
||||
config_drive=config_drive,
|
||||
block_device_mapping=block_device_mapping,
|
||||
auto_disk_config=auto_disk_config,
|
||||
scheduler_hints=scheduler_hints,
|
||||
legacy_bdm=legacy_bdm,
|
||||
check_server_group_quota=check_server_group_quota)
|
||||
except (exception.QuotaError,
|
||||
exception.PortLimitExceeded) as error:
|
||||
raise exc.HTTPForbidden(
|
||||
|
|
|
@ -848,7 +848,8 @@ class API(base.Base):
|
|||
|
||||
def _provision_instances(self, context, instance_type, min_count,
|
||||
max_count, base_options, boot_meta, security_groups,
|
||||
block_device_mapping, shutdown_terminate):
|
||||
block_device_mapping, shutdown_terminate,
|
||||
instance_group, check_server_group_quota):
|
||||
# Reserve quotas
|
||||
num_instances, quotas = self._check_num_instances_quota(
|
||||
context, instance_type, min_count, max_count)
|
||||
|
@ -866,6 +867,25 @@ class API(base.Base):
|
|||
pci_requests.instance_uuid = instance.uuid
|
||||
pci_requests.save(context)
|
||||
instances.append(instance)
|
||||
|
||||
if instance_group:
|
||||
if check_server_group_quota:
|
||||
count = QUOTAS.count(context,
|
||||
'server_group_members',
|
||||
instance_group,
|
||||
context.user_id)
|
||||
try:
|
||||
QUOTAS.limit_check(context,
|
||||
server_group_members=count + 1)
|
||||
except exception.OverQuota:
|
||||
msg = _("Quota exceeded, too many servers in "
|
||||
"group")
|
||||
raise exception.QuotaError(msg)
|
||||
|
||||
objects.InstanceGroup.add_members(context,
|
||||
instance_group.uuid,
|
||||
[instance.uuid])
|
||||
|
||||
# send a state update notification for the initial create to
|
||||
# show it going from non-existent to BUILDING
|
||||
notifications.send_update_with_states(context, instance, None,
|
||||
|
@ -938,26 +958,8 @@ class API(base.Base):
|
|||
return {}
|
||||
|
||||
@staticmethod
|
||||
def _update_instance_group_by_name(context, instance_uuids, group_name):
|
||||
try:
|
||||
ig = objects.InstanceGroup.get_by_name(context, group_name)
|
||||
objects.InstanceGroup.add_members(context, ig.uuid, instance_uuids)
|
||||
except exception.InstanceGroupNotFound:
|
||||
# NOTE(russellb) If the group does not already exist, we need to
|
||||
# automatically create it to be backwards compatible with old
|
||||
# handling of the 'group' scheduler hint. The policy type will be
|
||||
# 'legacy', indicating that this group was created to emulate
|
||||
# legacy group behavior.
|
||||
ig = objects.InstanceGroup(context)
|
||||
ig.name = group_name
|
||||
ig.project_id = context.project_id
|
||||
ig.user_id = context.user_id
|
||||
ig.policies = ['legacy']
|
||||
ig.members = instance_uuids
|
||||
ig.create()
|
||||
|
||||
@staticmethod
|
||||
def _update_instance_group(context, instances, scheduler_hints):
|
||||
def _get_requested_instance_group(context, scheduler_hints,
|
||||
check_quota):
|
||||
if not scheduler_hints:
|
||||
return
|
||||
|
||||
|
@ -965,14 +967,45 @@ class API(base.Base):
|
|||
if not group_hint:
|
||||
return
|
||||
|
||||
instance_uuids = [instance.uuid for instance in instances]
|
||||
|
||||
if uuidutils.is_uuid_like(group_hint):
|
||||
objects.InstanceGroup.add_members(context, group_hint,
|
||||
instance_uuids)
|
||||
group = objects.InstanceGroup.get_by_uuid(context, group_hint)
|
||||
else:
|
||||
API._update_instance_group_by_name(context, instance_uuids,
|
||||
group_hint)
|
||||
try:
|
||||
group = objects.InstanceGroup.get_by_name(context, group_hint)
|
||||
except exception.InstanceGroupNotFound:
|
||||
# NOTE(russellb) If the group does not already exist, we need
|
||||
# to automatically create it to be backwards compatible with
|
||||
# old handling of the 'group' scheduler hint. The policy type
|
||||
# will be 'legacy', indicating that this group was created to
|
||||
# emulate legacy group behavior.
|
||||
quotas = None
|
||||
if check_quota:
|
||||
quotas = objects.Quotas()
|
||||
try:
|
||||
quotas.reserve(context,
|
||||
project_id=context.project_id,
|
||||
user_id=context.user_id,
|
||||
server_groups=1)
|
||||
except nova.exception.OverQuota:
|
||||
msg = _("Quota exceeded, too many server groups.")
|
||||
raise nova.exception.QuotaError(msg)
|
||||
|
||||
group = objects.InstanceGroup(context)
|
||||
group.name = group_hint
|
||||
group.project_id = context.project_id
|
||||
group.user_id = context.user_id
|
||||
group.policies = ['legacy']
|
||||
try:
|
||||
group.create()
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
if quotas:
|
||||
quotas.rollback()
|
||||
|
||||
if quotas:
|
||||
quotas.commit()
|
||||
|
||||
return group
|
||||
|
||||
def _create_instance(self, context, instance_type,
|
||||
image_href, kernel_id, ramdisk_id,
|
||||
|
@ -985,7 +1018,8 @@ class API(base.Base):
|
|||
requested_networks, config_drive,
|
||||
block_device_mapping, auto_disk_config,
|
||||
reservation_id=None, scheduler_hints=None,
|
||||
legacy_bdm=True, shutdown_terminate=False):
|
||||
legacy_bdm=True, shutdown_terminate=False,
|
||||
check_server_group_quota=False):
|
||||
"""Verify all the input parameters regardless of the provisioning
|
||||
strategy being performed and schedule the instance(s) for
|
||||
creation.
|
||||
|
@ -1041,17 +1075,19 @@ class API(base.Base):
|
|||
base_options, instance_type, boot_meta, min_count, max_count,
|
||||
block_device_mapping, legacy_bdm)
|
||||
|
||||
instance_group = self._get_requested_instance_group(context,
|
||||
scheduler_hints, check_server_group_quota)
|
||||
|
||||
instances = self._provision_instances(context, instance_type,
|
||||
min_count, max_count, base_options, boot_meta, security_groups,
|
||||
block_device_mapping, shutdown_terminate)
|
||||
block_device_mapping, shutdown_terminate,
|
||||
instance_group, check_server_group_quota)
|
||||
|
||||
filter_properties = self._build_filter_properties(context,
|
||||
scheduler_hints, forced_host,
|
||||
forced_node, instance_type,
|
||||
base_options.get('pci_request_info'))
|
||||
|
||||
self._update_instance_group(context, instances, scheduler_hints)
|
||||
|
||||
for instance in instances:
|
||||
self._record_action_start(context, instance,
|
||||
instance_actions.CREATE)
|
||||
|
@ -1366,7 +1402,7 @@ class API(base.Base):
|
|||
block_device_mapping=None, access_ip_v4=None,
|
||||
access_ip_v6=None, requested_networks=None, config_drive=None,
|
||||
auto_disk_config=None, scheduler_hints=None, legacy_bdm=True,
|
||||
shutdown_terminate=False):
|
||||
shutdown_terminate=False, check_server_group_quota=False):
|
||||
"""Provision instances, sending instance information to the
|
||||
scheduler. The scheduler will determine where the instance(s)
|
||||
go and will handle creating the DB entries.
|
||||
|
@ -1384,19 +1420,20 @@ class API(base.Base):
|
|||
requested_networks)
|
||||
|
||||
return self._create_instance(
|
||||
context, instance_type,
|
||||
image_href, kernel_id, ramdisk_id,
|
||||
min_count, max_count,
|
||||
display_name, display_description,
|
||||
key_name, key_data, security_group,
|
||||
availability_zone, user_data, metadata,
|
||||
injected_files, admin_password,
|
||||
access_ip_v4, access_ip_v6,
|
||||
requested_networks, config_drive,
|
||||
block_device_mapping, auto_disk_config,
|
||||
scheduler_hints=scheduler_hints,
|
||||
legacy_bdm=legacy_bdm,
|
||||
shutdown_terminate=shutdown_terminate)
|
||||
context, instance_type,
|
||||
image_href, kernel_id, ramdisk_id,
|
||||
min_count, max_count,
|
||||
display_name, display_description,
|
||||
key_name, key_data, security_group,
|
||||
availability_zone, user_data, metadata,
|
||||
injected_files, admin_password,
|
||||
access_ip_v4, access_ip_v6,
|
||||
requested_networks, config_drive,
|
||||
block_device_mapping, auto_disk_config,
|
||||
scheduler_hints=scheduler_hints,
|
||||
legacy_bdm=legacy_bdm,
|
||||
shutdown_terminate=shutdown_terminate,
|
||||
check_server_group_quota=check_server_group_quota)
|
||||
|
||||
def trigger_provider_fw_rules_refresh(self, context):
|
||||
"""Called when a rule is added/removed from a provider firewall."""
|
||||
|
|
|
@ -1422,9 +1422,9 @@ def _keypair_get_count_by_user(*args, **kwargs):
|
|||
return objects.KeyPairList.get_count_by_user(*args, **kwargs)
|
||||
|
||||
|
||||
def _server_group_count_members_by_user(*args, **kwargs):
|
||||
def _server_group_count_members_by_user(context, group, user_id):
|
||||
"""Helper method to avoid referencing objects.InstanceGroup on import."""
|
||||
return objects.InstanceGroup.count_members_by_user(*args, **kwargs)
|
||||
return group.count_members_by_user(context, user_id)
|
||||
|
||||
|
||||
QUOTAS = QuotaEngine()
|
||||
|
|
|
@ -0,0 +1,184 @@
|
|||
# Copyright 2014 Hewlett-Packard Development Company, L.P
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo.config import cfg
|
||||
import webob
|
||||
|
||||
from nova.api.openstack.compute.contrib import server_groups
|
||||
from nova.api.openstack.compute.plugins.v3 import server_groups as sg_v3
|
||||
from nova.api.openstack import extensions
|
||||
from nova import context
|
||||
import nova.db
|
||||
from nova.openstack.common import uuidutils
|
||||
from nova import quota
|
||||
from nova import test
|
||||
from nova.tests.api.openstack import fakes
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
class AttrDict(dict):
|
||||
def __getattr__(self, k):
|
||||
return self[k]
|
||||
|
||||
|
||||
def server_group_template(**kwargs):
|
||||
sgroup = kwargs.copy()
|
||||
sgroup.setdefault('name', 'test')
|
||||
return sgroup
|
||||
|
||||
|
||||
def server_group_db(sg):
|
||||
attrs = sg.copy()
|
||||
if 'id' in attrs:
|
||||
attrs['uuid'] = attrs.pop('id')
|
||||
if 'policies' in attrs:
|
||||
policies = attrs.pop('policies')
|
||||
attrs['policies'] = policies
|
||||
else:
|
||||
attrs['policies'] = []
|
||||
if 'members' in attrs:
|
||||
members = attrs.pop('members')
|
||||
attrs['members'] = members
|
||||
else:
|
||||
attrs['members'] = []
|
||||
if 'metadata' in attrs:
|
||||
attrs['metadetails'] = attrs.pop('metadata')
|
||||
else:
|
||||
attrs['metadetails'] = {}
|
||||
attrs['deleted'] = 0
|
||||
attrs['deleted_at'] = None
|
||||
attrs['created_at'] = None
|
||||
attrs['updated_at'] = None
|
||||
if 'user_id' not in attrs:
|
||||
attrs['user_id'] = 'user_id'
|
||||
if 'project_id' not in attrs:
|
||||
attrs['project_id'] = 'project_id'
|
||||
attrs['id'] = 7
|
||||
|
||||
return AttrDict(attrs)
|
||||
|
||||
|
||||
class ServerGroupQuotasTestV21(test.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(ServerGroupQuotasTestV21, self).setUp()
|
||||
self._setup_controller()
|
||||
self.app = self._get_app()
|
||||
|
||||
def _setup_controller(self):
|
||||
self.controller = sg_v3.ServerGroupController()
|
||||
|
||||
def _get_app(self):
|
||||
return fakes.wsgi_app_v3(init_only=('os-server-groups',))
|
||||
|
||||
def _get_url(self):
|
||||
return '/v3'
|
||||
|
||||
def _setup_quotas(self):
|
||||
pass
|
||||
|
||||
def _assert_server_groups_in_use(self, project_id, user_id, in_use):
|
||||
ctxt = context.get_admin_context()
|
||||
result = quota.QUOTAS.get_user_quotas(ctxt, project_id, user_id)
|
||||
self.assertEqual(result['server_groups']['in_use'], in_use)
|
||||
|
||||
def test_create_server_group_normal(self):
|
||||
self._setup_quotas()
|
||||
req = fakes.HTTPRequest.blank('/v2/fake/os-server-groups')
|
||||
sgroup = server_group_template()
|
||||
policies = ['anti-affinity']
|
||||
sgroup['policies'] = policies
|
||||
res_dict = self.controller.create(req, {'server_group': sgroup})
|
||||
self.assertEqual(res_dict['server_group']['name'], 'test')
|
||||
self.assertTrue(uuidutils.is_uuid_like(res_dict['server_group']['id']))
|
||||
self.assertEqual(res_dict['server_group']['policies'], policies)
|
||||
|
||||
def test_create_server_group_quota_limit(self):
|
||||
self._setup_quotas()
|
||||
req = fakes.HTTPRequest.blank('/v2/fake/os-server-groups')
|
||||
sgroup = server_group_template()
|
||||
policies = ['anti-affinity']
|
||||
sgroup['policies'] = policies
|
||||
# Start by creating as many server groups as we're allowed to.
|
||||
for i in range(CONF.quota_server_groups):
|
||||
self.controller.create(req, {'server_group': sgroup})
|
||||
|
||||
# Then, creating a server group should fail.
|
||||
self.assertRaises(webob.exc.HTTPForbidden,
|
||||
self.controller.create,
|
||||
req, {'server_group': sgroup})
|
||||
|
||||
def test_delete_server_group_by_admin(self):
|
||||
self._setup_quotas()
|
||||
sgroup = server_group_template()
|
||||
policies = ['anti-affinity']
|
||||
sgroup['policies'] = policies
|
||||
req = fakes.HTTPRequest.blank('/v2/fake/os-server-groups')
|
||||
res = self.controller.create(req, {'server_group': sgroup})
|
||||
sg_id = res['server_group']['id']
|
||||
context = req.environ['nova.context']
|
||||
|
||||
self._assert_server_groups_in_use(context.project_id,
|
||||
context.user_id, 1)
|
||||
|
||||
# Delete the server group we've just created.
|
||||
req = fakes.HTTPRequest.blank('/v2/fake/os-server-groups/%s' % sg_id,
|
||||
use_admin_context=True)
|
||||
self.controller.delete(req, sg_id)
|
||||
|
||||
# Make sure the quota in use has been released.
|
||||
self._assert_server_groups_in_use(context.project_id,
|
||||
context.user_id, 0)
|
||||
|
||||
def test_delete_server_group_by_id(self):
|
||||
self._setup_quotas()
|
||||
sg = server_group_template(id='123')
|
||||
self.called = False
|
||||
|
||||
def server_group_delete(context, id):
|
||||
self.called = True
|
||||
|
||||
def return_server_group(context, group_id):
|
||||
self.assertEqual(sg['id'], group_id)
|
||||
return server_group_db(sg)
|
||||
|
||||
self.stubs.Set(nova.db, 'instance_group_delete',
|
||||
server_group_delete)
|
||||
self.stubs.Set(nova.db, 'instance_group_get',
|
||||
return_server_group)
|
||||
|
||||
req = fakes.HTTPRequest.blank('/v2/fake/os-server-groups/123')
|
||||
resp = self.controller.delete(req, '123')
|
||||
self.assertTrue(self.called)
|
||||
self.assertEqual(resp.status_int, 204)
|
||||
|
||||
|
||||
class ServerGroupQuotasTestV2(ServerGroupQuotasTestV21):
|
||||
|
||||
def _setup_controller(self):
|
||||
self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
|
||||
self.controller = server_groups.ServerGroupController(self.ext_mgr)
|
||||
|
||||
def _setup_quotas(self):
|
||||
self.ext_mgr.is_loaded('os-server-group-quotas').MultipleTimes()\
|
||||
.AndReturn(True)
|
||||
self.mox.ReplayAll()
|
||||
|
||||
def _get_app(self):
|
||||
return fakes.wsgi_app(init_only=('os-server-groups',))
|
||||
|
||||
def _get_url(self):
|
||||
return '/v2/fake'
|
|
@ -18,6 +18,7 @@ import webob
|
|||
|
||||
from nova.api.openstack.compute.contrib import server_groups
|
||||
from nova.api.openstack.compute.plugins.v3 import server_groups as sg_v3
|
||||
from nova.api.openstack import extensions
|
||||
from nova.api.openstack import wsgi
|
||||
from nova import context
|
||||
import nova.db
|
||||
|
@ -81,13 +82,14 @@ def server_group_db(sg):
|
|||
|
||||
class ServerGroupTestV21(test.TestCase):
|
||||
|
||||
sg_controller_cls = sg_v3.ServerGroupController
|
||||
|
||||
def setUp(self):
|
||||
super(ServerGroupTestV21, self).setUp()
|
||||
self.controller = self.sg_controller_cls()
|
||||
self._setup_controller()
|
||||
self.app = self._get_app()
|
||||
|
||||
def _setup_controller(self):
|
||||
self.controller = sg_v3.ServerGroupController()
|
||||
|
||||
def _get_app(self):
|
||||
return fakes.wsgi_app_v3(init_only=('os-server-groups',))
|
||||
|
||||
|
@ -362,7 +364,11 @@ class ServerGroupTestV21(test.TestCase):
|
|||
|
||||
|
||||
class ServerGroupTestV2(ServerGroupTestV21):
|
||||
sg_controller_cls = server_groups.ServerGroupController
|
||||
|
||||
def _setup_controller(self):
|
||||
ext_mgr = extensions.ExtensionManager()
|
||||
ext_mgr.extensions = {}
|
||||
self.controller = server_groups.ServerGroupController(ext_mgr)
|
||||
|
||||
def _get_app(self):
|
||||
return fakes.wsgi_app(init_only=('os-server-groups',))
|
||||
|
|
|
@ -2476,6 +2476,60 @@ class ServersControllerCreateTest(test.TestCase):
|
|||
' already used 9 of 10 cores')
|
||||
self._do_test_create_instance_above_quota('cores', 1, 10, msg)
|
||||
|
||||
def test_create_instance_above_quota_server_group_members(self):
|
||||
ctxt = context.get_admin_context()
|
||||
fake_group = objects.InstanceGroup(ctxt)
|
||||
fake_group.create()
|
||||
|
||||
def fake_count(context, name, group, user_id):
|
||||
self.assertEqual(name, "server_group_members")
|
||||
self.assertEqual(group.uuid, fake_group.uuid)
|
||||
self.assertEqual(user_id,
|
||||
self.req.environ['nova.context'].user_id)
|
||||
return 10
|
||||
|
||||
def fake_limit_check(context, **kwargs):
|
||||
if 'server_group_members' in kwargs:
|
||||
raise exception.OverQuota(overs={})
|
||||
|
||||
def fake_instance_destroy(context, uuid, constraint):
|
||||
return fakes.stub_instance(1)
|
||||
|
||||
self.stubs.Set(fakes.QUOTAS, 'count', fake_count)
|
||||
self.stubs.Set(fakes.QUOTAS, 'limit_check', fake_limit_check)
|
||||
self.stubs.Set(db, 'instance_destroy', fake_instance_destroy)
|
||||
self.body['os:scheduler_hints'] = {'group': fake_group.uuid}
|
||||
self.req.body = jsonutils.dumps(self.body)
|
||||
expected_msg = "Quota exceeded, too many servers in group"
|
||||
|
||||
try:
|
||||
self.controller.create(self.req, body=self.body).obj
|
||||
self.fail('expected quota to be exceeded')
|
||||
except webob.exc.HTTPForbidden as e:
|
||||
self.assertEqual(e.explanation, expected_msg)
|
||||
|
||||
def test_create_instance_above_quota_server_groups(self):
|
||||
|
||||
def fake_reserve(contex, **deltas):
|
||||
if 'server_groups' in deltas:
|
||||
raise exception.OverQuota(overs={})
|
||||
|
||||
def fake_instance_destroy(context, uuid, constraint):
|
||||
return fakes.stub_instance(1)
|
||||
|
||||
self.stubs.Set(fakes.QUOTAS, 'reserve', fake_reserve)
|
||||
self.stubs.Set(db, 'instance_destroy', fake_instance_destroy)
|
||||
self.body['os:scheduler_hints'] = {'group': 'fake_group'}
|
||||
self.req.body = jsonutils.dumps(self.body)
|
||||
|
||||
expected_msg = "Quota exceeded, too many server groups."
|
||||
|
||||
try:
|
||||
self.controller.create(self.req, body=self.body).obj
|
||||
self.fail('expected quota to be exceeded')
|
||||
except webob.exc.HTTPForbidden as e:
|
||||
self.assertEqual(e.explanation, expected_msg)
|
||||
|
||||
def test_create_instance_with_neutronv2_port_in_use(self):
|
||||
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
|
||||
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
|
||||
|
|
|
@ -2930,6 +2930,65 @@ class ServersControllerCreateTest(test.TestCase):
|
|||
' already used 9 of 10 cores')
|
||||
self._do_test_create_instance_above_quota('cores', 1, 10, msg)
|
||||
|
||||
def test_create_instance_above_quota_group_members(self):
|
||||
ctxt = context.get_admin_context()
|
||||
fake_group = objects.InstanceGroup(ctxt)
|
||||
fake_group.create()
|
||||
|
||||
def fake_count(context, name, group, user_id):
|
||||
self.assertEqual(name, "server_group_members")
|
||||
self.assertEqual(group.uuid, fake_group.uuid)
|
||||
self.assertEqual(user_id,
|
||||
self.req.environ['nova.context'].user_id)
|
||||
return 10
|
||||
|
||||
def fake_limit_check(context, **kwargs):
|
||||
if 'server_group_members' in kwargs:
|
||||
raise exception.OverQuota(overs={})
|
||||
|
||||
def fake_instance_destroy(context, uuid, constraint):
|
||||
return fakes.stub_instance(1)
|
||||
|
||||
self.stubs.Set(fakes.QUOTAS, 'count', fake_count)
|
||||
self.stubs.Set(fakes.QUOTAS, 'limit_check', fake_limit_check)
|
||||
self.stubs.Set(db, 'instance_destroy', fake_instance_destroy)
|
||||
self.ext_mgr.extensions = {'OS-SCH-HNT': 'fake',
|
||||
'os-server-group-quotas': 'fake'}
|
||||
self.body['server']['scheduler_hints'] = {'group': fake_group.uuid}
|
||||
self.req.body = jsonutils.dumps(self.body)
|
||||
|
||||
expected_msg = "Quota exceeded, too many servers in group"
|
||||
|
||||
try:
|
||||
self.controller.create(self.req, self.body).obj['server']
|
||||
self.fail('expected quota to be exceeded')
|
||||
except webob.exc.HTTPForbidden as e:
|
||||
self.assertEqual(e.explanation, expected_msg)
|
||||
|
||||
def test_create_instance_above_quota_server_groups(self):
|
||||
|
||||
def fake_reserve(contex, **deltas):
|
||||
if 'server_groups' in deltas:
|
||||
raise exception.OverQuota(overs={})
|
||||
|
||||
def fake_instance_destroy(context, uuid, constraint):
|
||||
return fakes.stub_instance(1)
|
||||
|
||||
self.stubs.Set(fakes.QUOTAS, 'reserve', fake_reserve)
|
||||
self.stubs.Set(db, 'instance_destroy', fake_instance_destroy)
|
||||
self.ext_mgr.extensions = {'OS-SCH-HNT': 'fake',
|
||||
'os-server-group-quotas': 'fake'}
|
||||
self.body['server']['scheduler_hints'] = {'group': 'fake-group'}
|
||||
self.req.body = jsonutils.dumps(self.body)
|
||||
|
||||
expected_msg = "Quota exceeded, too many server groups."
|
||||
|
||||
try:
|
||||
self.controller.create(self.req, self.body).obj['server']
|
||||
self.fail('expected quota to be exceeded')
|
||||
except webob.exc.HTTPForbidden as e:
|
||||
self.assertEqual(e.explanation, expected_msg)
|
||||
|
||||
|
||||
class ServersControllerCreateTestWithMock(test.TestCase):
|
||||
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
|
||||
|
|
Loading…
Reference in New Issue