Finish quota refactor.
Finishes the quota refactor by making use of the new quota infrastructure. Partially implements blueprint quota-refactor (the final piece is to remove the old quota architecture).

This change is fairly substantial, so to make it easier to review it has been broken up into 3 parts; this is the second part.

Change-Id: I1c8b43198f0d44e9e13a45575361aa043fd0639e
parent 7e15d4e28f
commit b7f0946bbd
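The pattern this change applies over and over is: reserve a usage delta up front, do the work, then commit the reservation on success or roll it back on failure. A minimal sketch of that pattern, outside the diff, assuming only the `QUOTAS.reserve`/`commit`/`rollback` entry points shown in the hunks below (`do_work` is a hypothetical stand-in for the real operation):

```python
from nova import exception
from nova import quota
from nova.openstack.common import excutils

QUOTAS = quota.QUOTAS


def create_with_quota(context, do_work):
    # Reserve quota first; OverQuota aborts before any work is done.
    try:
        reservations = QUOTAS.reserve(context, security_groups=1)
    except exception.OverQuota:
        raise  # callers translate this into an API-specific error

    try:
        result = do_work()  # hypothetical stand-in for the real operation
        # Make the reserved usage permanent.
        QUOTAS.commit(context, reservations)
    except Exception:
        with excutils.save_and_reraise_exception():
            # Release the reservation so usage is not leaked.
            QUOTAS.rollback(context, reservations)
    return result
```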
@@ -105,6 +105,8 @@ flags.DECLARE('vpn_start', 'nova.network.manager')
flags.DECLARE('default_floating_pool', 'nova.network.manager')
flags.DECLARE('public_interface', 'nova.network.linux_net')

QUOTAS = quota.QUOTAS


# Decorators for actions
def args(*args, **kwargs):
@@ -493,11 +495,11 @@ class ProjectCommands(object):
db.quota_update(ctxt, project_id, key, value)
except exception.ProjectQuotaNotFound:
db.quota_create(ctxt, project_id, key, value)
project_quota = quota.get_project_quotas(ctxt, project_id)
project_quota = QUOTAS.get_project_quotas(ctxt, project_id)
for key, value in project_quota.iteritems():
if value is None:
value = 'unlimited'
print '%s: %s' % (key, value)
if value['limit'] < 0 or value['limit'] is None:
value['limit'] = 'unlimited'
print '%s: %s' % (key, value['limit'])

@args('--project', dest="project_id", metavar='<Project name>',
help='Project name')
@@ -40,6 +40,7 @@ from nova import flags
from nova.image import s3
from nova import log as logging
from nova import network
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova import quota
from nova import utils
@@ -50,6 +51,8 @@ FLAGS = flags.FLAGS

LOG = logging.getLogger(__name__)

QUOTAS = quota.QUOTAS


def validate_ec2_id(val):
if not validator.validate_str()(val):
@@ -713,10 +716,11 @@ class CloudController(object):
raise exception.EC2APIError(err % values_for_rule)
postvalues.append(values_for_rule)

allowed = quota.allowed_security_group_rules(context,
security_group['id'],
1)
if allowed < 1:
count = QUOTAS.count(context, 'security_group_rules',
security_group['id'])
try:
QUOTAS.limit_check(context, security_group_rules=count + 1)
except exception.OverQuota:
msg = _("Quota exceeded, too many security group rules.")
raise exception.EC2APIError(msg)

@@ -777,17 +781,26 @@ class CloudController(object):
msg = _('group %s already exists')
raise exception.EC2APIError(msg % group_name)

if quota.allowed_security_groups(context, 1) < 1:
try:
reservations = QUOTAS.reserve(context, security_groups=1)
except exception.OverQuota:
msg = _("Quota exceeded, too many security groups.")
raise exception.EC2APIError(msg)

group = {'user_id': context.user_id,
'project_id': context.project_id,
'name': group_name,
'description': group_description}
group_ref = db.security_group_create(context, group)
try:
group = {'user_id': context.user_id,
'project_id': context.project_id,
'name': group_name,
'description': group_description}
group_ref = db.security_group_create(context, group)

self.sgh.trigger_security_group_create_refresh(context, group)
self.sgh.trigger_security_group_create_refresh(context, group)

# Commit the reservation
QUOTAS.commit(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
QUOTAS.rollback(context, reservations)

return {'securityGroupSet': [self._format_security_group(context,
group_ref)]}
@@ -810,11 +823,25 @@ class CloudController(object):
raise notfound(security_group_id=group_id)
if db.security_group_in_use(context, security_group.id):
raise exception.InvalidGroup(reason="In Use")

# Get reservations
try:
reservations = QUOTAS.reserve(context, security_groups=-1)
except Exception:
reservations = None
LOG.exception(_("Failed to update usages deallocating "
"security group"))

LOG.audit(_("Delete security group %s"), group_name, context=context)
db.security_group_destroy(context, security_group.id)

self.sgh.trigger_security_group_destroy_refresh(context,
security_group.id)

# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations)

return True

def get_console_output(self, context, instance_id, **kwargs):
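The other enforcement idiom in the hunks above pairs `QUOTAS.count()` with `QUOTAS.limit_check()`: count the current usage, then ask whether one more would still fit, catching `OverQuota` rather than receiving a remaining allowance. A minimal sketch under those assumptions (the resource name is taken from the hunk above; the surrounding function is illustrative):

```python
from nova import exception
from nova import quota

QUOTAS = quota.QUOTAS


def check_can_add_rule(context, group_id):
    # How many rules does this security group already have?
    count = QUOTAS.count(context, 'security_group_rules', group_id)
    try:
        # Would one more rule stay within the project's limit?
        QUOTAS.limit_check(context, security_group_rules=count + 1)
    except exception.OverQuota:
        # Callers translate this into an API-specific error response.
        raise
```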
@@ -27,6 +27,7 @@ from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova.compute import task_states
from nova.compute import vm_states
from nova import exception
from nova import flags
from nova import log as logging
from nova.network import model as network_model
@@ -35,6 +36,7 @@ from nova import quota

LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
QUOTAS = quota.QUOTAS


XML_NS_V11 = 'http://docs.openstack.org/compute/api/v1.1'
@@ -272,9 +274,9 @@ def get_version_from_href(href):
def check_img_metadata_properties_quota(context, metadata):
if metadata is None:
return
num_metadata = len(metadata)
quota_metadata = quota.allowed_metadata_items(context, num_metadata)
if quota_metadata < num_metadata:
try:
QUOTAS.limit_check(context, metadata_items=len(metadata))
except exception.OverQuota:
expl = _("Image metadata limit exceeded")
raise webob.exc.HTTPRequestEntityTooLarge(explanation=expl,
headers={'Retry-After': 0})
@@ -23,6 +23,9 @@ from nova import exception
from nova import quota


QUOTAS = quota.QUOTAS


authorize = extensions.extension_authorizer('compute', 'quota_classes')


@@ -32,7 +35,7 @@ class QuotaClassTemplate(xmlutil.TemplateBuilder):
selector='quota_class_set')
root.set('id')

for resource in quota.quota_resources:
for resource in QUOTAS.resources:
elem = xmlutil.SubTemplateElement(root, resource)
elem.text = resource

@@ -46,7 +49,7 @@ class QuotaClassSetsController(object):

result = dict(id=str(quota_class))

for resource in quota.quota_resources:
for resource in QUOTAS.resources:
result[resource] = quota_set[resource]

return dict(quota_class_set=result)
@@ -58,7 +61,7 @@ class QuotaClassSetsController(object):
try:
db.sqlalchemy.api.authorize_quota_class_context(context, id)
return self._format_quota_set(id,
quota.get_class_quotas(context, id))
QUOTAS.get_class_quotas(context, id))
except exception.NotAuthorized:
raise webob.exc.HTTPForbidden()

@@ -68,7 +71,7 @@ class QuotaClassSetsController(object):
authorize(context)
quota_class = id
for key in body['quota_class_set'].keys():
if key in quota.quota_resources:
if key in QUOTAS:
value = int(body['quota_class_set'][key])
try:
db.quota_class_update(context, quota_class, key, value)
@@ -76,8 +79,8 @@ class QuotaClassSetsController(object):
db.quota_class_create(context, quota_class, key, value)
except exception.AdminRequired:
raise webob.exc.HTTPForbidden()
return {'quota_class_set': quota.get_class_quotas(context,
quota_class)}
return {'quota_class_set': QUOTAS.get_class_quotas(context,
quota_class)}


class Quota_classes(extensions.ExtensionDescriptor):
@@ -26,6 +26,9 @@ from nova import exception
from nova import quota


QUOTAS = quota.QUOTAS


authorize = extensions.extension_authorizer('compute', 'quotas')


@@ -34,7 +37,7 @@ class QuotaTemplate(xmlutil.TemplateBuilder):
root = xmlutil.TemplateElement('quota_set', selector='quota_set')
root.set('id')

for resource in quota.quota_resources:
for resource in QUOTAS.resources:
elem = xmlutil.SubTemplateElement(root, resource)
elem.text = resource

@@ -48,7 +51,7 @@ class QuotaSetsController(object):

result = dict(id=str(project_id))

for resource in quota.quota_resources:
for resource in QUOTAS.resources:
result[resource] = quota_set[resource]

return dict(quota_set=result)
@@ -59,14 +62,21 @@ class QuotaSetsController(object):
msg = _("Quota limit must be -1 or greater.")
raise webob.exc.HTTPBadRequest(explanation=msg)

def _get_quotas(self, context, id, usages=False):
values = QUOTAS.get_project_quotas(context, id, usages=usages)

if usages:
return values
else:
return dict((k, v['limit']) for k, v in values.items())

@wsgi.serializers(xml=QuotaTemplate)
def show(self, req, id):
context = req.environ['nova.context']
authorize(context)
try:
sqlalchemy_api.authorize_project_context(context, id)
return self._format_quota_set(id,
quota.get_project_quotas(context, id))
return self._format_quota_set(id, self._get_quotas(context, id))
except exception.NotAuthorized:
raise webob.exc.HTTPForbidden()

@@ -76,7 +86,7 @@ class QuotaSetsController(object):
authorize(context)
project_id = id
for key in body['quota_set'].keys():
if key in quota.quota_resources:
if key in QUOTAS:
value = int(body['quota_set'][key])
self._validate_quota_limit(value)
try:
@@ -85,12 +95,13 @@ class QuotaSetsController(object):
db.quota_create(context, project_id, key, value)
except exception.AdminRequired:
raise webob.exc.HTTPForbidden()
return {'quota_set': quota.get_project_quotas(context, project_id)}
return {'quota_set': self._get_quotas(context, id)}

@wsgi.serializers(xml=QuotaTemplate)
def defaults(self, req, id):
authorize(req.environ['nova.context'])
return self._format_quota_set(id, quota._get_default_quotas())
context = req.environ['nova.context']
authorize(context)
return self._format_quota_set(id, QUOTAS.get_defaults(context))


class Quotas(extensions.ExtensionDescriptor):
@@ -31,6 +31,7 @@ from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova import quota
from nova import utils
@@ -38,6 +39,7 @@ from nova import utils

LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
QUOTAS = quota.QUOTAS
authorize = extensions.extension_authorizer('compute', 'security_groups')


@@ -244,11 +246,24 @@ class SecurityGroupController(SecurityGroupControllerBase):
if db.security_group_in_use(context, security_group.id):
msg = _("Security group is still in use")
raise exc.HTTPBadRequest(explanation=msg)

# Get reservations
try:
reservations = QUOTAS.reserve(context, security_groups=-1)
except Exception:
reservations = None
LOG.exception(_("Failed to update usages deallocating "
"security group"))

LOG.audit(_("Delete security group %s"), id, context=context)
db.security_group_destroy(context, security_group.id)
self.sgh.trigger_security_group_destroy_refresh(
context, security_group.id)

# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations)

return webob.Response(status_int=202)

@wsgi.serializers(xml=SecurityGroupsTemplate)
@@ -291,22 +306,33 @@ class SecurityGroupController(SecurityGroupControllerBase):
group_name = group_name.strip()
group_description = group_description.strip()

if quota.allowed_security_groups(context, 1) < 1:
try:
reservations = QUOTAS.reserve(context, security_groups=1)
except exception.OverQuota:
msg = _("Quota exceeded, too many security groups.")
raise exc.HTTPBadRequest(explanation=msg)

LOG.audit(_("Create Security Group %s"), group_name, context=context)
self.compute_api.ensure_default_security_group(context)
if db.security_group_exists(context, context.project_id, group_name):
msg = _('Security group %s already exists') % group_name
raise exc.HTTPBadRequest(explanation=msg)
try:
LOG.audit(_("Create Security Group %s"), group_name,
context=context)
self.compute_api.ensure_default_security_group(context)
if db.security_group_exists(context, context.project_id,
group_name):
msg = _('Security group %s already exists') % group_name
raise exc.HTTPBadRequest(explanation=msg)

group = {'user_id': context.user_id,
'project_id': context.project_id,
'name': group_name,
'description': group_description}
group_ref = db.security_group_create(context, group)
self.sgh.trigger_security_group_create_refresh(context, group)
group = {'user_id': context.user_id,
'project_id': context.project_id,
'name': group_name,
'description': group_description}
group_ref = db.security_group_create(context, group)
self.sgh.trigger_security_group_create_refresh(context, group)

# Commit the reservation
QUOTAS.commit(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
QUOTAS.rollback(context, reservations)

return {'security_group': self._format_security_group(context,
group_ref)}
@@ -382,10 +408,10 @@ class SecurityGroupRulesController(SecurityGroupControllerBase):
msg = _('This rule already exists in group %s') % parent_group_id
raise exc.HTTPBadRequest(explanation=msg)

allowed = quota.allowed_security_group_rules(context,
parent_group_id,
1)
if allowed < 1:
count = QUOTAS.count(context, 'security_group_rules', parent_group_id)
try:
QUOTAS.limit_check(context, security_group_rules=count + 1)
except exception.OverQuota:
msg = _("Quota exceeded, too many security group rules.")
raise exc.HTTPBadRequest(explanation=msg)

@@ -36,6 +36,9 @@ from nova import quota
from nova import wsgi as base_wsgi


QUOTAS = quota.QUOTAS


# Convenience constants for the limits dictionary passed to Limiter().
PER_SECOND = 1
PER_MINUTE = 60
@@ -82,7 +85,9 @@ class LimitsController(object):
Return all global and rate limit information.
"""
context = req.environ['nova.context']
abs_limits = quota.get_project_quotas(context, context.project_id)
quotas = QUOTAS.get_project_quotas(context, context.project_id,
usages=False)
abs_limits = dict((k, v['limit']) for k, v in quotas.items())
rate_limits = req.environ.get("nova.limits", [])

builder = self._get_view_builder(req)
@@ -42,6 +42,7 @@ import nova.image
from nova import log as logging
from nova import network
from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import jsonutils
import nova.policy
from nova import quota
@@ -56,6 +57,8 @@ LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
flags.DECLARE('consoleauth_topic', 'nova.consoleauth')

QUOTAS = quota.QUOTAS


def check_instance_state(vm_state=None, task_state=None):
"""Decorator to check VM and/or task state before entry to API functions.
@@ -126,49 +129,91 @@ class API(base.Base):
"""
if injected_files is None:
return
limit = quota.allowed_injected_files(context, len(injected_files))
if len(injected_files) > limit:

# Check number of files first
try:
QUOTAS.limit_check(context, injected_files=len(injected_files))
except exception.OverQuota:
raise exception.OnsetFileLimitExceeded()
path_limit = quota.allowed_injected_file_path_bytes(context)

# OK, now count path and content lengths; we're looking for
# the max...
max_path = 0
max_content = 0
for path, content in injected_files:
if len(path) > path_limit:
max_path = max(max_path, len(path))
max_content = max(max_content, len(content))

try:
QUOTAS.limit_check(context, injected_file_path_bytes=max_path,
injected_file_content_bytes=max_content)
except exception.OverQuota as exc:
# Favor path limit over content limit for reporting
# purposes
if 'injected_file_path_bytes' in exc.kwargs['overs']:
raise exception.OnsetFilePathLimitExceeded()
content_limit = quota.allowed_injected_file_content_bytes(
context, len(content))
if len(content) > content_limit:
else:
raise exception.OnsetFileContentLimitExceeded()

def _check_num_instances_quota(self, context, instance_type, min_count,
max_count):
"""Enforce quota limits on number of instances created."""
num_instances = quota.allowed_instances(context, max_count,
instance_type)
if num_instances < min_count:

# Determine requested cores and ram
req_cores = max_count * instance_type['vcpus']
req_ram = max_count * instance_type['memory_mb']

# Check the quota
try:
reservations = QUOTAS.reserve(context, instances=max_count,
cores=req_cores, ram=req_ram)
except exception.OverQuota as exc:
# OK, we exceeded quota; let's figure out why...
quotas = exc.kwargs['quotas']
usages = exc.kwargs['usages']
headroom = dict((res, quotas[res] -
(usages[res]['in_use'] + usages[res]['reserved']))
for res in quotas.keys())
allowed = headroom['instances']
if instance_type['vcpus']:
allowed = min(allowed,
headroom['cores'] // instance_type['vcpus'])
if instance_type['memory_mb']:
allowed = min(allowed,
headroom['ram'] // instance_type['memory_mb'])

# Convert to the appropriate exception message
pid = context.project_id
if num_instances <= 0:
if allowed <= 0:
msg = _("Cannot run any more instances of this type.")
used = max_count
elif min_count <= allowed <= max_count:
# We're actually OK, but still need reservations
return self._check_num_instances_quota(context, instance_type,
min_count, allowed)
else:
msg = (_("Can only run %s more instances of this type.") %
num_instances)
used = max_count - num_instances
allowed)
used = max_count - allowed
LOG.warn(_("Quota exceeded for %(pid)s,"
" tried to run %(min_count)s instances. %(msg)s"), locals())
raise exception.TooManyInstances(used=used, allowed=max_count)

return num_instances
return max_count, reservations

def _check_metadata_properties_quota(self, context, metadata=None):
"""Enforce quota limits on metadata properties."""
if not metadata:
metadata = {}
num_metadata = len(metadata)
quota_metadata = quota.allowed_metadata_items(context, num_metadata)
if quota_metadata < num_metadata:
try:
QUOTAS.limit_check(context, metadata_items=num_metadata)
except exception.OverQuota as exc:
pid = context.project_id
msg = _("Quota exceeded for %(pid)s, tried to set "
"%(num_metadata)s metadata properties") % locals()
LOG.warn(msg)
quota_metadata = exc.kwargs['quotas']['metadata_items']
raise exception.MetadataLimitExceeded(allowed=quota_metadata)

# Because metadata is stored in the DB, we hard-code the size limits
@@ -302,7 +347,7 @@ class API(base.Base):
block_device_mapping = block_device_mapping or []

# Check quotas
num_instances = self._check_num_instances_quota(
num_instances, quota_reservations = self._check_num_instances_quota(
context, instance_type, min_count, max_count)
self._check_metadata_properties_quota(context, metadata)
self._check_injected_file_quota(context, injected_files)
@@ -313,8 +358,10 @@ class API(base.Base):
image = image_service.show(context, image_id)

if instance_type['memory_mb'] < int(image.get('min_ram') or 0):
QUOTAS.rollback(context, quota_reservations)
raise exception.InstanceTypeMemoryTooSmall()
if instance_type['root_gb'] < int(image.get('min_disk') or 0):
QUOTAS.rollback(context, quota_reservations)
raise exception.InstanceTypeDiskTooSmall()

# Handle config_drive
@@ -385,7 +432,12 @@ class API(base.Base):
if create_instance_here:
instance = self.create_db_entry_for_new_instance(
context, instance_type, image, base_options,
security_group, block_device_mapping)
security_group, block_device_mapping,
quota_reservations)

# Reservations committed; don't double-commit
quota_reservations = None

# Tells scheduler we created the instance already.
base_options['uuid'] = instance['uuid']
use_call = False
@@ -412,7 +464,7 @@ class API(base.Base):
admin_password, image,
num_instances, requested_networks,
block_device_mapping, security_group,
filter_properties)
filter_properties, quota_reservations)

if create_instance_here:
return ([instance], reservation_id)
@@ -509,7 +561,7 @@ class API(base.Base):
#NOTE(bcwaldon): No policy check since this is only used by scheduler and
# the compute api. That should probably be cleaned up, though.
def create_db_entry_for_new_instance(self, context, instance_type, image,
base_options, security_group, block_device_mapping):
base_options, security_group, block_device_mapping, reservations):
"""Create an entry in the DB for this new instance,
including any related table updates (such as security group,
etc).
@@ -539,6 +591,11 @@ class API(base.Base):

base_options.setdefault('launch_index', 0)
instance = self.db.instance_create(context, base_options)

# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations)

instance_id = instance['id']
instance_uuid = instance['uuid']

@@ -593,7 +650,8 @@ class API(base.Base):
requested_networks,
block_device_mapping,
security_group,
filter_properties):
filter_properties,
quota_reservations):
"""Send a run_instance request to the schedulers for processing."""

pid = context.project_id
@@ -615,7 +673,8 @@ class API(base.Base):
topic=FLAGS.compute_topic, request_spec=request_spec,
admin_password=admin_password, injected_files=injected_files,
requested_networks=requested_networks, is_first_time=True,
filter_properties=filter_properties, call=use_call)
filter_properties=filter_properties,
reservations=quota_reservations, call=use_call)

def _check_create_policies(self, context, availability_zone,
requested_networks, block_device_mapping):
@@ -895,10 +954,17 @@ class API(base.Base):
pass

def _delete(self, context, instance):
host = instance['host']
reservations = QUOTAS.reserve(context,
instances=-1,
cores=-instance['vcpus'],
ram=-instance['memory_mb'])
try:
if not instance['host']:
# Just update database, nothing else we can do
return self.db.instance_destroy(context, instance['id'])
result = self.db.instance_destroy(context, instance['id'])
QUOTAS.commit(context, reservations)
return result

self.update(context,
instance,
@@ -919,9 +985,13 @@ class API(base.Base):

self.compute_rpcapi.terminate_instance(context, instance)

QUOTAS.commit(context, reservations)
except exception.InstanceNotFound:
# NOTE(comstud): Race condition. Instance already gone.
pass
QUOTAS.rollback(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
QUOTAS.rollback(context, reservations)

# NOTE(jerdfelt): The API implies that only ACTIVE and ERROR are
# allowed but the EC2 API appears to allow from RESCUED and STOPPED
@@ -1885,7 +1955,10 @@ class KeypairAPI(base.Base):
"""Import a key pair using an existing public key."""
self._validate_keypair_name(context, user_id, key_name)

if quota.allowed_key_pairs(context, 1) < 1:
count = QUOTAS.count(context, 'key_pairs', user_id)
try:
QUOTAS.limit_check(context, key_pairs=count + 1)
except exception.OverQuota:
raise exception.KeypairLimitExceeded()

try:
@@ -1906,7 +1979,10 @@ class KeypairAPI(base.Base):
"""Create a new key pair."""
self._validate_keypair_name(context, user_id, key_name)

if quota.allowed_key_pairs(context, 1) < 1:
count = QUOTAS.count(context, 'key_pairs', user_id)
try:
QUOTAS.limit_check(context, key_pairs=count + 1)
except exception.OverQuota:
raise exception.KeypairLimitExceeded()

private_key, public_key, fingerprint = crypto.generate_key_pair()
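The headroom arithmetic in `_check_num_instances_quota` above derives how many more instances a project could still launch from the `OverQuota` payload: per-resource headroom is the quota minus in-use plus reserved usage, and the instance allowance is capped by every resource at once. A standalone sketch of that arithmetic with illustrative values (the formula is the one in the hunk above):

```python
quotas = {'instances': 10, 'cores': 20, 'ram': 51200}
usages = {'instances': {'in_use': 8, 'reserved': 0},
          'cores': {'in_use': 16, 'reserved': 2},
          'ram': {'in_use': 40960, 'reserved': 0}}
instance_type = {'vcpus': 2, 'memory_mb': 2048}

# headroom = quota - (in_use + reserved), per resource
headroom = dict((res, quotas[res] -
                 (usages[res]['in_use'] + usages[res]['reserved']))
                for res in quotas)

# The allowed instance count is limited by every resource at once.
allowed = headroom['instances']                                   # 2
allowed = min(allowed, headroom['cores'] // instance_type['vcpus'])      # min(2, 1) = 1
allowed = min(allowed, headroom['ram'] // instance_type['memory_mb'])    # min(1, 5) = 1
```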
@@ -2543,6 +2543,10 @@ def quota_reserve(context, resources, quotas, deltas, expire,
session=session,
save=False)
refresh = True
elif usages[resource].in_use < 0:
# Negative in_use count indicates a desync, so try to
# heal from that...
refresh = True
elif usages[resource].until_refresh is not None:
usages[resource].until_refresh -= 1
if usages[resource].until_refresh <= 0:
@@ -2607,7 +2611,7 @@ def quota_reserve(context, resources, quotas, deltas, expire,
# they're not invalidated by being over-quota.

# Create the reservations
if not unders and not overs:
if not overs:
reservations = []
for resource, delta in deltas.items():
reservation = reservation_create(elevated,
@@ -2638,7 +2642,8 @@ def quota_reserve(context, resources, quotas, deltas, expire,
usage_ref.save(session=session)

if unders:
raise exception.InvalidQuotaValue(unders=sorted(unders))
LOG.warning(_("Change will make usage less than 0 for the following "
"resources: %(unders)s") % locals())
if overs:
usages = dict((k, dict(in_use=v['in_use'], reserved=v['reserved']))
for k, v in usages.items())
@@ -998,6 +998,10 @@ class VolumeSizeTooLarge(QuotaError):
message = _("Maximum volume size exceeded")


class FloatingIpLimitExceeded(QuotaError):
message = _("Maximum number of floating ips exceeded")


class MetadataLimitExceeded(QuotaError):
message = _("Maximum number of metadata items exceeds %(allowed)d")

@@ -64,6 +64,7 @@ from nova.network import api as network_api
from nova.network import model as network_model
from nova.notifier import api as notifier
from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
import nova.policy
@@ -74,6 +75,8 @@ from nova import utils

LOG = logging.getLogger(__name__)

QUOTAS = quota.QUOTAS

network_opts = [
cfg.StrOpt('flat_network_bridge',
default=None,
@@ -398,21 +401,34 @@ class FloatingIP(object):
def allocate_floating_ip(self, context, project_id, pool=None):
"""Gets a floating ip from the pool."""
# NOTE(tr3buchet): all network hosts in zone now use the same pool
LOG.debug("QUOTA: %s" % quota.allowed_floating_ips(context, 1))
if quota.allowed_floating_ips(context, 1) < 1:
LOG.warn(_('Quota exceeded for %s, tried to allocate address'),
context.project_id)
raise exception.QuotaError(code='AddressLimitExceeded')
pool = pool or FLAGS.default_floating_pool

floating_ip = self.db.floating_ip_allocate_address(context,
project_id,
pool)
payload = dict(project_id=project_id, floating_ip=floating_ip)
notifier.notify(context,
notifier.publisher_id("network"),
'network.floating_ip.allocate',
notifier.INFO, payload)
# Check the quota; can't put this in the API because we get
# called into from other places
try:
reservations = QUOTAS.reserve(context, floating_ips=1)
except exception.OverQuota:
pid = context.project_id
LOG.warn(_("Quota exceeded for %(pid)s, tried to allocate "
"floating IP") % locals())
raise exception.FloatingIpLimitExceeded()

try:
floating_ip = self.db.floating_ip_allocate_address(context,
project_id,
pool)
payload = dict(project_id=project_id, floating_ip=floating_ip)
notifier.notify(context,
notifier.publisher_id("network"),
'network.floating_ip.allocate',
notifier.INFO, payload)

# Commit the reservations
QUOTAS.commit(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
QUOTAS.rollback(context, reservations)

return floating_ip

@wrap_check_policy
@@ -443,8 +459,20 @@ class FloatingIP(object):
'network.floating_ip.deallocate',
notifier.INFO, payload=payload)

# Get reservations...
try:
reservations = QUOTAS.reserve(context, floating_ips=-1)
except Exception:
reservations = None
LOG.exception(_("Failed to update usages deallocating "
"floating IP"))

self.db.floating_ip_deallocate(context, address)

# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations)

@wrap_check_policy
def associate_floating_ip(self, context, floating_address, fixed_address,
affect_auto_assigned=False):
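Deallocation paths in the hunks above reserve a negative delta so that usage is decremented through the same reservation machinery, and a failed reservation is deliberately non-fatal. A minimal sketch under those assumptions (`do_deallocate` is a hypothetical stand-in for the actual DB call):

```python
from nova import quota

QUOTAS = quota.QUOTAS


def deallocate_with_quota(context, do_deallocate):
    # Reserve a negative delta to decrement usage on commit.
    try:
        reservations = QUOTAS.reserve(context, floating_ips=-1)
    except Exception:
        # A usage-sync failure is logged but must not block deallocation.
        reservations = None

    do_deallocate()  # hypothetical stand-in for the actual DB call

    if reservations:
        QUOTAS.commit(context, reservations)
```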
@@ -60,14 +60,16 @@ class ChanceScheduler(driver.Scheduler):
host = self._schedule(context, topic, None, **kwargs)
driver.cast_to_host(context, topic, host, method, **kwargs)

def schedule_run_instance(self, context, request_spec, *_args, **kwargs):
def schedule_run_instance(self, context, request_spec, reservations,
*_args, **kwargs):
"""Create and run an instance or instances"""
num_instances = request_spec.get('num_instances', 1)
instances = []
for num in xrange(num_instances):
host = self._schedule(context, 'compute', request_spec, **kwargs)
request_spec['instance_properties']['launch_index'] = num
instance = self.create_instance_db_entry(context, request_spec)
instance = self.create_instance_db_entry(context, request_spec,
reservations)
driver.cast_to_compute_host(context, host,
'run_instance', instance_uuid=instance['uuid'], **kwargs)
instances.append(driver.encode_instance(instance))
@@ -159,7 +159,7 @@ class Scheduler(object):
for service in services
if utils.service_is_up(service)]

def create_instance_db_entry(self, context, request_spec):
def create_instance_db_entry(self, context, request_spec, reservations):
"""Create instance DB entry based on request_spec"""
base_options = request_spec['instance_properties']
if base_options.get('uuid'):
@@ -172,7 +172,7 @@ class Scheduler(object):

instance = self.compute_api.create_db_entry_for_new_instance(
context, instance_type, image, base_options,
security_group, block_device_mapping)
security_group, block_device_mapping, reservations)
# NOTE(comstud): This needs to be set for the generic exception
# checking in scheduler manager, so that it'll set this instance
# to ERROR properly.
@@ -51,7 +51,8 @@ class FilterScheduler(driver.Scheduler):
msg = _("No host selection for %s defined.") % topic
raise exception.NoValidHost(reason=msg)

def schedule_run_instance(self, context, request_spec, *args, **kwargs):
def schedule_run_instance(self, context, request_spec, reservations,
*args, **kwargs):
"""This method is called from nova.compute.api to provision
an instance. We first create a build plan (a list of WeightedHosts)
and then provision.
@@ -86,7 +87,8 @@ class FilterScheduler(driver.Scheduler):

request_spec['instance_properties']['launch_index'] = num
instance = self._provision_resource(elevated, weighted_host,
request_spec, kwargs)
request_spec, reservations,
kwargs)

if instance:
instances.append(instance)
@@ -118,9 +120,10 @@ class FilterScheduler(driver.Scheduler):
'prep_resize', **kwargs)

def _provision_resource(self, context, weighted_host, request_spec,
kwargs):
reservations, kwargs):
"""Create the requested resource in this Zone."""
instance = self.create_instance_db_entry(context, request_spec)
instance = self.create_instance_db_entry(context, request_spec,
reservations)

payload = dict(request_spec=request_spec,
weighted_host=weighted_host.to_dict(),
@@ -110,18 +110,24 @@ class SchedulerManager(manager.Manager):
Sets instance vm_state to ERROR on exceptions
"""
args = (context,) + args
reservations = kwargs.get('reservations', None)
try:
return self.driver.schedule_run_instance(*args, **kwargs)
result = self.driver.schedule_run_instance(*args, **kwargs)
return result
except exception.NoValidHost as ex:
# don't reraise
self._set_vm_state_and_notify('run_instance',
{'vm_state': vm_states.ERROR},
context, ex, *args, **kwargs)
if reservations:
QUOTAS.rollback(context, reservations)
except Exception as ex:
with excutils.save_and_reraise_exception():
self._set_vm_state_and_notify('run_instance',
{'vm_state': vm_states.ERROR},
context, ex, *args, **kwargs)
if reservations:
QUOTAS.rollback(context, reservations)

def prep_resize(self, context, topic, *args, **kwargs):
"""Tries to call schedule_prep_resize on the driver.
@@ -41,14 +41,15 @@ class SchedulerAPI(nova.rpc.proxy.RpcProxy):

def run_instance(self, ctxt, topic, request_spec, admin_password,
injected_files, requested_networks, is_first_time,
filter_properties, call=True):
filter_properties, reservations, call=True):
rpc_method = self.call if call else self.cast
return rpc_method(ctxt, self.make_msg('run_instance', topic=topic,
request_spec=request_spec, admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
is_first_time=is_first_time,
filter_properties=filter_properties))
filter_properties=filter_properties,
reservations=reservations))

def prep_resize(self, ctxt, topic, instance_uuid, instance_type_id, image,
update_db, request_spec, filter_properties):
@@ -91,7 +91,8 @@ class SimpleScheduler(chance.ChanceScheduler):
msg = _("Is the appropriate service running?")
raise exception.NoValidHost(reason=msg)

def schedule_run_instance(self, context, request_spec, *_args, **_kwargs):
def schedule_run_instance(self, context, request_spec, reservations,
*_args, **_kwargs):
num_instances = request_spec.get('num_instances', 1)
instances = []
for num in xrange(num_instances):
@@ -99,7 +100,7 @@ class SimpleScheduler(chance.ChanceScheduler):
request_spec['instance_properties'], *_args, **_kwargs)
request_spec['instance_properties']['launch_index'] = num
instance_ref = self.create_instance_db_entry(context,
request_spec)
request_spec, reservations)
driver.cast_to_compute_host(context, host, 'run_instance',
instance_uuid=instance_ref['uuid'], **_kwargs)
instances.append(driver.encode_instance(instance_ref))
@@ -22,10 +22,14 @@ from nova.api.openstack.compute.contrib import keypairs
from nova.api.openstack import wsgi
from nova import db
from nova import exception
from nova import quota
from nova import test
from nova.tests.api.openstack import fakes


QUOTAS = quota.QUOTAS


def fake_keypair(name):
return {'public_key': 'FAKE_KEY',
'fingerprint': 'FAKE_FINGERPRINT',
@@ -120,11 +124,10 @@ class KeypairsTest(test.TestCase):

def test_keypair_create_quota_limit(self):

def db_key_pair_count_by_user_max(self, user_id):
def fake_quotas_count(self, context, resource, *args, **kwargs):
return 100

self.stubs.Set(db, "key_pair_count_by_user",
db_key_pair_count_by_user_max)
self.stubs.Set(QUOTAS, "count", fake_quotas_count)

req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
@@ -163,11 +166,10 @@ class KeypairsTest(test.TestCase):

def test_keypair_import_quota_limit(self):

def db_key_pair_count_by_user_max(self, user_id):
def fake_quotas_count(self, context, resource, *args, **kwargs):
return 100

self.stubs.Set(db, "key_pair_count_by_user",
db_key_pair_count_by_user_max)
self.stubs.Set(QUOTAS, "count", fake_quotas_count)

body = {
'keypair': {
@@ -191,6 +193,26 @@ class KeypairsTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 413)

def test_keypair_create_quota_limit(self):

def fake_quotas_count(self, context, resource, *args, **kwargs):
return 100

self.stubs.Set(QUOTAS, "count", fake_quotas_count)

body = {
'keypair': {
'name': 'create_test',
},
}

req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 413)

def test_keypair_create_duplicate(self):
self.stubs.Set(db, "key_pair_get", db_key_pair_get)
body = {'keypair': {'name': 'create_duplicate'}}
@@ -28,7 +28,7 @@ def quota_set(class_name):
'floating_ips': 10, 'instances': 10, 'injected_files': 5,
'cores': 20, 'injected_file_content_bytes': 10240,
'security_groups': 10, 'security_group_rules': 20,
'key_pairs': 100}}
'key_pairs': 100, 'injected_file_path_bytes': 255}}


class QuotaClassSetsTest(test.TestCase):
@@ -47,6 +47,7 @@ class QuotaClassSetsTest(test.TestCase):
'metadata_items': 128,
'gigabytes': 1000,
'injected_files': 5,
'injected_file_path_bytes': 255,
'injected_file_content_bytes': 10240,
'security_groups': 10,
'security_group_rules': 20,
@@ -66,6 +67,7 @@ class QuotaClassSetsTest(test.TestCase):
self.assertEqual(qs['floating_ips'], 10)
self.assertEqual(qs['metadata_items'], 128)
self.assertEqual(qs['injected_files'], 5)
self.assertEqual(qs['injected_file_path_bytes'], 255)
self.assertEqual(qs['injected_file_content_bytes'], 10240)
self.assertEqual(qs['security_groups'], 10)
self.assertEqual(qs['security_group_rules'], 20)
@@ -91,6 +93,7 @@ class QuotaClassSetsTest(test.TestCase):
'gigabytes': 1000, 'floating_ips': 10,
'metadata_items': 128, 'injected_files': 5,
'injected_file_content_bytes': 10240,
'injected_file_path_bytes': 255,
'security_groups': 10,
'security_group_rules': 20,
'key_pairs': 100,
@@ -130,6 +133,7 @@ class QuotaTemplateXMLSerializerTest(test.TestCase):
exemplar = dict(quota_class_set=dict(
id='test_class',
metadata_items=10,
injected_file_path_bytes=255,
injected_file_content_bytes=20,
volumes=30,
gigabytes=40,
@@ -30,7 +30,7 @@ def quota_set(id):
'instances': 10, 'injected_files': 5, 'cores': 20,
'injected_file_content_bytes': 10240,
'security_groups': 10, 'security_group_rules': 20,
'key_pairs': 100}}
'key_pairs': 100, 'injected_file_path_bytes': 255}}


class QuotaSetsTest(test.TestCase):
@@ -49,6 +49,7 @@ class QuotaSetsTest(test.TestCase):
'metadata_items': 128,
'gigabytes': 1000,
'injected_files': 5,
'injected_file_path_bytes': 255,
'injected_file_content_bytes': 10240,
'security_groups': 10,
'security_group_rules': 20,
@@ -67,6 +68,7 @@ class QuotaSetsTest(test.TestCase):
self.assertEqual(qs['floating_ips'], 10)
self.assertEqual(qs['metadata_items'], 128)
self.assertEqual(qs['injected_files'], 5)
self.assertEqual(qs['injected_file_path_bytes'], 255)
self.assertEqual(qs['injected_file_content_bytes'], 10240)
self.assertEqual(qs['security_groups'], 10)
self.assertEqual(qs['security_group_rules'], 20)
@@ -88,6 +90,7 @@ class QuotaSetsTest(test.TestCase):
'floating_ips': 10,
'metadata_items': 128,
'injected_files': 5,
'injected_file_path_bytes': 255,
'injected_file_content_bytes': 10240,
'security_groups': 10,
'security_group_rules': 20,
@@ -114,6 +117,7 @@ class QuotaSetsTest(test.TestCase):
'gigabytes': 1000, 'floating_ips': 10,
'metadata_items': 128, 'injected_files': 5,
'injected_file_content_bytes': 10240,
'injected_file_path_bytes': 255,
'security_groups': 10,
'security_group_rules': 20,
'key_pairs': 100}}
@@ -161,6 +165,7 @@ class QuotaXMLSerializerTest(test.TestCase):
exemplar = dict(quota_set=dict(
id='project_id',
metadata_items=10,
injected_file_path_bytes=255,
injected_file_content_bytes=20,
volumes=30,
gigabytes=40,
@@ -224,7 +224,7 @@ class TestSecurityGroups(test.TestCase):

def test_create_security_group_quota_limit(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
for num in range(1, FLAGS.quota_security_groups):
for num in range(1, FLAGS.quota_security_groups + 1):
name = 'test%s' % num
sg = security_group_template(name=name)
res_dict = self.controller.create(req, {'security_group': sg})
@@ -57,10 +57,11 @@ class BaseLimitTestSuite(test.TestCase):
self.stubs.Set(limits.Limit, "_get_time", self._get_time)
self.absolute_limits = {}

def stub_get_project_quotas(context, project_id):
return self.absolute_limits
def stub_get_project_quotas(context, project_id, usages=True):
return dict((k, dict(limit=v))
for k, v in self.absolute_limits.items())

self.stubs.Set(nova.quota, "get_project_quotas",
self.stubs.Set(nova.quota.QUOTAS, "get_project_quotas",
stub_get_project_quotas)

def _get_time(self):
@@ -1287,6 +1287,7 @@ class ServersControllerTest(test.TestCase):
self.assertEqual(s['name'], 'server%d' % (i + 1))

def test_delete_server_instance(self):
fakes.stub_out_instance_quota(self.stubs, 0)
req = fakes.HTTPRequest.blank('/v2/fake/servers/%s' % FAKE_UUID)
req.method = 'DELETE'

@@ -1304,6 +1305,7 @@ class ServersControllerTest(test.TestCase):
self.assertEqual(self.server_delete_called, True)

def test_delete_server_instance_while_building(self):
fakes.stub_out_instance_quota(self.stubs, 0)
req = fakes.HTTPRequest.blank('/v2/fake/servers/%s' % FAKE_UUID)
req.method = 'DELETE'

@@ -2338,7 +2340,7 @@ class ServersControllerCreateTest(test.TestCase):
req.headers["content-type"] = "application/json"
try:
server = self.controller.create(req, body).obj['server']
fail('excepted quota to be exceeded')
self.fail('expected quota to be exceeded')
except webob.exc.HTTPRequestEntityTooLarge as e:
self.assertEquals(e.explanation,
_('Quota exceeded: already used 1 of 1 instances'))
@@ -17,13 +17,12 @@

import datetime

from glance import client as glance_client
import routes
import webob
import webob.dec
import webob.request

from glance import client as glance_client

from nova.api import auth as api_auth
from nova.api import openstack as openstack_api
from nova.api.openstack import auth
@@ -40,12 +39,16 @@ from nova.db.sqlalchemy import models
from nova import exception as exc
import nova.image.fake
from nova.openstack.common import jsonutils
from nova import quota
from nova.tests import fake_network
from nova.tests.glance import stubs as glance_stubs
from nova import utils
from nova import wsgi


QUOTAS = quota.QUOTAS


FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
FAKE_UUIDS = {}

@@ -142,9 +145,19 @@ def stub_out_rate_limiting(stubs):


def stub_out_instance_quota(stubs, allowed):
def fake_allowed_instances(context, max_count, instance_type):
return allowed
stubs.Set(nova.quota, 'allowed_instances', fake_allowed_instances)
def fake_reserve(context, **deltas):
instances = deltas.pop('instances', 0)
if instances > allowed:
raise exc.OverQuota(overs=['instances'], quotas=dict(
instances=allowed,
cores=10000,
ram=10000 * 1024,
), usages=dict(
instances=dict(in_use=0, reserved=0),
cores=dict(in_use=0, reserved=0),
ram=dict(in_use=0, reserved=0),
))
stubs.Set(QUOTAS, 'reserve', fake_reserve)


def stub_out_networking(stubs):
@@ -45,6 +45,7 @@ from nova import log as logging
from nova.notifier import test_notifier
from nova.openstack.common import importutils
import nova.policy
from nova import quota
from nova import rpc
from nova.rpc import common as rpc_common
from nova.scheduler import driver as scheduler_driver
@@ -54,6 +55,7 @@ from nova import utils
import nova.volume


QUOTAS = quota.QUOTAS
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
flags.DECLARE('stub_network', 'nova.compute.manager')
@@ -71,13 +73,14 @@ def rpc_call_wrapper(context, topic, msg, do_cast=True):
if (topic == FLAGS.scheduler_topic and
msg['method'] == 'run_instance'):
request_spec = msg['args']['request_spec']
reservations = msg['args'].get('reservations')
scheduler = scheduler_driver.Scheduler
num_instances = request_spec.get('num_instances', 1)
instances = []
for num in xrange(num_instances):
request_spec['instance_properties']['launch_index'] = num
instance = scheduler().create_instance_db_entry(
context, request_spec)
context, request_spec, reservations)
encoded = scheduler_driver.encode_instance(instance)
instances.append(encoded)
return instances
@@ -148,6 +151,7 @@ class BaseTestCase(test.TestCase):
inst['instance_type_id'] = type_id
inst['ami_launch_index'] = 0
inst['memory_mb'] = 0
inst['vcpus'] = 0
inst['root_gb'] = 0
inst['ephemeral_gb'] = 0
inst.update(params)
@@ -4123,10 +4127,9 @@ class KeypairAPITestCase(BaseTestCase):
self.ctxt, self.ctxt.user_id, 'foo')

def test_create_keypair_quota_limit(self):
def db_key_pair_count_by_user_max(self, user_id):
def fake_quotas_count(self, context, resource, *args, **kwargs):
return FLAGS.quota_key_pairs
self.stubs.Set(db, "key_pair_count_by_user",
db_key_pair_count_by_user_max)
self.stubs.Set(QUOTAS, "count", fake_quotas_count)
self.assertRaises(exception.KeypairLimitExceeded,
self.keypair_api.create_key_pair,
self.ctxt, self.ctxt.user_id, 'foo')
@@ -4158,10 +4161,9 @@ class KeypairAPITestCase(BaseTestCase):
'* BAD CHARACTERS! *', self.pub_key)

def test_import_keypair_quota_limit(self):
def db_key_pair_count_by_user_max(self, user_id):
def fake_quotas_count(self, context, resource, *args, **kwargs):
return FLAGS.quota_key_pairs
self.stubs.Set(db, "key_pair_count_by_user",
db_key_pair_count_by_user_max)
self.stubs.Set(QUOTAS, "count", fake_quotas_count)
self.assertRaises(exception.KeypairLimitExceeded,
self.keypair_api.import_key_pair,
self.ctxt, self.ctxt.user_id, 'foo', self.pub_key)
@@ -551,26 +551,12 @@ class VlanNetworkTestCase(test.TestCase):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)

def fake1(*args, **kwargs):
def fake_allocate_address(*args, **kwargs):
return {'address': '10.0.0.1', 'project_id': ctxt.project_id}

def fake2(*args, **kwargs):
return 25
self.stubs.Set(self.network.db, 'floating_ip_allocate_address',
fake_allocate_address)

def fake3(*args, **kwargs):
return 0

self.stubs.Set(self.network.db, 'floating_ip_allocate_address', fake1)

# this time should raise
self.stubs.Set(self.network.db, 'floating_ip_count_by_project', fake2)
self.assertRaises(exception.QuotaError,
self.network.allocate_floating_ip,
ctxt,
ctxt.project_id)

# this time should not
self.stubs.Set(self.network.db, 'floating_ip_count_by_project', fake3)
self.network.allocate_floating_ip(ctxt, ctxt.project_id)

def test_deallocate_floating_ip(self):
@@ -71,13 +71,14 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
instance2 = {'uuid': 'fake-uuid2'}
instance1_encoded = {'uuid': 'fake-uuid1', '_is_precooked': False}
instance2_encoded = {'uuid': 'fake-uuid2', '_is_precooked': False}
reservations = ['resv1', 'resv2']

# create_instance_db_entry() usually does this, but we're
# stubbing it.
def _add_uuid1(ctxt, request_spec):
def _add_uuid1(ctxt, request_spec, reservations):
request_spec['instance_properties']['uuid'] = 'fake-uuid1'

def _add_uuid2(ctxt, request_spec):
def _add_uuid2(ctxt, request_spec, reservations):
request_spec['instance_properties']['uuid'] = 'fake-uuid2'

self.mox.StubOutWithMock(ctxt, 'elevated')
@@ -92,8 +93,8 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(
['host1', 'host2', 'host3', 'host4'])
random.random().AndReturn(.5)
self.driver.create_instance_db_entry(ctxt,
request_spec).WithSideEffects(_add_uuid1).AndReturn(
self.driver.create_instance_db_entry(ctxt, request_spec,
reservations).WithSideEffects(_add_uuid1).AndReturn(
instance1)
driver.cast_to_compute_host(ctxt, 'host3', 'run_instance',
instance_uuid=instance1['uuid'], **fake_kwargs)
@@ -103,8 +104,8 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(
['host1', 'host2', 'host3', 'host4'])
random.random().AndReturn(.2)
self.driver.create_instance_db_entry(ctxt,
request_spec).WithSideEffects(_add_uuid2).AndReturn(
self.driver.create_instance_db_entry(ctxt, request_spec,
reservations).WithSideEffects(_add_uuid2).AndReturn(
instance2)
driver.cast_to_compute_host(ctxt, 'host1', 'run_instance',
instance_uuid=instance2['uuid'], **fake_kwargs)
@@ -112,7 +113,7 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):

self.mox.ReplayAll()
result = self.driver.schedule_run_instance(ctxt, request_spec,
*fake_args, **fake_kwargs)
reservations, *fake_args, **fake_kwargs)
expected = [instance1_encoded, instance2_encoded]
self.assertEqual(result, expected)

@@ -128,7 +129,7 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
# stubbing it.
def _add_uuid(num):
"""Return a function that adds the provided uuid number."""
def _add_uuid_num(_, spec):
def _add_uuid_num(_, spec, reservations):
spec['instance_properties']['uuid'] = 'fake-uuid%d' % num
return _add_uuid_num

@@ -150,7 +151,7 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
# instance 1
self.driver._schedule(ctxt, 'compute', request_spec).AndReturn('host')
self.driver.create_instance_db_entry(
ctxt, mox.Func(_has_launch_index(0))
ctxt, mox.Func(_has_launch_index(0)), None
).WithSideEffects(_add_uuid(1)).AndReturn(instance1)
driver.cast_to_compute_host(ctxt, 'host', 'run_instance',
instance_uuid=instance1['uuid'])
@@ -158,14 +159,14 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
# instance 2
self.driver._schedule(ctxt, 'compute', request_spec).AndReturn('host')
self.driver.create_instance_db_entry(
ctxt, mox.Func(_has_launch_index(1))
ctxt, mox.Func(_has_launch_index(1)), None
).WithSideEffects(_add_uuid(2)).AndReturn(instance2)
driver.cast_to_compute_host(ctxt, 'host', 'run_instance',
instance_uuid=instance2['uuid'])
driver.encode_instance(instance2).AndReturn(instance2)
self.mox.ReplayAll()

self.driver.schedule_run_instance(ctxt, request_spec)
self.driver.schedule_run_instance(ctxt, request_spec, None)

def test_basic_schedule_run_instance_no_hosts(self):
ctxt = context.RequestContext('fake', 'fake', False)
@@ -51,7 +51,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
'ephemeral_gb': 0},
'instance_properties': {'project_id': 1}}
self.assertRaises(exception.NoValidHost, sched.schedule_run_instance,
fake_context, request_spec)
fake_context, request_spec, None)

def test_run_instance_non_admin(self):
"""Test creating an instance locally using run_instance, passing
@@ -72,7 +72,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
request_spec = {'instance_type': {'memory_mb': 1, 'local_gb': 1},
'instance_properties': {'project_id': 1}}
self.assertRaises(exception.NoValidHost, sched.schedule_run_instance,
fake_context, request_spec)
fake_context, request_spec, None)
self.assertTrue(self.was_admin)

def test_schedule_bad_topic(self):
@@ -117,14 +117,16 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
# instance 1
self.driver._provision_resource(
ctxt, 'host1',
mox.Func(_has_launch_index(0)), fake_kwargs).AndReturn(instance1)
mox.Func(_has_launch_index(0)), None,
fake_kwargs).AndReturn(instance1)
# instance 2
self.driver._provision_resource(
ctxt, 'host2',
mox.Func(_has_launch_index(1)), fake_kwargs).AndReturn(instance2)
mox.Func(_has_launch_index(1)), None,
fake_kwargs).AndReturn(instance2)
self.mox.ReplayAll()

self.driver.schedule_run_instance(context_fake, request_spec,
self.driver.schedule_run_instance(context_fake, request_spec, None,
**fake_kwargs)

def test_schedule_happy_day(self):
@@ -68,14 +68,16 @@ class SchedulerRpcAPITestCase(test.TestCase):
                topic='fake_topic', request_spec='fake_request_spec',
                admin_password='pw', injected_files='fake_injected_files',
                requested_networks='fake_requested_networks',
                is_first_time=True, filter_properties='fake_filter_properties')
                is_first_time=True, filter_properties='fake_filter_properties',
                reservations=None)

    def test_run_instance_cast(self):
        self._test_scheduler_api('run_instance', rpc_method='cast',
                topic='fake_topic', request_spec='fake_request_spec',
                admin_password='pw', injected_files='fake_injected_files',
                requested_networks='fake_requested_networks',
                is_first_time=True, filter_properties='fake_filter_properties')
                is_first_time=True, filter_properties='fake_filter_properties',
                reservations=None)

    def test_prep_resize(self):
        self._test_scheduler_api('prep_resize', rpc_method='cast',
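Both RPC API tests now expect a `reservations` key in the `run_instance` message. A hedged usage sketch; the `scheduler_rpcapi` handle and its plumbing are assumed from the tests above, not shown in this diff:

    # Illustrative call only: existing callers pass reservations=None and
    # keep their old behavior; a real reservation list rides along to the
    # scheduler, which forwards it toward compute.
    scheduler_rpcapi.run_instance(
        ctxt, topic='compute', request_spec=request_spec,
        admin_password='pw', injected_files=[],
        requested_networks=None, is_first_time=True,
        filter_properties={}, reservations=None)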
@@ -389,10 +389,10 @@ class SchedulerTestCase(test.TestCase):
        self.driver.compute_api.create_db_entry_for_new_instance(
                self.context, instance_type, image, base_options,
                security_group,
                block_device_mapping).AndReturn(fake_instance)
                block_device_mapping, None).AndReturn(fake_instance)
        self.mox.ReplayAll()
        instance = self.driver.create_instance_db_entry(self.context,
                request_spec)
                request_spec, None)
        self.mox.VerifyAll()
        self.assertEqual(instance, fake_instance)

@@ -407,7 +407,7 @@ class SchedulerTestCase(test.TestCase):

        self.mox.ReplayAll()
        instance = self.driver.create_instance_db_entry(self.context,
                request_spec)
                request_spec, None)
        self.assertEqual(instance, fake_instance)

    def _live_migration_instance(self):
@@ -297,7 +297,8 @@ class OldQuotaTestCase(test.TestCase):
                scheduler = scheduler_driver.Scheduler
                instance = scheduler().create_instance_db_entry(
                        context,
                        msg['args']['request_spec'])
                        msg['args']['request_spec'],
                        None)
                return [scheduler_driver.encode_instance(instance)]
            else:
                return orig_rpc_call(context, topic, msg)
@@ -1840,7 +1841,10 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
        def sync(context, project_id, session):
            self.sync_called.add(res_name)
            if res_name in self.usages:
                return {res_name: self.usages[res_name].in_use - 1}
                if self.usages[res_name].in_use < 0:
                    return {res_name: 2}
                else:
                    return {res_name: self.usages[res_name].in_use - 1}
            return {res_name: 0}
        return sync

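quota_reserve consults a per-resource sync function whenever a usage record looks stale or suspect, and the fake above now exercises the negative-usage branch. The same fake, written out as a standalone factory with comments (a sketch; `usages` and `sync_called` stand in for the test fixture's state):

    def make_sync(res_name, usages, sync_called):
        def sync(context, project_id, session):
            # Record that quota_reserve asked for a refresh of this resource.
            sync_called.add(res_name)
            if res_name in usages:
                if usages[res_name].in_use < 0:
                    # A negative in_use means the record is corrupt; report a
                    # known-good count so the test can verify the repair.
                    return {res_name: 2}
                else:
                    return {res_name: usages[res_name].in_use - 1}
            return {res_name: 0}
        return sync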
@@ -2008,6 +2012,57 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
                     delta=2 * 1024),
                ])

    def test_quota_reserve_negative_in_use(self):
        self.init_usage('test_project', 'instances', -1, 0, until_refresh=1)
        self.init_usage('test_project', 'cores', -1, 0, until_refresh=1)
        self.init_usage('test_project', 'ram', -1, 0, until_refresh=1)
        context = FakeContext('test_project', 'test_class')
        quotas = dict(
            instances=5,
            cores=10,
            ram=10 * 1024,
            )
        deltas = dict(
            instances=2,
            cores=4,
            ram=2 * 1024,
            )
        result = sqa_api.quota_reserve(context, self.resources, quotas,
                                       deltas, self.expire, 5, 0)

        self.assertEqual(self.sync_called, set(['instances', 'cores', 'ram']))
        self.compare_usage(self.usages, [
                dict(resource='instances',
                     project_id='test_project',
                     in_use=2,
                     reserved=2,
                     until_refresh=5),
                dict(resource='cores',
                     project_id='test_project',
                     in_use=2,
                     reserved=4,
                     until_refresh=5),
                dict(resource='ram',
                     project_id='test_project',
                     in_use=2,
                     reserved=2 * 1024,
                     until_refresh=5),
                ])
        self.assertEqual(self.usages_created, {})
        self.compare_reservation(result, [
                dict(resource='instances',
                     usage_id=self.usages['instances'],
                     project_id='test_project',
                     delta=2),
                dict(resource='cores',
                     usage_id=self.usages['cores'],
                     project_id='test_project',
                     delta=4),
                dict(resource='ram',
                     usage_id=self.usages['ram'],
                     delta=2 * 1024),
                ])

    def test_quota_reserve_until_refresh(self):
        self.init_usage('test_project', 'instances', 3, 0, until_refresh=1)
        self.init_usage('test_project', 'cores', 3, 0, until_refresh=1)
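The expected numbers in this new test follow from the refresh path: each usage starts at -1, which forces a call to the fake sync; the sync reports an in_use of 2; the requested deltas (2 instances, 4 cores, 2048 MB of RAM) are then reserved on top of the repaired usage, and until_refresh restarts at the 5 passed into quota_reserve.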
@@ -2181,10 +2236,8 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
            cores=-4,
            ram=-2 * 1024,
            )
        self.assertRaises(exception.InvalidQuotaValue,
                          sqa_api.quota_reserve,
                          context, self.resources, quotas,
                          deltas, self.expire, 0, 0)
        result = sqa_api.quota_reserve(context, self.resources, quotas,
                                       deltas, self.expire, 0, 0)

        self.assertEqual(self.sync_called, set([]))
        self.compare_usage(self.usages, [
@@ -2205,7 +2258,19 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
                     until_refresh=None),
                ])
        self.assertEqual(self.usages_created, {})
        self.assertEqual(self.reservations_created, {})
        self.compare_reservation(result, [
                dict(resource='instances',
                     usage_id=self.usages['instances'],
                     project_id='test_project',
                     delta=-2),
                dict(resource='cores',
                     usage_id=self.usages['cores'],
                     project_id='test_project',
                     delta=-4),
                dict(resource='ram',
                     usage_id=self.usages['ram'],
                     delta=-2 * 1024),
                ])

    def test_quota_reserve_overs(self):
        self.init_usage('test_project', 'instances', 4, 0)
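Under the old quota code a negative delta was rejected with InvalidQuotaValue; after the refactor it is the normal way to release quota, so the test now asserts that reservations carrying negative deltas are created (and that no sync is triggered, since the cached usage is still fresh). Committing such a reservation later decrements in_use, which is exactly what the volume-delete path further below relies on.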
@@ -32,10 +32,12 @@ from nova import log as logging
from nova.notifier import test_notifier
from nova.openstack.common import importutils
import nova.policy
from nova import quota
from nova import rpc
from nova import test
import nova.volume.api

QUOTAS = quota.QUOTAS
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)

@@ -90,6 +92,20 @@ class VolumeTestCase(test.TestCase):

    def test_create_delete_volume(self):
        """Test volume can be created and deleted."""
        # Need to stub out reserve, commit, and rollback
        def fake_reserve(context, expire=None, **deltas):
            return ["RESERVATION"]

        def fake_commit(context, reservations):
            pass

        def fake_rollback(context, reservations):
            pass

        self.stubs.Set(QUOTAS, "reserve", fake_reserve)
        self.stubs.Set(QUOTAS, "commit", fake_commit)
        self.stubs.Set(QUOTAS, "rollback", fake_rollback)

        volume = self._create_volume()
        volume_id = volume['id']
        self.assertEquals(len(test_notifier.NOTIFICATIONS), 0)
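The three stubs above turn QUOTAS into a no-op for tests that are not about quotas. If several tests need the same setup, the pattern factors into a small helper, for example (hypothetical helper, not part of this diff):

    def stub_out_quotas(stubs, quotas_obj):
        # Make reserve/commit/rollback inert so volume tests can run
        # without touching real quota state.
        stubs.Set(quotas_obj, "reserve",
                  lambda context, expire=None, **deltas: ["RESERVATION"])
        stubs.Set(quotas_obj, "commit", lambda context, reservations: None)
        stubs.Set(quotas_obj, "rollback", lambda context, reservations: None)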
@@ -38,6 +38,8 @@ flags.DECLARE('storage_availability_zone', 'nova.volume.manager')

LOG = logging.getLogger(__name__)

QUOTAS = quota.QUOTAS


def wrap_check_policy(func):
    """Check policy corresponding to the wrapped methods prior to execution
@@ -80,7 +82,9 @@ class API(base.Base):
        else:
            snapshot_id = None

        if quota.allowed_volumes(context, 1, size) < 1:
        try:
            reservations = QUOTAS.reserve(context, volumes=1, gigabytes=size)
        except exception.OverQuota:
            pid = context.project_id
            LOG.warn(_("Quota exceeded for %(pid)s, tried to create"
                       " %(size)sG volume") % locals())
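Note the split of responsibilities: the API reserves quota up front and fails fast on OverQuota, but the commit happens later in the volume manager, once the volume has actually built. A self-contained sketch of that two-phase flow, assuming a quota engine and exception module shaped like the ones imported above:

    def create_with_quota(context, size, do_create):
        # Phase 1: reserve; an OverQuota here means nothing was consumed.
        reservations = QUOTAS.reserve(context, volumes=1, gigabytes=size)
        try:
            # Phase 2: do the work, then commit the reservation.
            volume = do_create(context, reservations)
            QUOTAS.commit(context, reservations)
            return volume
        except Exception:
            # On failure the reservation is rolled back, freeing the quota.
            QUOTAS.rollback(context, reservations)
            raise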
@@ -114,7 +118,8 @@
                 {"method": "create_volume",
                  "args": {"topic": FLAGS.volume_topic,
                           "volume_id": volume['id'],
                           "snapshot_id": snapshot_id}})
                           "snapshot_id": snapshot_id,
                           "reservations": reservations}})
        return volume

    # TODO(yamahata): eliminate dumb polling
@@ -46,6 +46,7 @@ from nova import manager
from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova import quota
from nova import rpc
from nova import utils
from nova.volume import utils as volume_utils
@@ -54,6 +55,8 @@ from nova.volume import volume_types

LOG = logging.getLogger(__name__)

QUOTAS = quota.QUOTAS

volume_manager_opts = [
    cfg.StrOpt('storage_availability_zone',
               default='nova',
@@ -103,7 +106,8 @@ class VolumeManager(manager.SchedulerDependentManager):
        else:
            LOG.info(_("volume %s: skipping export"), volume['name'])

    def create_volume(self, context, volume_id, snapshot_id=None):
    def create_volume(self, context, volume_id, snapshot_id=None,
                      reservations=None):
        """Creates and exports the volume."""
        context = context.elevated()
        volume_ref = self.db.volume_get(context, volume_id)
@@ -136,8 +140,14 @@
            model_update = self.driver.create_export(context, volume_ref)
            if model_update:
                self.db.volume_update(context, volume_ref['id'], model_update)

            # Commit the reservation
            if reservations:
                QUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                if reservations:
                    QUOTAS.rollback(context, reservations)
                self.db.volume_update(context,
                                      volume_ref['id'], {'status': 'error'})

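excutils.save_and_reraise_exception preserves the original exception and traceback while the handler runs, so a failure inside the rollback or the status update cannot mask the error that triggered the cleanup. A minimal sketch of the discipline this hunk adopts; the `work` callable stands in for the body of create_volume:

    from nova.openstack.common import excutils

    def run_guarded(context, reservations, work):
        try:
            work()
            # Only consume the quota once the work has fully succeeded.
            if reservations:
                QUOTAS.commit(context, reservations)
        except Exception:
            # Re-raises the original exception after the block finishes.
            with excutils.save_and_reraise_exception():
                if reservations:
                    QUOTAS.rollback(context, reservations)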
@@ -179,9 +189,22 @@
                                  volume_ref['id'],
                                  {'status': 'error_deleting'})

        # Get reservations
        try:
            reservations = QUOTAS.reserve(context, volumes=-1,
                                          gigabytes=-volume_ref['size'])
        except Exception:
            reservations = None
            LOG.exception(_("Failed to update usages deleting volume"))

        self.db.volume_destroy(context, volume_id)
        LOG.debug(_("volume %s: deleted successfully"), volume_ref['name'])
        self._notify_about_volume_usage(context, volume_ref, "delete.end")

        # Commit the reservations
        if reservations:
            QUOTAS.commit(context, reservations)

        return True

    def create_snapshot(self, context, volume_id, snapshot_id):
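The delete path reserves negative deltas first but only commits them after the row is destroyed, so quota is released exactly when the volume is really gone; if the reserve itself fails, deletion still proceeds and the periodic usage sync is left to reconcile the numbers. In outline, with names as in the hunk above:

    try:
        reservations = QUOTAS.reserve(context, volumes=-1,
                                      gigabytes=-volume_ref['size'])
    except Exception:
        reservations = None  # delete anyway; usage sync will reconcile
    self.db.volume_destroy(context, volume_id)
    if reservations:
        QUOTAS.commit(context, reservations)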