Finish quota refactor.

Finishes quota refactoring by making use of the new quota infrastructure.
Partially implements blueprint quota-refactor (the final piece is to
remove the old quota architecture).

This change is fairly substantial.  To make it easier to review, it has been
broken up into 3 parts.  This is the second part.

Change-Id: I1c8b43198f0d44e9e13a45575361aa043fd0639e
Kevin L. Mitchell 2012-05-11 15:30:14 -05:00
parent 7e15d4e28f
commit b7f0946bbd
34 changed files with 527 additions and 176 deletions
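For reviewers unfamiliar with the new infrastructure, the hunks below apply two idioms over and over: a plain limit_check() for resources that are only counted (image metadata, injected files, security group rules, key pairs), and a reserve() / commit() / rollback() cycle for resources whose usage is tracked (instances, cores, ram, security groups, floating IPs). The following is a minimal, self-contained sketch of those two idioms only; DummyQuotaEngine, its hard-coded limits, and the local OverQuota class are invented stand-ins for illustration — the real calls in this change go through nova.quota.QUOTAS and raise nova.exception.OverQuota.

import uuid


class OverQuota(Exception):
    pass


class DummyQuotaEngine(object):
    def __init__(self, limits):
        self.limits = limits                        # e.g. {'security_groups': 10}
        self.in_use = dict((k, 0) for k in limits)
        self.reservations = {}                      # reservation id -> deltas

    def limit_check(self, context, **values):
        # Hard check against the absolute limits; nothing is recorded.
        for resource, value in values.items():
            if value > self.limits[resource]:
                raise OverQuota(resource)

    def reserve(self, context, **deltas):
        # Check headroom, then record a reservation to commit or roll back later.
        for resource, delta in deltas.items():
            if self.in_use[resource] + delta > self.limits[resource]:
                raise OverQuota(resource)
        reservation = str(uuid.uuid4())
        self.reservations[reservation] = deltas
        return [reservation]

    def commit(self, context, reservations):
        # Apply the reserved deltas to the recorded usage.
        for reservation in reservations:
            for resource, delta in self.reservations.pop(reservation).items():
                self.in_use[resource] += delta

    def rollback(self, context, reservations):
        # Discard the reservation without touching usage.
        for reservation in reservations:
            self.reservations.pop(reservation, None)


QUOTAS = DummyQuotaEngine({'security_groups': 10, 'metadata_items': 128})

# Pattern 1: plain limit check.
QUOTAS.limit_check(None, metadata_items=5)

# Pattern 2: reserve, do the work, then commit -- or roll back on failure.
reservations = QUOTAS.reserve(None, security_groups=1)
try:
    # ... create the security group in the database here ...
    QUOTAS.commit(None, reservations)
except Exception:
    QUOTAS.rollback(None, reservations)
    raise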

View File

@@ -105,6 +105,8 @@ flags.DECLARE('vpn_start', 'nova.network.manager')
 flags.DECLARE('default_floating_pool', 'nova.network.manager')
 flags.DECLARE('public_interface', 'nova.network.linux_net')

+QUOTAS = quota.QUOTAS

 # Decorators for actions
 def args(*args, **kwargs):

@@ -493,11 +495,11 @@ class ProjectCommands(object):
                 db.quota_update(ctxt, project_id, key, value)
             except exception.ProjectQuotaNotFound:
                 db.quota_create(ctxt, project_id, key, value)
-        project_quota = quota.get_project_quotas(ctxt, project_id)
+        project_quota = QUOTAS.get_project_quotas(ctxt, project_id)
         for key, value in project_quota.iteritems():
-            if value is None:
-                value = 'unlimited'
-            print '%s: %s' % (key, value)
+            if value['limit'] < 0 or value['limit'] is None:
+                value['limit'] = 'unlimited'
+            print '%s: %s' % (key, value['limit'])
     @args('--project', dest="project_id", metavar='<Project name>',
           help='Project name')

View File

@@ -40,6 +40,7 @@ from nova import flags
 from nova.image import s3
 from nova import log as logging
 from nova import network
+from nova.openstack.common import excutils
 from nova.openstack.common import importutils
 from nova import quota
 from nova import utils

@@ -50,6 +51,8 @@ FLAGS = flags.FLAGS
 LOG = logging.getLogger(__name__)

+QUOTAS = quota.QUOTAS

 def validate_ec2_id(val):
     if not validator.validate_str()(val):

@@ -713,10 +716,11 @@ class CloudController(object):
                 raise exception.EC2APIError(err % values_for_rule)
             postvalues.append(values_for_rule)

-        allowed = quota.allowed_security_group_rules(context,
-                                                     security_group['id'],
-                                                     1)
-        if allowed < 1:
+        count = QUOTAS.count(context, 'security_group_rules',
+                             security_group['id'])
+        try:
+            QUOTAS.limit_check(context, security_group_rules=count + 1)
+        except exception.OverQuota:
             msg = _("Quota exceeded, too many security group rules.")
             raise exception.EC2APIError(msg)

@@ -777,17 +781,26 @@ class CloudController(object):
             msg = _('group %s already exists')
             raise exception.EC2APIError(msg % group_name)

-        if quota.allowed_security_groups(context, 1) < 1:
+        try:
+            reservations = QUOTAS.reserve(context, security_groups=1)
+        except exception.OverQuota:
             msg = _("Quota exceeded, too many security groups.")
             raise exception.EC2APIError(msg)

-        group = {'user_id': context.user_id,
-                 'project_id': context.project_id,
-                 'name': group_name,
-                 'description': group_description}
-        group_ref = db.security_group_create(context, group)
-        self.sgh.trigger_security_group_create_refresh(context, group)
+        try:
+            group = {'user_id': context.user_id,
+                     'project_id': context.project_id,
+                     'name': group_name,
+                     'description': group_description}
+            group_ref = db.security_group_create(context, group)
+            self.sgh.trigger_security_group_create_refresh(context, group)
+
+            # Commit the reservation
+            QUOTAS.commit(context, reservations)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                QUOTAS.rollback(context, reservations)

         return {'securityGroupSet': [self._format_security_group(context,
                                                                   group_ref)]}

@@ -810,11 +823,25 @@ class CloudController(object):
             raise notfound(security_group_id=group_id)
         if db.security_group_in_use(context, security_group.id):
             raise exception.InvalidGroup(reason="In Use")
+
+        # Get reservations
+        try:
+            reservations = QUOTAS.reserve(context, security_groups=-1)
+        except Exception:
+            reservations = None
+            LOG.exception(_("Failed to update usages deallocating "
+                            "security group"))
+
         LOG.audit(_("Delete security group %s"), group_name, context=context)
         db.security_group_destroy(context, security_group.id)
         self.sgh.trigger_security_group_destroy_refresh(context,
                                                         security_group.id)
+
+        # Commit the reservations
+        if reservations:
+            QUOTAS.commit(context, reservations)
+
         return True

     def get_console_output(self, context, instance_id, **kwargs):

View File

@@ -27,6 +27,7 @@ from nova.api.openstack import wsgi
 from nova.api.openstack import xmlutil
 from nova.compute import task_states
 from nova.compute import vm_states
+from nova import exception
 from nova import flags
 from nova import log as logging
 from nova.network import model as network_model

@@ -35,6 +36,7 @@ from nova import quota
 LOG = logging.getLogger(__name__)
 FLAGS = flags.FLAGS
+QUOTAS = quota.QUOTAS

 XML_NS_V11 = 'http://docs.openstack.org/compute/api/v1.1'

@@ -272,9 +274,9 @@ def get_version_from_href(href):
 def check_img_metadata_properties_quota(context, metadata):
     if metadata is None:
         return
-    num_metadata = len(metadata)
-    quota_metadata = quota.allowed_metadata_items(context, num_metadata)
-    if quota_metadata < num_metadata:
+    try:
+        QUOTAS.limit_check(context, metadata_items=len(metadata))
+    except exception.OverQuota:
         expl = _("Image metadata limit exceeded")
         raise webob.exc.HTTPRequestEntityTooLarge(explanation=expl,
                                                   headers={'Retry-After': 0})

View File

@@ -23,6 +23,9 @@ from nova import exception
 from nova import quota

+QUOTAS = quota.QUOTAS

 authorize = extensions.extension_authorizer('compute', 'quota_classes')

@@ -32,7 +35,7 @@ class QuotaClassTemplate(xmlutil.TemplateBuilder):
                                        selector='quota_class_set')
         root.set('id')
-        for resource in quota.quota_resources:
+        for resource in QUOTAS.resources:
             elem = xmlutil.SubTemplateElement(root, resource)
             elem.text = resource

@@ -46,7 +49,7 @@ class QuotaClassSetsController(object):
         result = dict(id=str(quota_class))
-        for resource in quota.quota_resources:
+        for resource in QUOTAS.resources:
             result[resource] = quota_set[resource]
         return dict(quota_class_set=result)

@@ -58,7 +61,7 @@ class QuotaClassSetsController(object):
         try:
             db.sqlalchemy.api.authorize_quota_class_context(context, id)
             return self._format_quota_set(id,
-                                          quota.get_class_quotas(context, id))
+                                          QUOTAS.get_class_quotas(context, id))
         except exception.NotAuthorized:
             raise webob.exc.HTTPForbidden()

@@ -68,7 +71,7 @@ class QuotaClassSetsController(object):
         authorize(context)
         quota_class = id
         for key in body['quota_class_set'].keys():
-            if key in quota.quota_resources:
+            if key in QUOTAS:
                 value = int(body['quota_class_set'][key])
                 try:
                     db.quota_class_update(context, quota_class, key, value)

@@ -76,8 +79,8 @@ class QuotaClassSetsController(object):
                     db.quota_class_create(context, quota_class, key, value)
                 except exception.AdminRequired:
                     raise webob.exc.HTTPForbidden()
-        return {'quota_class_set': quota.get_class_quotas(context,
-                                                          quota_class)}
+        return {'quota_class_set': QUOTAS.get_class_quotas(context,
+                                                           quota_class)}

 class Quota_classes(extensions.ExtensionDescriptor):

View File

@@ -26,6 +26,9 @@ from nova import exception
 from nova import quota

+QUOTAS = quota.QUOTAS

 authorize = extensions.extension_authorizer('compute', 'quotas')

@@ -34,7 +37,7 @@ class QuotaTemplate(xmlutil.TemplateBuilder):
         root = xmlutil.TemplateElement('quota_set', selector='quota_set')
         root.set('id')
-        for resource in quota.quota_resources:
+        for resource in QUOTAS.resources:
             elem = xmlutil.SubTemplateElement(root, resource)
             elem.text = resource

@@ -48,7 +51,7 @@ class QuotaSetsController(object):
         result = dict(id=str(project_id))
-        for resource in quota.quota_resources:
+        for resource in QUOTAS.resources:
             result[resource] = quota_set[resource]
         return dict(quota_set=result)

@@ -59,14 +62,21 @@ class QuotaSetsController(object):
             msg = _("Quota limit must be -1 or greater.")
             raise webob.exc.HTTPBadRequest(explanation=msg)

+    def _get_quotas(self, context, id, usages=False):
+        values = QUOTAS.get_project_quotas(context, id, usages=usages)
+        if usages:
+            return values
+        else:
+            return dict((k, v['limit']) for k, v in values.items())

     @wsgi.serializers(xml=QuotaTemplate)
     def show(self, req, id):
         context = req.environ['nova.context']
         authorize(context)
         try:
             sqlalchemy_api.authorize_project_context(context, id)
-            return self._format_quota_set(id,
-                                          quota.get_project_quotas(context, id))
+            return self._format_quota_set(id, self._get_quotas(context, id))
         except exception.NotAuthorized:
             raise webob.exc.HTTPForbidden()

@@ -76,7 +86,7 @@ class QuotaSetsController(object):
         authorize(context)
         project_id = id
         for key in body['quota_set'].keys():
-            if key in quota.quota_resources:
+            if key in QUOTAS:
                 value = int(body['quota_set'][key])
                 self._validate_quota_limit(value)
                 try:

@@ -85,12 +95,13 @@ class QuotaSetsController(object):
                     db.quota_create(context, project_id, key, value)
                 except exception.AdminRequired:
                     raise webob.exc.HTTPForbidden()
-        return {'quota_set': quota.get_project_quotas(context, project_id)}
+        return {'quota_set': self._get_quotas(context, id)}

     @wsgi.serializers(xml=QuotaTemplate)
     def defaults(self, req, id):
-        authorize(req.environ['nova.context'])
-        return self._format_quota_set(id, quota._get_default_quotas())
+        context = req.environ['nova.context']
+        authorize(context)
+        return self._format_quota_set(id, QUOTAS.get_defaults(context))

 class Quotas(extensions.ExtensionDescriptor):

View File

@ -31,6 +31,7 @@ from nova import db
from nova import exception from nova import exception
from nova import flags from nova import flags
from nova import log as logging from nova import log as logging
from nova.openstack.common import excutils
from nova.openstack.common import importutils from nova.openstack.common import importutils
from nova import quota from nova import quota
from nova import utils from nova import utils
@ -38,6 +39,7 @@ from nova import utils
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS FLAGS = flags.FLAGS
QUOTAS = quota.QUOTAS
authorize = extensions.extension_authorizer('compute', 'security_groups') authorize = extensions.extension_authorizer('compute', 'security_groups')
@ -244,11 +246,24 @@ class SecurityGroupController(SecurityGroupControllerBase):
if db.security_group_in_use(context, security_group.id): if db.security_group_in_use(context, security_group.id):
msg = _("Security group is still in use") msg = _("Security group is still in use")
raise exc.HTTPBadRequest(explanation=msg) raise exc.HTTPBadRequest(explanation=msg)
# Get reservations
try:
reservations = QUOTAS.reserve(context, security_groups=-1)
except Exception:
reservations = None
LOG.exception(_("Failed to update usages deallocating "
"security group"))
LOG.audit(_("Delete security group %s"), id, context=context) LOG.audit(_("Delete security group %s"), id, context=context)
db.security_group_destroy(context, security_group.id) db.security_group_destroy(context, security_group.id)
self.sgh.trigger_security_group_destroy_refresh( self.sgh.trigger_security_group_destroy_refresh(
context, security_group.id) context, security_group.id)
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations)
return webob.Response(status_int=202) return webob.Response(status_int=202)
@wsgi.serializers(xml=SecurityGroupsTemplate) @wsgi.serializers(xml=SecurityGroupsTemplate)
@ -291,22 +306,33 @@ class SecurityGroupController(SecurityGroupControllerBase):
group_name = group_name.strip() group_name = group_name.strip()
group_description = group_description.strip() group_description = group_description.strip()
if quota.allowed_security_groups(context, 1) < 1: try:
reservations = QUOTAS.reserve(context, security_groups=1)
except exception.OverQuota:
msg = _("Quota exceeded, too many security groups.") msg = _("Quota exceeded, too many security groups.")
raise exc.HTTPBadRequest(explanation=msg) raise exc.HTTPBadRequest(explanation=msg)
LOG.audit(_("Create Security Group %s"), group_name, context=context) try:
self.compute_api.ensure_default_security_group(context) LOG.audit(_("Create Security Group %s"), group_name,
if db.security_group_exists(context, context.project_id, group_name): context=context)
msg = _('Security group %s already exists') % group_name self.compute_api.ensure_default_security_group(context)
raise exc.HTTPBadRequest(explanation=msg) if db.security_group_exists(context, context.project_id,
group_name):
msg = _('Security group %s already exists') % group_name
raise exc.HTTPBadRequest(explanation=msg)
group = {'user_id': context.user_id, group = {'user_id': context.user_id,
'project_id': context.project_id, 'project_id': context.project_id,
'name': group_name, 'name': group_name,
'description': group_description} 'description': group_description}
group_ref = db.security_group_create(context, group) group_ref = db.security_group_create(context, group)
self.sgh.trigger_security_group_create_refresh(context, group) self.sgh.trigger_security_group_create_refresh(context, group)
# Commit the reservation
QUOTAS.commit(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
QUOTAS.rollback(context, reservations)
return {'security_group': self._format_security_group(context, return {'security_group': self._format_security_group(context,
group_ref)} group_ref)}
@ -382,10 +408,10 @@ class SecurityGroupRulesController(SecurityGroupControllerBase):
msg = _('This rule already exists in group %s') % parent_group_id msg = _('This rule already exists in group %s') % parent_group_id
raise exc.HTTPBadRequest(explanation=msg) raise exc.HTTPBadRequest(explanation=msg)
allowed = quota.allowed_security_group_rules(context, count = QUOTAS.count(context, 'security_group_rules', parent_group_id)
parent_group_id, try:
1) QUOTAS.limit_check(context, security_group_rules=count + 1)
if allowed < 1: except exception.OverQuota:
msg = _("Quota exceeded, too many security group rules.") msg = _("Quota exceeded, too many security group rules.")
raise exc.HTTPBadRequest(explanation=msg) raise exc.HTTPBadRequest(explanation=msg)

View File

@@ -36,6 +36,9 @@ from nova import quota
 from nova import wsgi as base_wsgi

+QUOTAS = quota.QUOTAS

 # Convenience constants for the limits dictionary passed to Limiter().
 PER_SECOND = 1
 PER_MINUTE = 60

@@ -82,7 +85,9 @@ class LimitsController(object):
         Return all global and rate limit information.
         """
         context = req.environ['nova.context']
-        abs_limits = quota.get_project_quotas(context, context.project_id)
+        quotas = QUOTAS.get_project_quotas(context, context.project_id,
+                                           usages=False)
+        abs_limits = dict((k, v['limit']) for k, v in quotas.items())
         rate_limits = req.environ.get("nova.limits", [])
         builder = self._get_view_builder(req)

View File

@ -42,6 +42,7 @@ import nova.image
from nova import log as logging from nova import log as logging
from nova import network from nova import network
from nova.openstack.common import cfg from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import jsonutils from nova.openstack.common import jsonutils
import nova.policy import nova.policy
from nova import quota from nova import quota
@ -56,6 +57,8 @@ LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS FLAGS = flags.FLAGS
flags.DECLARE('consoleauth_topic', 'nova.consoleauth') flags.DECLARE('consoleauth_topic', 'nova.consoleauth')
QUOTAS = quota.QUOTAS
def check_instance_state(vm_state=None, task_state=None): def check_instance_state(vm_state=None, task_state=None):
"""Decorator to check VM and/or task state before entry to API functions. """Decorator to check VM and/or task state before entry to API functions.
@ -126,49 +129,91 @@ class API(base.Base):
""" """
if injected_files is None: if injected_files is None:
return return
limit = quota.allowed_injected_files(context, len(injected_files))
if len(injected_files) > limit: # Check number of files first
try:
QUOTAS.limit_check(context, injected_files=len(injected_files))
except exception.OverQuota:
raise exception.OnsetFileLimitExceeded() raise exception.OnsetFileLimitExceeded()
path_limit = quota.allowed_injected_file_path_bytes(context)
# OK, now count path and content lengths; we're looking for
# the max...
max_path = 0
max_content = 0
for path, content in injected_files: for path, content in injected_files:
if len(path) > path_limit: max_path = max(max_path, len(path))
max_content = max(max_content, len(content))
try:
QUOTAS.limit_check(context, injected_file_path_bytes=max_path,
injected_file_content_bytes=max_content)
except exception.OverQuota as exc:
# Favor path limit over content limit for reporting
# purposes
if 'injected_file_path_bytes' in exc.kwargs['overs']:
raise exception.OnsetFilePathLimitExceeded() raise exception.OnsetFilePathLimitExceeded()
content_limit = quota.allowed_injected_file_content_bytes( else:
context, len(content))
if len(content) > content_limit:
raise exception.OnsetFileContentLimitExceeded() raise exception.OnsetFileContentLimitExceeded()
def _check_num_instances_quota(self, context, instance_type, min_count, def _check_num_instances_quota(self, context, instance_type, min_count,
max_count): max_count):
"""Enforce quota limits on number of instances created.""" """Enforce quota limits on number of instances created."""
num_instances = quota.allowed_instances(context, max_count,
instance_type) # Determine requested cores and ram
if num_instances < min_count: req_cores = max_count * instance_type['vcpus']
req_ram = max_count * instance_type['memory_mb']
# Check the quota
try:
reservations = QUOTAS.reserve(context, instances=max_count,
cores=req_cores, ram=req_ram)
except exception.OverQuota as exc:
# OK, we exceeded quota; let's figure out why...
quotas = exc.kwargs['quotas']
usages = exc.kwargs['usages']
headroom = dict((res, quotas[res] -
(usages[res]['in_use'] + usages[res]['reserved']))
for res in quotas.keys())
allowed = headroom['instances']
if instance_type['vcpus']:
allowed = min(allowed,
headroom['cores'] // instance_type['vcpus'])
if instance_type['memory_mb']:
allowed = min(allowed,
headroom['ram'] // instance_type['memory_mb'])
# Convert to the appropriate exception message
pid = context.project_id pid = context.project_id
if num_instances <= 0: if allowed <= 0:
msg = _("Cannot run any more instances of this type.") msg = _("Cannot run any more instances of this type.")
used = max_count used = max_count
elif min_count <= allowed <= max_count:
# We're actually OK, but still need reservations
return self._check_num_instances_quota(context, instance_type,
min_count, allowed)
else: else:
msg = (_("Can only run %s more instances of this type.") % msg = (_("Can only run %s more instances of this type.") %
num_instances) allowed)
used = max_count - num_instances used = max_count - allowed
LOG.warn(_("Quota exceeded for %(pid)s," LOG.warn(_("Quota exceeded for %(pid)s,"
" tried to run %(min_count)s instances. %(msg)s"), locals()) " tried to run %(min_count)s instances. %(msg)s"), locals())
raise exception.TooManyInstances(used=used, allowed=max_count) raise exception.TooManyInstances(used=used, allowed=max_count)
return num_instances return max_count, reservations
def _check_metadata_properties_quota(self, context, metadata=None): def _check_metadata_properties_quota(self, context, metadata=None):
"""Enforce quota limits on metadata properties.""" """Enforce quota limits on metadata properties."""
if not metadata: if not metadata:
metadata = {} metadata = {}
num_metadata = len(metadata) num_metadata = len(metadata)
quota_metadata = quota.allowed_metadata_items(context, num_metadata) try:
if quota_metadata < num_metadata: QUOTAS.limit_check(context, metadata_items=num_metadata)
except exception.OverQuota as exc:
pid = context.project_id pid = context.project_id
msg = _("Quota exceeded for %(pid)s, tried to set " msg = _("Quota exceeded for %(pid)s, tried to set "
"%(num_metadata)s metadata properties") % locals() "%(num_metadata)s metadata properties") % locals()
LOG.warn(msg) LOG.warn(msg)
quota_metadata = exc.kwargs['quotas']['metadata_items']
raise exception.MetadataLimitExceeded(allowed=quota_metadata) raise exception.MetadataLimitExceeded(allowed=quota_metadata)
# Because metadata is stored in the DB, we hard-code the size limits # Because metadata is stored in the DB, we hard-code the size limits
@ -302,7 +347,7 @@ class API(base.Base):
block_device_mapping = block_device_mapping or [] block_device_mapping = block_device_mapping or []
# Check quotas # Check quotas
num_instances = self._check_num_instances_quota( num_instances, quota_reservations = self._check_num_instances_quota(
context, instance_type, min_count, max_count) context, instance_type, min_count, max_count)
self._check_metadata_properties_quota(context, metadata) self._check_metadata_properties_quota(context, metadata)
self._check_injected_file_quota(context, injected_files) self._check_injected_file_quota(context, injected_files)
@ -313,8 +358,10 @@ class API(base.Base):
image = image_service.show(context, image_id) image = image_service.show(context, image_id)
if instance_type['memory_mb'] < int(image.get('min_ram') or 0): if instance_type['memory_mb'] < int(image.get('min_ram') or 0):
QUOTAS.rollback(context, quota_reservations)
raise exception.InstanceTypeMemoryTooSmall() raise exception.InstanceTypeMemoryTooSmall()
if instance_type['root_gb'] < int(image.get('min_disk') or 0): if instance_type['root_gb'] < int(image.get('min_disk') or 0):
QUOTAS.rollback(context, quota_reservations)
raise exception.InstanceTypeDiskTooSmall() raise exception.InstanceTypeDiskTooSmall()
# Handle config_drive # Handle config_drive
@ -385,7 +432,12 @@ class API(base.Base):
if create_instance_here: if create_instance_here:
instance = self.create_db_entry_for_new_instance( instance = self.create_db_entry_for_new_instance(
context, instance_type, image, base_options, context, instance_type, image, base_options,
security_group, block_device_mapping) security_group, block_device_mapping,
quota_reservations)
# Reservations committed; don't double-commit
quota_reservations = None
# Tells scheduler we created the instance already. # Tells scheduler we created the instance already.
base_options['uuid'] = instance['uuid'] base_options['uuid'] = instance['uuid']
use_call = False use_call = False
@ -412,7 +464,7 @@ class API(base.Base):
admin_password, image, admin_password, image,
num_instances, requested_networks, num_instances, requested_networks,
block_device_mapping, security_group, block_device_mapping, security_group,
filter_properties) filter_properties, quota_reservations)
if create_instance_here: if create_instance_here:
return ([instance], reservation_id) return ([instance], reservation_id)
@ -509,7 +561,7 @@ class API(base.Base):
#NOTE(bcwaldon): No policy check since this is only used by scheduler and #NOTE(bcwaldon): No policy check since this is only used by scheduler and
# the compute api. That should probably be cleaned up, though. # the compute api. That should probably be cleaned up, though.
def create_db_entry_for_new_instance(self, context, instance_type, image, def create_db_entry_for_new_instance(self, context, instance_type, image,
base_options, security_group, block_device_mapping): base_options, security_group, block_device_mapping, reservations):
"""Create an entry in the DB for this new instance, """Create an entry in the DB for this new instance,
including any related table updates (such as security group, including any related table updates (such as security group,
etc). etc).
@ -539,6 +591,11 @@ class API(base.Base):
base_options.setdefault('launch_index', 0) base_options.setdefault('launch_index', 0)
instance = self.db.instance_create(context, base_options) instance = self.db.instance_create(context, base_options)
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations)
instance_id = instance['id'] instance_id = instance['id']
instance_uuid = instance['uuid'] instance_uuid = instance['uuid']
@ -593,7 +650,8 @@ class API(base.Base):
requested_networks, requested_networks,
block_device_mapping, block_device_mapping,
security_group, security_group,
filter_properties): filter_properties,
quota_reservations):
"""Send a run_instance request to the schedulers for processing.""" """Send a run_instance request to the schedulers for processing."""
pid = context.project_id pid = context.project_id
@ -615,7 +673,8 @@ class API(base.Base):
topic=FLAGS.compute_topic, request_spec=request_spec, topic=FLAGS.compute_topic, request_spec=request_spec,
admin_password=admin_password, injected_files=injected_files, admin_password=admin_password, injected_files=injected_files,
requested_networks=requested_networks, is_first_time=True, requested_networks=requested_networks, is_first_time=True,
filter_properties=filter_properties, call=use_call) filter_properties=filter_properties,
reservations=quota_reservations, call=use_call)
def _check_create_policies(self, context, availability_zone, def _check_create_policies(self, context, availability_zone,
requested_networks, block_device_mapping): requested_networks, block_device_mapping):
@ -895,10 +954,17 @@ class API(base.Base):
pass pass
def _delete(self, context, instance): def _delete(self, context, instance):
host = instance['host']
reservations = QUOTAS.reserve(context,
instances=-1,
cores=-instance['vcpus'],
ram=-instance['memory_mb'])
try: try:
if not instance['host']: if not instance['host']:
# Just update database, nothing else we can do # Just update database, nothing else we can do
return self.db.instance_destroy(context, instance['id']) result = self.db.instance_destroy(context, instance['id'])
QUOTAS.commit(context, reservations)
return result
self.update(context, self.update(context,
instance, instance,
@ -919,9 +985,13 @@ class API(base.Base):
self.compute_rpcapi.terminate_instance(context, instance) self.compute_rpcapi.terminate_instance(context, instance)
QUOTAS.commit(context, reservations)
except exception.InstanceNotFound: except exception.InstanceNotFound:
# NOTE(comstud): Race condition. Instance already gone. # NOTE(comstud): Race condition. Instance already gone.
pass QUOTAS.rollback(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
QUOTAS.rollback(context, reservations)
# NOTE(jerdfelt): The API implies that only ACTIVE and ERROR are # NOTE(jerdfelt): The API implies that only ACTIVE and ERROR are
# allowed but the EC2 API appears to allow from RESCUED and STOPPED # allowed but the EC2 API appears to allow from RESCUED and STOPPED
@ -1885,7 +1955,10 @@ class KeypairAPI(base.Base):
"""Import a key pair using an existing public key.""" """Import a key pair using an existing public key."""
self._validate_keypair_name(context, user_id, key_name) self._validate_keypair_name(context, user_id, key_name)
if quota.allowed_key_pairs(context, 1) < 1: count = QUOTAS.count(context, 'key_pairs', user_id)
try:
QUOTAS.limit_check(context, key_pairs=count + 1)
except exception.OverQuota:
raise exception.KeypairLimitExceeded() raise exception.KeypairLimitExceeded()
try: try:
@ -1906,7 +1979,10 @@ class KeypairAPI(base.Base):
"""Create a new key pair.""" """Create a new key pair."""
self._validate_keypair_name(context, user_id, key_name) self._validate_keypair_name(context, user_id, key_name)
if quota.allowed_key_pairs(context, 1) < 1: count = QUOTAS.count(context, 'key_pairs', user_id)
try:
QUOTAS.limit_check(context, key_pairs=count + 1)
except exception.OverQuota:
raise exception.KeypairLimitExceeded() raise exception.KeypairLimitExceeded()
private_key, public_key, fingerprint = crypto.generate_key_pair() private_key, public_key, fingerprint = crypto.generate_key_pair()

View File

@@ -2543,6 +2543,10 @@ def quota_reserve(context, resources, quotas, deltas, expire,
                                                    session=session,
                                                    save=False)
             refresh = True
+        elif usages[resource].in_use < 0:
+            # Negative in_use count indicates a desync, so try to
+            # heal from that...
+            refresh = True
         elif usages[resource].until_refresh is not None:
             usages[resource].until_refresh -= 1
             if usages[resource].until_refresh <= 0:

@@ -2607,7 +2611,7 @@ def quota_reserve(context, resources, quotas, deltas, expire,
     # they're not invalidated by being over-quota.

     # Create the reservations
-    if not unders and not overs:
+    if not overs:
         reservations = []
         for resource, delta in deltas.items():
             reservation = reservation_create(elevated,

@@ -2638,7 +2642,8 @@ def quota_reserve(context, resources, quotas, deltas, expire,
             usage_ref.save(session=session)

     if unders:
-        raise exception.InvalidQuotaValue(unders=sorted(unders))
+        LOG.warning(_("Change will make usage less than 0 for the following "
+                      "resources: %(unders)s") % locals())
     if overs:
         usages = dict((k, dict(in_use=v['in_use'], reserved=v['reserved']))
                       for k, v in usages.items())

View File

@@ -998,6 +998,10 @@ class VolumeSizeTooLarge(QuotaError):
     message = _("Maximum volume size exceeded")

+class FloatingIpLimitExceeded(QuotaError):
+    message = _("Maximum number of floating ips exceeded")

 class MetadataLimitExceeded(QuotaError):
     message = _("Maximum number of metadata items exceeds %(allowed)d")

View File

@ -64,6 +64,7 @@ from nova.network import api as network_api
from nova.network import model as network_model from nova.network import model as network_model
from nova.notifier import api as notifier from nova.notifier import api as notifier
from nova.openstack.common import cfg from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import importutils from nova.openstack.common import importutils
from nova.openstack.common import jsonutils from nova.openstack.common import jsonutils
import nova.policy import nova.policy
@ -74,6 +75,8 @@ from nova import utils
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
network_opts = [ network_opts = [
cfg.StrOpt('flat_network_bridge', cfg.StrOpt('flat_network_bridge',
default=None, default=None,
@ -398,21 +401,34 @@ class FloatingIP(object):
def allocate_floating_ip(self, context, project_id, pool=None): def allocate_floating_ip(self, context, project_id, pool=None):
"""Gets a floating ip from the pool.""" """Gets a floating ip from the pool."""
# NOTE(tr3buchet): all network hosts in zone now use the same pool # NOTE(tr3buchet): all network hosts in zone now use the same pool
LOG.debug("QUOTA: %s" % quota.allowed_floating_ips(context, 1))
if quota.allowed_floating_ips(context, 1) < 1:
LOG.warn(_('Quota exceeded for %s, tried to allocate address'),
context.project_id)
raise exception.QuotaError(code='AddressLimitExceeded')
pool = pool or FLAGS.default_floating_pool pool = pool or FLAGS.default_floating_pool
floating_ip = self.db.floating_ip_allocate_address(context, # Check the quota; can't put this in the API because we get
project_id, # called into from other places
pool) try:
payload = dict(project_id=project_id, floating_ip=floating_ip) reservations = QUOTAS.reserve(context, floating_ips=1)
notifier.notify(context, except exception.OverQuota:
notifier.publisher_id("network"), pid = context.project_id
'network.floating_ip.allocate', LOG.warn(_("Quota exceeded for %(pid)s, tried to allocate "
notifier.INFO, payload) "floating IP") % locals())
raise exception.FloatingIpLimitExceeded()
try:
floating_ip = self.db.floating_ip_allocate_address(context,
project_id,
pool)
payload = dict(project_id=project_id, floating_ip=floating_ip)
notifier.notify(context,
notifier.publisher_id("network"),
'network.floating_ip.allocate',
notifier.INFO, payload)
# Commit the reservations
QUOTAS.commit(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
QUOTAS.rollback(context, reservations)
return floating_ip return floating_ip
@wrap_check_policy @wrap_check_policy
@ -443,8 +459,20 @@ class FloatingIP(object):
'network.floating_ip.deallocate', 'network.floating_ip.deallocate',
notifier.INFO, payload=payload) notifier.INFO, payload=payload)
# Get reservations...
try:
reservations = QUOTAS.reserve(context, floating_ips=-1)
except Exception:
reservations = None
LOG.exception(_("Failed to update usages deallocating "
"floating IP"))
self.db.floating_ip_deallocate(context, address) self.db.floating_ip_deallocate(context, address)
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations)
@wrap_check_policy @wrap_check_policy
def associate_floating_ip(self, context, floating_address, fixed_address, def associate_floating_ip(self, context, floating_address, fixed_address,
affect_auto_assigned=False): affect_auto_assigned=False):

View File

@@ -60,14 +60,16 @@ class ChanceScheduler(driver.Scheduler):
         host = self._schedule(context, topic, None, **kwargs)
         driver.cast_to_host(context, topic, host, method, **kwargs)

-    def schedule_run_instance(self, context, request_spec, *_args, **kwargs):
+    def schedule_run_instance(self, context, request_spec, reservations,
+                              *_args, **kwargs):
         """Create and run an instance or instances"""
         num_instances = request_spec.get('num_instances', 1)
         instances = []
         for num in xrange(num_instances):
             host = self._schedule(context, 'compute', request_spec, **kwargs)
             request_spec['instance_properties']['launch_index'] = num
-            instance = self.create_instance_db_entry(context, request_spec)
+            instance = self.create_instance_db_entry(context, request_spec,
+                                                     reservations)
             driver.cast_to_compute_host(context, host,
                     'run_instance', instance_uuid=instance['uuid'], **kwargs)
             instances.append(driver.encode_instance(instance))

View File

@@ -159,7 +159,7 @@ class Scheduler(object):
                 for service in services
                 if utils.service_is_up(service)]

-    def create_instance_db_entry(self, context, request_spec):
+    def create_instance_db_entry(self, context, request_spec, reservations):
         """Create instance DB entry based on request_spec"""
         base_options = request_spec['instance_properties']
         if base_options.get('uuid'):

@@ -172,7 +172,7 @@ class Scheduler(object):
         instance = self.compute_api.create_db_entry_for_new_instance(
                 context, instance_type, image, base_options,
-                security_group, block_device_mapping)
+                security_group, block_device_mapping, reservations)
         # NOTE(comstud): This needs to be set for the generic exception
         # checking in scheduler manager, so that it'll set this instance
         # to ERROR properly.

View File

@ -51,7 +51,8 @@ class FilterScheduler(driver.Scheduler):
msg = _("No host selection for %s defined.") % topic msg = _("No host selection for %s defined.") % topic
raise exception.NoValidHost(reason=msg) raise exception.NoValidHost(reason=msg)
def schedule_run_instance(self, context, request_spec, *args, **kwargs): def schedule_run_instance(self, context, request_spec, reservations,
*args, **kwargs):
"""This method is called from nova.compute.api to provision """This method is called from nova.compute.api to provision
an instance. We first create a build plan (a list of WeightedHosts) an instance. We first create a build plan (a list of WeightedHosts)
and then provision. and then provision.
@ -86,7 +87,8 @@ class FilterScheduler(driver.Scheduler):
request_spec['instance_properties']['launch_index'] = num request_spec['instance_properties']['launch_index'] = num
instance = self._provision_resource(elevated, weighted_host, instance = self._provision_resource(elevated, weighted_host,
request_spec, kwargs) request_spec, reservations,
kwargs)
if instance: if instance:
instances.append(instance) instances.append(instance)
@ -118,9 +120,10 @@ class FilterScheduler(driver.Scheduler):
'prep_resize', **kwargs) 'prep_resize', **kwargs)
def _provision_resource(self, context, weighted_host, request_spec, def _provision_resource(self, context, weighted_host, request_spec,
kwargs): reservations, kwargs):
"""Create the requested resource in this Zone.""" """Create the requested resource in this Zone."""
instance = self.create_instance_db_entry(context, request_spec) instance = self.create_instance_db_entry(context, request_spec,
reservations)
payload = dict(request_spec=request_spec, payload = dict(request_spec=request_spec,
weighted_host=weighted_host.to_dict(), weighted_host=weighted_host.to_dict(),

View File

@@ -110,18 +110,24 @@ class SchedulerManager(manager.Manager):
         Sets instance vm_state to ERROR on exceptions
         """
         args = (context,) + args
+        reservations = kwargs.get('reservations', None)
         try:
-            return self.driver.schedule_run_instance(*args, **kwargs)
+            result = self.driver.schedule_run_instance(*args, **kwargs)
+            return result
         except exception.NoValidHost as ex:
             # don't reraise
             self._set_vm_state_and_notify('run_instance',
                                           {'vm_state': vm_states.ERROR},
                                           context, ex, *args, **kwargs)
+            if reservations:
+                QUOTAS.rollback(context, reservations)
         except Exception as ex:
             with excutils.save_and_reraise_exception():
                 self._set_vm_state_and_notify('run_instance',
                                               {'vm_state': vm_states.ERROR},
                                               context, ex, *args, **kwargs)
+                if reservations:
+                    QUOTAS.rollback(context, reservations)

     def prep_resize(self, context, topic, *args, **kwargs):
         """Tries to call schedule_prep_resize on the driver.

View File

@@ -41,14 +41,15 @@ class SchedulerAPI(nova.rpc.proxy.RpcProxy):
     def run_instance(self, ctxt, topic, request_spec, admin_password,
             injected_files, requested_networks, is_first_time,
-            filter_properties, call=True):
+            filter_properties, reservations, call=True):
         rpc_method = self.call if call else self.cast
         return rpc_method(ctxt, self.make_msg('run_instance', topic=topic,
                 request_spec=request_spec, admin_password=admin_password,
                 injected_files=injected_files,
                 requested_networks=requested_networks,
                 is_first_time=is_first_time,
-                filter_properties=filter_properties))
+                filter_properties=filter_properties,
+                reservations=reservations))

     def prep_resize(self, ctxt, topic, instance_uuid, instance_type_id, image,
             update_db, request_spec, filter_properties):

View File

@ -91,7 +91,8 @@ class SimpleScheduler(chance.ChanceScheduler):
msg = _("Is the appropriate service running?") msg = _("Is the appropriate service running?")
raise exception.NoValidHost(reason=msg) raise exception.NoValidHost(reason=msg)
def schedule_run_instance(self, context, request_spec, *_args, **_kwargs): def schedule_run_instance(self, context, request_spec, reservations,
*_args, **_kwargs):
num_instances = request_spec.get('num_instances', 1) num_instances = request_spec.get('num_instances', 1)
instances = [] instances = []
for num in xrange(num_instances): for num in xrange(num_instances):
@ -99,7 +100,7 @@ class SimpleScheduler(chance.ChanceScheduler):
request_spec['instance_properties'], *_args, **_kwargs) request_spec['instance_properties'], *_args, **_kwargs)
request_spec['instance_properties']['launch_index'] = num request_spec['instance_properties']['launch_index'] = num
instance_ref = self.create_instance_db_entry(context, instance_ref = self.create_instance_db_entry(context,
request_spec) request_spec, reservations)
driver.cast_to_compute_host(context, host, 'run_instance', driver.cast_to_compute_host(context, host, 'run_instance',
instance_uuid=instance_ref['uuid'], **_kwargs) instance_uuid=instance_ref['uuid'], **_kwargs)
instances.append(driver.encode_instance(instance_ref)) instances.append(driver.encode_instance(instance_ref))

View File

@ -22,10 +22,14 @@ from nova.api.openstack.compute.contrib import keypairs
from nova.api.openstack import wsgi from nova.api.openstack import wsgi
from nova import db from nova import db
from nova import exception from nova import exception
from nova import quota
from nova import test from nova import test
from nova.tests.api.openstack import fakes from nova.tests.api.openstack import fakes
QUOTAS = quota.QUOTAS
def fake_keypair(name): def fake_keypair(name):
return {'public_key': 'FAKE_KEY', return {'public_key': 'FAKE_KEY',
'fingerprint': 'FAKE_FINGERPRINT', 'fingerprint': 'FAKE_FINGERPRINT',
@ -120,11 +124,10 @@ class KeypairsTest(test.TestCase):
def test_keypair_create_quota_limit(self): def test_keypair_create_quota_limit(self):
def db_key_pair_count_by_user_max(self, user_id): def fake_quotas_count(self, context, resource, *args, **kwargs):
return 100 return 100
self.stubs.Set(db, "key_pair_count_by_user", self.stubs.Set(QUOTAS, "count", fake_quotas_count)
db_key_pair_count_by_user_max)
req = webob.Request.blank('/v2/fake/os-keypairs') req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST' req.method = 'POST'
@ -163,11 +166,10 @@ class KeypairsTest(test.TestCase):
def test_keypair_import_quota_limit(self): def test_keypair_import_quota_limit(self):
def db_key_pair_count_by_user_max(self, user_id): def fake_quotas_count(self, context, resource, *args, **kwargs):
return 100 return 100
self.stubs.Set(db, "key_pair_count_by_user", self.stubs.Set(QUOTAS, "count", fake_quotas_count)
db_key_pair_count_by_user_max)
body = { body = {
'keypair': { 'keypair': {
@ -191,6 +193,26 @@ class KeypairsTest(test.TestCase):
res = req.get_response(fakes.wsgi_app()) res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 413) self.assertEqual(res.status_int, 413)
def test_keypair_create_quota_limit(self):
def fake_quotas_count(self, context, resource, *args, **kwargs):
return 100
self.stubs.Set(QUOTAS, "count", fake_quotas_count)
body = {
'keypair': {
'name': 'create_test',
},
}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 413)
def test_keypair_create_duplicate(self): def test_keypair_create_duplicate(self):
self.stubs.Set(db, "key_pair_get", db_key_pair_get) self.stubs.Set(db, "key_pair_get", db_key_pair_get)
body = {'keypair': {'name': 'create_duplicate'}} body = {'keypair': {'name': 'create_duplicate'}}

View File

@ -28,7 +28,7 @@ def quota_set(class_name):
'floating_ips': 10, 'instances': 10, 'injected_files': 5, 'floating_ips': 10, 'instances': 10, 'injected_files': 5,
'cores': 20, 'injected_file_content_bytes': 10240, 'cores': 20, 'injected_file_content_bytes': 10240,
'security_groups': 10, 'security_group_rules': 20, 'security_groups': 10, 'security_group_rules': 20,
'key_pairs': 100}} 'key_pairs': 100, 'injected_file_path_bytes': 255}}
class QuotaClassSetsTest(test.TestCase): class QuotaClassSetsTest(test.TestCase):
@ -47,6 +47,7 @@ class QuotaClassSetsTest(test.TestCase):
'metadata_items': 128, 'metadata_items': 128,
'gigabytes': 1000, 'gigabytes': 1000,
'injected_files': 5, 'injected_files': 5,
'injected_file_path_bytes': 255,
'injected_file_content_bytes': 10240, 'injected_file_content_bytes': 10240,
'security_groups': 10, 'security_groups': 10,
'security_group_rules': 20, 'security_group_rules': 20,
@ -66,6 +67,7 @@ class QuotaClassSetsTest(test.TestCase):
self.assertEqual(qs['floating_ips'], 10) self.assertEqual(qs['floating_ips'], 10)
self.assertEqual(qs['metadata_items'], 128) self.assertEqual(qs['metadata_items'], 128)
self.assertEqual(qs['injected_files'], 5) self.assertEqual(qs['injected_files'], 5)
self.assertEqual(qs['injected_file_path_bytes'], 255)
self.assertEqual(qs['injected_file_content_bytes'], 10240) self.assertEqual(qs['injected_file_content_bytes'], 10240)
self.assertEqual(qs['security_groups'], 10) self.assertEqual(qs['security_groups'], 10)
self.assertEqual(qs['security_group_rules'], 20) self.assertEqual(qs['security_group_rules'], 20)
@ -91,6 +93,7 @@ class QuotaClassSetsTest(test.TestCase):
'gigabytes': 1000, 'floating_ips': 10, 'gigabytes': 1000, 'floating_ips': 10,
'metadata_items': 128, 'injected_files': 5, 'metadata_items': 128, 'injected_files': 5,
'injected_file_content_bytes': 10240, 'injected_file_content_bytes': 10240,
'injected_file_path_bytes': 255,
'security_groups': 10, 'security_groups': 10,
'security_group_rules': 20, 'security_group_rules': 20,
'key_pairs': 100, 'key_pairs': 100,
@ -130,6 +133,7 @@ class QuotaTemplateXMLSerializerTest(test.TestCase):
exemplar = dict(quota_class_set=dict( exemplar = dict(quota_class_set=dict(
id='test_class', id='test_class',
metadata_items=10, metadata_items=10,
injected_file_path_bytes=255,
injected_file_content_bytes=20, injected_file_content_bytes=20,
volumes=30, volumes=30,
gigabytes=40, gigabytes=40,

View File

@ -30,7 +30,7 @@ def quota_set(id):
'instances': 10, 'injected_files': 5, 'cores': 20, 'instances': 10, 'injected_files': 5, 'cores': 20,
'injected_file_content_bytes': 10240, 'injected_file_content_bytes': 10240,
'security_groups': 10, 'security_group_rules': 20, 'security_groups': 10, 'security_group_rules': 20,
'key_pairs': 100}} 'key_pairs': 100, 'injected_file_path_bytes': 255}}
class QuotaSetsTest(test.TestCase): class QuotaSetsTest(test.TestCase):
@ -49,6 +49,7 @@ class QuotaSetsTest(test.TestCase):
'metadata_items': 128, 'metadata_items': 128,
'gigabytes': 1000, 'gigabytes': 1000,
'injected_files': 5, 'injected_files': 5,
'injected_file_path_bytes': 255,
'injected_file_content_bytes': 10240, 'injected_file_content_bytes': 10240,
'security_groups': 10, 'security_groups': 10,
'security_group_rules': 20, 'security_group_rules': 20,
@ -67,6 +68,7 @@ class QuotaSetsTest(test.TestCase):
self.assertEqual(qs['floating_ips'], 10) self.assertEqual(qs['floating_ips'], 10)
self.assertEqual(qs['metadata_items'], 128) self.assertEqual(qs['metadata_items'], 128)
self.assertEqual(qs['injected_files'], 5) self.assertEqual(qs['injected_files'], 5)
self.assertEqual(qs['injected_file_path_bytes'], 255)
self.assertEqual(qs['injected_file_content_bytes'], 10240) self.assertEqual(qs['injected_file_content_bytes'], 10240)
self.assertEqual(qs['security_groups'], 10) self.assertEqual(qs['security_groups'], 10)
self.assertEqual(qs['security_group_rules'], 20) self.assertEqual(qs['security_group_rules'], 20)
@ -88,6 +90,7 @@ class QuotaSetsTest(test.TestCase):
'floating_ips': 10, 'floating_ips': 10,
'metadata_items': 128, 'metadata_items': 128,
'injected_files': 5, 'injected_files': 5,
'injected_file_path_bytes': 255,
'injected_file_content_bytes': 10240, 'injected_file_content_bytes': 10240,
'security_groups': 10, 'security_groups': 10,
'security_group_rules': 20, 'security_group_rules': 20,
@ -114,6 +117,7 @@ class QuotaSetsTest(test.TestCase):
'gigabytes': 1000, 'floating_ips': 10, 'gigabytes': 1000, 'floating_ips': 10,
'metadata_items': 128, 'injected_files': 5, 'metadata_items': 128, 'injected_files': 5,
'injected_file_content_bytes': 10240, 'injected_file_content_bytes': 10240,
'injected_file_path_bytes': 255,
'security_groups': 10, 'security_groups': 10,
'security_group_rules': 20, 'security_group_rules': 20,
'key_pairs': 100}} 'key_pairs': 100}}
@ -161,6 +165,7 @@ class QuotaXMLSerializerTest(test.TestCase):
exemplar = dict(quota_set=dict( exemplar = dict(quota_set=dict(
id='project_id', id='project_id',
metadata_items=10, metadata_items=10,
injected_file_path_bytes=255,
injected_file_content_bytes=20, injected_file_content_bytes=20,
volumes=30, volumes=30,
gigabytes=40, gigabytes=40,

View File

@@ -224,7 +224,7 @@ class TestSecurityGroups(test.TestCase):
     def test_create_security_group_quota_limit(self):
         req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
-        for num in range(1, FLAGS.quota_security_groups):
+        for num in range(1, FLAGS.quota_security_groups + 1):
             name = 'test%s' % num
             sg = security_group_template(name=name)
             res_dict = self.controller.create(req, {'security_group': sg})

View File

@@ -57,10 +57,11 @@ class BaseLimitTestSuite(test.TestCase):
         self.stubs.Set(limits.Limit, "_get_time", self._get_time)
         self.absolute_limits = {}

-        def stub_get_project_quotas(context, project_id):
-            return self.absolute_limits
+        def stub_get_project_quotas(context, project_id, usages=True):
+            return dict((k, dict(limit=v))
+                        for k, v in self.absolute_limits.items())

-        self.stubs.Set(nova.quota, "get_project_quotas",
+        self.stubs.Set(nova.quota.QUOTAS, "get_project_quotas",
                        stub_get_project_quotas)

     def _get_time(self):

View File

@ -1287,6 +1287,7 @@ class ServersControllerTest(test.TestCase):
self.assertEqual(s['name'], 'server%d' % (i + 1)) self.assertEqual(s['name'], 'server%d' % (i + 1))
def test_delete_server_instance(self): def test_delete_server_instance(self):
fakes.stub_out_instance_quota(self.stubs, 0)
req = fakes.HTTPRequest.blank('/v2/fake/servers/%s' % FAKE_UUID) req = fakes.HTTPRequest.blank('/v2/fake/servers/%s' % FAKE_UUID)
req.method = 'DELETE' req.method = 'DELETE'
@ -1304,6 +1305,7 @@ class ServersControllerTest(test.TestCase):
self.assertEqual(self.server_delete_called, True) self.assertEqual(self.server_delete_called, True)
def test_delete_server_instance_while_building(self): def test_delete_server_instance_while_building(self):
fakes.stub_out_instance_quota(self.stubs, 0)
req = fakes.HTTPRequest.blank('/v2/fake/servers/%s' % FAKE_UUID) req = fakes.HTTPRequest.blank('/v2/fake/servers/%s' % FAKE_UUID)
req.method = 'DELETE' req.method = 'DELETE'
@@ -2338,7 +2340,7 @@ class ServersControllerCreateTest(test.TestCase):
         req.headers["content-type"] = "application/json"
         try:
             server = self.controller.create(req, body).obj['server']
-            fail('excepted quota to be exceeded')
+            self.fail('expected quota to be exceeded')
         except webob.exc.HTTPRequestEntityTooLarge as e:
             self.assertEquals(e.explanation,
                               _('Quota exceeded: already used 1 of 1 instances'))

View File

@@ -17,13 +17,12 @@
 import datetime

-from glance import client as glance_client
 import routes
 import webob
 import webob.dec
 import webob.request

+from glance import client as glance_client
 from nova.api import auth as api_auth
 from nova.api import openstack as openstack_api
 from nova.api.openstack import auth
@ -40,12 +39,16 @@ from nova.db.sqlalchemy import models
from nova import exception as exc from nova import exception as exc
import nova.image.fake import nova.image.fake
from nova.openstack.common import jsonutils from nova.openstack.common import jsonutils
from nova import quota
from nova.tests import fake_network from nova.tests import fake_network
from nova.tests.glance import stubs as glance_stubs from nova.tests.glance import stubs as glance_stubs
from nova import utils from nova import utils
from nova import wsgi from nova import wsgi
QUOTAS = quota.QUOTAS
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
FAKE_UUIDS = {} FAKE_UUIDS = {}
@ -142,9 +145,19 @@ def stub_out_rate_limiting(stubs):
def stub_out_instance_quota(stubs, allowed): def stub_out_instance_quota(stubs, allowed):
def fake_allowed_instances(context, max_count, instance_type): def fake_reserve(context, **deltas):
return allowed instances = deltas.pop('instances', 0)
stubs.Set(nova.quota, 'allowed_instances', fake_allowed_instances) if instances > allowed:
raise exc.OverQuota(overs=['instances'], quotas=dict(
instances=allowed,
cores=10000,
ram=10000 * 1024,
), usages=dict(
instances=dict(in_use=0, reserved=0),
cores=dict(in_use=0, reserved=0),
ram=dict(in_use=0, reserved=0),
))
stubs.Set(QUOTAS, 'reserve', fake_reserve)
def stub_out_networking(stubs): def stub_out_networking(stubs):
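A hedged usage sketch of the reworked stub: with allowed=0 the fake makes QUOTAS.reserve raise OverQuota for any positive instance delta, so a test can exercise the over-quota path without touching the database (the test body below is illustrative, not part of this change):

    from nova import context
    from nova import exception
    from nova import quota
    from nova.tests.api.openstack import fakes

    QUOTAS = quota.QUOTAS

    def test_reserve_rejected_when_no_instances_allowed(self):
        fakes.stub_out_instance_quota(self.stubs, 0)
        ctxt = context.get_admin_context()
        self.assertRaises(exception.OverQuota, QUOTAS.reserve,
                          ctxt, instances=1, cores=1, ram=512)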

@ -45,6 +45,7 @@ from nova import log as logging
from nova.notifier import test_notifier from nova.notifier import test_notifier
from nova.openstack.common import importutils from nova.openstack.common import importutils
import nova.policy import nova.policy
from nova import quota
from nova import rpc from nova import rpc
from nova.rpc import common as rpc_common from nova.rpc import common as rpc_common
from nova.scheduler import driver as scheduler_driver from nova.scheduler import driver as scheduler_driver
@ -54,6 +55,7 @@ from nova import utils
import nova.volume import nova.volume
QUOTAS = quota.QUOTAS
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS FLAGS = flags.FLAGS
flags.DECLARE('stub_network', 'nova.compute.manager') flags.DECLARE('stub_network', 'nova.compute.manager')
@ -71,13 +73,14 @@ def rpc_call_wrapper(context, topic, msg, do_cast=True):
if (topic == FLAGS.scheduler_topic and if (topic == FLAGS.scheduler_topic and
msg['method'] == 'run_instance'): msg['method'] == 'run_instance'):
request_spec = msg['args']['request_spec'] request_spec = msg['args']['request_spec']
reservations = msg['args'].get('reservations')
scheduler = scheduler_driver.Scheduler scheduler = scheduler_driver.Scheduler
num_instances = request_spec.get('num_instances', 1) num_instances = request_spec.get('num_instances', 1)
instances = [] instances = []
for num in xrange(num_instances): for num in xrange(num_instances):
request_spec['instance_properties']['launch_index'] = num request_spec['instance_properties']['launch_index'] = num
instance = scheduler().create_instance_db_entry( instance = scheduler().create_instance_db_entry(
context, request_spec) context, request_spec, reservations)
encoded = scheduler_driver.encode_instance(instance) encoded = scheduler_driver.encode_instance(instance)
instances.append(encoded) instances.append(encoded)
return instances return instances
@ -148,6 +151,7 @@ class BaseTestCase(test.TestCase):
inst['instance_type_id'] = type_id inst['instance_type_id'] = type_id
inst['ami_launch_index'] = 0 inst['ami_launch_index'] = 0
inst['memory_mb'] = 0 inst['memory_mb'] = 0
inst['vcpus'] = 0
inst['root_gb'] = 0 inst['root_gb'] = 0
inst['ephemeral_gb'] = 0 inst['ephemeral_gb'] = 0
inst.update(params) inst.update(params)
@ -4123,10 +4127,9 @@ class KeypairAPITestCase(BaseTestCase):
self.ctxt, self.ctxt.user_id, 'foo') self.ctxt, self.ctxt.user_id, 'foo')
def test_create_keypair_quota_limit(self): def test_create_keypair_quota_limit(self):
def db_key_pair_count_by_user_max(self, user_id): def fake_quotas_count(self, context, resource, *args, **kwargs):
return FLAGS.quota_key_pairs return FLAGS.quota_key_pairs
self.stubs.Set(db, "key_pair_count_by_user", self.stubs.Set(QUOTAS, "count", fake_quotas_count)
db_key_pair_count_by_user_max)
self.assertRaises(exception.KeypairLimitExceeded, self.assertRaises(exception.KeypairLimitExceeded,
self.keypair_api.create_key_pair, self.keypair_api.create_key_pair,
self.ctxt, self.ctxt.user_id, 'foo') self.ctxt, self.ctxt.user_id, 'foo')
@ -4158,10 +4161,9 @@ class KeypairAPITestCase(BaseTestCase):
'* BAD CHARACTERS! *', self.pub_key) '* BAD CHARACTERS! *', self.pub_key)
def test_import_keypair_quota_limit(self): def test_import_keypair_quota_limit(self):
def db_key_pair_count_by_user_max(self, user_id): def fake_quotas_count(self, context, resource, *args, **kwargs):
return FLAGS.quota_key_pairs return FLAGS.quota_key_pairs
self.stubs.Set(db, "key_pair_count_by_user", self.stubs.Set(QUOTAS, "count", fake_quotas_count)
db_key_pair_count_by_user_max)
self.assertRaises(exception.KeypairLimitExceeded, self.assertRaises(exception.KeypairLimitExceeded,
self.keypair_api.import_key_pair, self.keypair_api.import_key_pair,
self.ctxt, self.ctxt.user_id, 'foo', self.pub_key) self.ctxt, self.ctxt.user_id, 'foo', self.pub_key)
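These stubs work because the keypair API now asks the quota engine directly instead of counting database rows itself. A hedged sketch of the check that the stubbed QUOTAS.count short-circuits (resource name and error type are taken from the test expectations above):

    from nova import exception
    from nova import quota

    QUOTAS = quota.QUOTAS

    def _check_key_pair_quota(context, user_id):
        # Count the user's existing key pairs, then ask whether one more
        # would still fit under the configured limit.
        count = QUOTAS.count(context, 'key_pairs', user_id)
        try:
            QUOTAS.limit_check(context, key_pairs=count + 1)
        except exception.OverQuota:
            raise exception.KeypairLimitExceeded()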

@ -551,26 +551,12 @@ class VlanNetworkTestCase(test.TestCase):
ctxt = context.RequestContext('testuser', 'testproject', ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False) is_admin=False)
def fake1(*args, **kwargs): def fake_allocate_address(*args, **kwargs):
return {'address': '10.0.0.1', 'project_id': ctxt.project_id} return {'address': '10.0.0.1', 'project_id': ctxt.project_id}
def fake2(*args, **kwargs): self.stubs.Set(self.network.db, 'floating_ip_allocate_address',
return 25 fake_allocate_address)
def fake3(*args, **kwargs):
return 0
self.stubs.Set(self.network.db, 'floating_ip_allocate_address', fake1)
# this time should raise
self.stubs.Set(self.network.db, 'floating_ip_count_by_project', fake2)
self.assertRaises(exception.QuotaError,
self.network.allocate_floating_ip,
ctxt,
ctxt.project_id)
# this time should not
self.stubs.Set(self.network.db, 'floating_ip_count_by_project', fake3)
self.network.allocate_floating_ip(ctxt, ctxt.project_id) self.network.allocate_floating_ip(ctxt, ctxt.project_id)
def test_deallocate_floating_ip(self): def test_deallocate_floating_ip(self):

@ -71,13 +71,14 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
instance2 = {'uuid': 'fake-uuid2'} instance2 = {'uuid': 'fake-uuid2'}
instance1_encoded = {'uuid': 'fake-uuid1', '_is_precooked': False} instance1_encoded = {'uuid': 'fake-uuid1', '_is_precooked': False}
instance2_encoded = {'uuid': 'fake-uuid2', '_is_precooked': False} instance2_encoded = {'uuid': 'fake-uuid2', '_is_precooked': False}
reservations = ['resv1', 'resv2']
# create_instance_db_entry() usually does this, but we're # create_instance_db_entry() usually does this, but we're
# stubbing it. # stubbing it.
def _add_uuid1(ctxt, request_spec): def _add_uuid1(ctxt, request_spec, reservations):
request_spec['instance_properties']['uuid'] = 'fake-uuid1' request_spec['instance_properties']['uuid'] = 'fake-uuid1'
def _add_uuid2(ctxt, request_spec): def _add_uuid2(ctxt, request_spec, reservations):
request_spec['instance_properties']['uuid'] = 'fake-uuid2' request_spec['instance_properties']['uuid'] = 'fake-uuid2'
self.mox.StubOutWithMock(ctxt, 'elevated') self.mox.StubOutWithMock(ctxt, 'elevated')
@ -92,8 +93,8 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn( self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(
['host1', 'host2', 'host3', 'host4']) ['host1', 'host2', 'host3', 'host4'])
random.random().AndReturn(.5) random.random().AndReturn(.5)
self.driver.create_instance_db_entry(ctxt, self.driver.create_instance_db_entry(ctxt, request_spec,
request_spec).WithSideEffects(_add_uuid1).AndReturn( reservations).WithSideEffects(_add_uuid1).AndReturn(
instance1) instance1)
driver.cast_to_compute_host(ctxt, 'host3', 'run_instance', driver.cast_to_compute_host(ctxt, 'host3', 'run_instance',
instance_uuid=instance1['uuid'], **fake_kwargs) instance_uuid=instance1['uuid'], **fake_kwargs)
@ -103,8 +104,8 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn( self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(
['host1', 'host2', 'host3', 'host4']) ['host1', 'host2', 'host3', 'host4'])
random.random().AndReturn(.2) random.random().AndReturn(.2)
self.driver.create_instance_db_entry(ctxt, self.driver.create_instance_db_entry(ctxt, request_spec,
request_spec).WithSideEffects(_add_uuid2).AndReturn( reservations).WithSideEffects(_add_uuid2).AndReturn(
instance2) instance2)
driver.cast_to_compute_host(ctxt, 'host1', 'run_instance', driver.cast_to_compute_host(ctxt, 'host1', 'run_instance',
instance_uuid=instance2['uuid'], **fake_kwargs) instance_uuid=instance2['uuid'], **fake_kwargs)
@ -112,7 +113,7 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.mox.ReplayAll() self.mox.ReplayAll()
result = self.driver.schedule_run_instance(ctxt, request_spec, result = self.driver.schedule_run_instance(ctxt, request_spec,
*fake_args, **fake_kwargs) reservations, *fake_args, **fake_kwargs)
expected = [instance1_encoded, instance2_encoded] expected = [instance1_encoded, instance2_encoded]
self.assertEqual(result, expected) self.assertEqual(result, expected)
@ -128,7 +129,7 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
# stubbing it. # stubbing it.
def _add_uuid(num): def _add_uuid(num):
"""Return a function that adds the provided uuid number.""" """Return a function that adds the provided uuid number."""
def _add_uuid_num(_, spec): def _add_uuid_num(_, spec, reservations):
spec['instance_properties']['uuid'] = 'fake-uuid%d' % num spec['instance_properties']['uuid'] = 'fake-uuid%d' % num
return _add_uuid_num return _add_uuid_num
@ -150,7 +151,7 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
# instance 1 # instance 1
self.driver._schedule(ctxt, 'compute', request_spec).AndReturn('host') self.driver._schedule(ctxt, 'compute', request_spec).AndReturn('host')
self.driver.create_instance_db_entry( self.driver.create_instance_db_entry(
ctxt, mox.Func(_has_launch_index(0)) ctxt, mox.Func(_has_launch_index(0)), None
).WithSideEffects(_add_uuid(1)).AndReturn(instance1) ).WithSideEffects(_add_uuid(1)).AndReturn(instance1)
driver.cast_to_compute_host(ctxt, 'host', 'run_instance', driver.cast_to_compute_host(ctxt, 'host', 'run_instance',
instance_uuid=instance1['uuid']) instance_uuid=instance1['uuid'])
@ -158,14 +159,14 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
# instance 2 # instance 2
self.driver._schedule(ctxt, 'compute', request_spec).AndReturn('host') self.driver._schedule(ctxt, 'compute', request_spec).AndReturn('host')
self.driver.create_instance_db_entry( self.driver.create_instance_db_entry(
ctxt, mox.Func(_has_launch_index(1)) ctxt, mox.Func(_has_launch_index(1)), None
).WithSideEffects(_add_uuid(2)).AndReturn(instance2) ).WithSideEffects(_add_uuid(2)).AndReturn(instance2)
driver.cast_to_compute_host(ctxt, 'host', 'run_instance', driver.cast_to_compute_host(ctxt, 'host', 'run_instance',
instance_uuid=instance2['uuid']) instance_uuid=instance2['uuid'])
driver.encode_instance(instance2).AndReturn(instance2) driver.encode_instance(instance2).AndReturn(instance2)
self.mox.ReplayAll() self.mox.ReplayAll()
self.driver.schedule_run_instance(ctxt, request_spec) self.driver.schedule_run_instance(ctxt, request_spec, None)
def test_basic_schedule_run_instance_no_hosts(self): def test_basic_schedule_run_instance_no_hosts(self):
ctxt = context.RequestContext('fake', 'fake', False) ctxt = context.RequestContext('fake', 'fake', False)
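The extra argument threaded through these mocks reflects the new contract: reservations created by QUOTAS.reserve() at the API layer travel with the run_instance request and are handed to create_instance_db_entry so they can be committed once the instance row exists. A rough sketch of the updated driver entry point, simplified to a single instance (the real drivers also loop over num_instances and pick hosts):

    from nova.scheduler import driver

    def schedule_run_instance(self, context, request_spec, reservations,
                              *args, **kwargs):
        host = self._schedule(context, 'compute', request_spec, **kwargs)
        instance = self.create_instance_db_entry(context, request_spec,
                                                 reservations)
        driver.cast_to_compute_host(context, host, 'run_instance',
                                    instance_uuid=instance['uuid'], **kwargs)
        return [driver.encode_instance(instance)]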

@ -51,7 +51,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
'ephemeral_gb': 0}, 'ephemeral_gb': 0},
'instance_properties': {'project_id': 1}} 'instance_properties': {'project_id': 1}}
self.assertRaises(exception.NoValidHost, sched.schedule_run_instance, self.assertRaises(exception.NoValidHost, sched.schedule_run_instance,
fake_context, request_spec) fake_context, request_spec, None)
def test_run_instance_non_admin(self): def test_run_instance_non_admin(self):
"""Test creating an instance locally using run_instance, passing """Test creating an instance locally using run_instance, passing
@ -72,7 +72,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
request_spec = {'instance_type': {'memory_mb': 1, 'local_gb': 1}, request_spec = {'instance_type': {'memory_mb': 1, 'local_gb': 1},
'instance_properties': {'project_id': 1}} 'instance_properties': {'project_id': 1}}
self.assertRaises(exception.NoValidHost, sched.schedule_run_instance, self.assertRaises(exception.NoValidHost, sched.schedule_run_instance,
fake_context, request_spec) fake_context, request_spec, None)
self.assertTrue(self.was_admin) self.assertTrue(self.was_admin)
def test_schedule_bad_topic(self): def test_schedule_bad_topic(self):
@ -117,14 +117,16 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
# instance 1 # instance 1
self.driver._provision_resource( self.driver._provision_resource(
ctxt, 'host1', ctxt, 'host1',
mox.Func(_has_launch_index(0)), fake_kwargs).AndReturn(instance1) mox.Func(_has_launch_index(0)), None,
fake_kwargs).AndReturn(instance1)
# instance 2 # instance 2
self.driver._provision_resource( self.driver._provision_resource(
ctxt, 'host2', ctxt, 'host2',
mox.Func(_has_launch_index(1)), fake_kwargs).AndReturn(instance2) mox.Func(_has_launch_index(1)), None,
fake_kwargs).AndReturn(instance2)
self.mox.ReplayAll() self.mox.ReplayAll()
self.driver.schedule_run_instance(context_fake, request_spec, self.driver.schedule_run_instance(context_fake, request_spec, None,
**fake_kwargs) **fake_kwargs)
def test_schedule_happy_day(self): def test_schedule_happy_day(self):

@ -68,14 +68,16 @@ class SchedulerRpcAPITestCase(test.TestCase):
topic='fake_topic', request_spec='fake_request_spec', topic='fake_topic', request_spec='fake_request_spec',
admin_password='pw', injected_files='fake_injected_files', admin_password='pw', injected_files='fake_injected_files',
requested_networks='fake_requested_networks', requested_networks='fake_requested_networks',
is_first_time=True, filter_properties='fake_filter_properties') is_first_time=True, filter_properties='fake_filter_properties',
reservations=None)
def test_run_instance_cast(self): def test_run_instance_cast(self):
self._test_scheduler_api('run_instance', rpc_method='cast', self._test_scheduler_api('run_instance', rpc_method='cast',
topic='fake_topic', request_spec='fake_request_spec', topic='fake_topic', request_spec='fake_request_spec',
admin_password='pw', injected_files='fake_injected_files', admin_password='pw', injected_files='fake_injected_files',
requested_networks='fake_requested_networks', requested_networks='fake_requested_networks',
is_first_time=True, filter_properties='fake_filter_properties') is_first_time=True, filter_properties='fake_filter_properties',
reservations=None)
def test_prep_resize(self): def test_prep_resize(self):
self._test_scheduler_api('prep_resize', rpc_method='cast', self._test_scheduler_api('prep_resize', rpc_method='cast',

@ -389,10 +389,10 @@ class SchedulerTestCase(test.TestCase):
self.driver.compute_api.create_db_entry_for_new_instance( self.driver.compute_api.create_db_entry_for_new_instance(
self.context, instance_type, image, base_options, self.context, instance_type, image, base_options,
security_group, security_group,
block_device_mapping).AndReturn(fake_instance) block_device_mapping, None).AndReturn(fake_instance)
self.mox.ReplayAll() self.mox.ReplayAll()
instance = self.driver.create_instance_db_entry(self.context, instance = self.driver.create_instance_db_entry(self.context,
request_spec) request_spec, None)
self.mox.VerifyAll() self.mox.VerifyAll()
self.assertEqual(instance, fake_instance) self.assertEqual(instance, fake_instance)
@ -407,7 +407,7 @@ class SchedulerTestCase(test.TestCase):
self.mox.ReplayAll() self.mox.ReplayAll()
instance = self.driver.create_instance_db_entry(self.context, instance = self.driver.create_instance_db_entry(self.context,
request_spec) request_spec, None)
self.assertEqual(instance, fake_instance) self.assertEqual(instance, fake_instance)
def _live_migration_instance(self): def _live_migration_instance(self):

@ -297,7 +297,8 @@ class OldQuotaTestCase(test.TestCase):
scheduler = scheduler_driver.Scheduler scheduler = scheduler_driver.Scheduler
instance = scheduler().create_instance_db_entry( instance = scheduler().create_instance_db_entry(
context, context,
msg['args']['request_spec']) msg['args']['request_spec'],
None)
return [scheduler_driver.encode_instance(instance)] return [scheduler_driver.encode_instance(instance)]
else: else:
return orig_rpc_call(context, topic, msg) return orig_rpc_call(context, topic, msg)
@ -1840,7 +1841,10 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
def sync(context, project_id, session): def sync(context, project_id, session):
self.sync_called.add(res_name) self.sync_called.add(res_name)
if res_name in self.usages: if res_name in self.usages:
return {res_name: self.usages[res_name].in_use - 1} if self.usages[res_name].in_use < 0:
return {res_name: 2}
else:
return {res_name: self.usages[res_name].in_use - 1}
return {res_name: 0} return {res_name: 0}
return sync return sync
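The new branch in the fake matters because quota_reserve refreshes a usage record from its sync function when the recorded in_use is negative, not only when the record is missing or its until_refresh counter has run out; returning 2 gives the negative-in-use test below a recognizable refreshed value. A hedged sketch of the refresh decision, with the conditions reconstructed from the tests in this file (the real code also honors a max_age check):

    def _needs_refresh(usage, max_age_exceeded=False):
        # usage is a quota_usages row, or None if it does not exist yet.
        if usage is None:
            return True
        if usage.in_use < 0:
            return True
        if usage.until_refresh is not None and usage.until_refresh <= 0:
            return True
        return max_age_exceeded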
@ -2008,6 +2012,57 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
delta=2 * 1024), delta=2 * 1024),
]) ])
def test_quota_reserve_negative_in_use(self):
self.init_usage('test_project', 'instances', -1, 0, until_refresh=1)
self.init_usage('test_project', 'cores', -1, 0, until_refresh=1)
self.init_usage('test_project', 'ram', -1, 0, until_refresh=1)
context = FakeContext('test_project', 'test_class')
quotas = dict(
instances=5,
cores=10,
ram=10 * 1024,
)
deltas = dict(
instances=2,
cores=4,
ram=2 * 1024,
)
result = sqa_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 5, 0)
self.assertEqual(self.sync_called, set(['instances', 'cores', 'ram']))
self.compare_usage(self.usages, [
dict(resource='instances',
project_id='test_project',
in_use=2,
reserved=2,
until_refresh=5),
dict(resource='cores',
project_id='test_project',
in_use=2,
reserved=4,
until_refresh=5),
dict(resource='ram',
project_id='test_project',
in_use=2,
reserved=2 * 1024,
until_refresh=5),
])
self.assertEqual(self.usages_created, {})
self.compare_reservation(result, [
dict(resource='instances',
usage_id=self.usages['instances'],
project_id='test_project',
delta=2),
dict(resource='cores',
usage_id=self.usages['cores'],
project_id='test_project',
delta=4),
dict(resource='ram',
usage_id=self.usages['ram'],
delta=2 * 1024),
])
def test_quota_reserve_until_refresh(self): def test_quota_reserve_until_refresh(self):
self.init_usage('test_project', 'instances', 3, 0, until_refresh=1) self.init_usage('test_project', 'instances', 3, 0, until_refresh=1)
self.init_usage('test_project', 'cores', 3, 0, until_refresh=1) self.init_usage('test_project', 'cores', 3, 0, until_refresh=1)
@ -2181,10 +2236,8 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
cores=-4, cores=-4,
ram=-2 * 1024, ram=-2 * 1024,
) )
self.assertRaises(exception.InvalidQuotaValue, result = sqa_api.quota_reserve(context, self.resources, quotas,
sqa_api.quota_reserve, deltas, self.expire, 0, 0)
context, self.resources, quotas,
deltas, self.expire, 0, 0)
self.assertEqual(self.sync_called, set([])) self.assertEqual(self.sync_called, set([]))
self.compare_usage(self.usages, [ self.compare_usage(self.usages, [
@ -2205,7 +2258,19 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
until_refresh=None), until_refresh=None),
]) ])
self.assertEqual(self.usages_created, {}) self.assertEqual(self.usages_created, {})
self.assertEqual(self.reservations_created, {}) self.compare_reservation(result, [
dict(resource='instances',
usage_id=self.usages['instances'],
project_id='test_project',
delta=-2),
dict(resource='cores',
usage_id=self.usages['cores'],
project_id='test_project',
delta=-4),
dict(resource='ram',
usage_id=self.usages['ram'],
delta=-2 * 1024),
])
def test_quota_reserve_overs(self): def test_quota_reserve_overs(self):
self.init_usage('test_project', 'instances', 4, 0) self.init_usage('test_project', 'instances', 4, 0)
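Negative deltas are no longer rejected with InvalidQuotaValue; they now produce ordinary reservations whose commit decrements in_use, which is how the delete paths (see the volume manager changes further down) hand capacity back through the same reserve/commit pipeline. A sketch of the low-level call with the same arguments as the test above, where context, resources and quotas stand in for the fixtures the test builds (until_refresh and max_age are both 0 here):

    import datetime

    from nova.db.sqlalchemy import api as sqa_api

    deltas = dict(instances=-2, cores=-4, ram=-2 * 1024)
    expire = datetime.datetime.utcnow() + datetime.timedelta(seconds=3600)
    reservations = sqa_api.quota_reserve(context, resources, quotas,
                                         deltas, expire, 0, 0)
    # Committing these reservations later subtracts the deltas from in_use.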

@ -32,10 +32,12 @@ from nova import log as logging
from nova.notifier import test_notifier from nova.notifier import test_notifier
from nova.openstack.common import importutils from nova.openstack.common import importutils
import nova.policy import nova.policy
from nova import quota
from nova import rpc from nova import rpc
from nova import test from nova import test
import nova.volume.api import nova.volume.api
QUOTAS = quota.QUOTAS
FLAGS = flags.FLAGS FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@ -90,6 +92,20 @@ class VolumeTestCase(test.TestCase):
def test_create_delete_volume(self): def test_create_delete_volume(self):
"""Test volume can be created and deleted.""" """Test volume can be created and deleted."""
# Need to stub out reserve, commit, and rollback
def fake_reserve(context, expire=None, **deltas):
return ["RESERVATION"]
def fake_commit(context, reservations):
pass
def fake_rollback(context, reservations):
pass
self.stubs.Set(QUOTAS, "reserve", fake_reserve)
self.stubs.Set(QUOTAS, "commit", fake_commit)
self.stubs.Set(QUOTAS, "rollback", fake_rollback)
volume = self._create_volume() volume = self._create_volume()
volume_id = volume['id'] volume_id = volume['id']
self.assertEquals(len(test_notifier.NOTIFICATIONS), 0) self.assertEquals(len(test_notifier.NOTIFICATIONS), 0)

@ -38,6 +38,8 @@ flags.DECLARE('storage_availability_zone', 'nova.volume.manager')
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
def wrap_check_policy(func): def wrap_check_policy(func):
"""Check policy corresponding to the wrapped methods prior to execution """Check policy corresponding to the wrapped methods prior to execution
@ -80,7 +82,9 @@ class API(base.Base):
else: else:
snapshot_id = None snapshot_id = None
if quota.allowed_volumes(context, 1, size) < 1: try:
reservations = QUOTAS.reserve(context, volumes=1, gigabytes=size)
except exception.OverQuota:
pid = context.project_id pid = context.project_id
LOG.warn(_("Quota exceeded for %(pid)s, tried to create" LOG.warn(_("Quota exceeded for %(pid)s, tried to create"
" %(size)sG volume") % locals()) " %(size)sG volume") % locals())
@ -114,7 +118,8 @@ class API(base.Base):
{"method": "create_volume", {"method": "create_volume",
"args": {"topic": FLAGS.volume_topic, "args": {"topic": FLAGS.volume_topic,
"volume_id": volume['id'], "volume_id": volume['id'],
"snapshot_id": snapshot_id}}) "snapshot_id": snapshot_id,
"reservations": reservations}})
return volume return volume
# TODO(yamahata): eliminate dumb polling # TODO(yamahata): eliminate dumb polling
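This is the general shape of the new quota transaction: the API reserves up front, the reservation ids ride along with the RPC cast, and whoever finishes the work either commits or rolls back. A minimal, self-contained sketch of that pattern (the helper name and the do_work callable are illustrative, not part of this change):

    from nova import quota

    QUOTAS = quota.QUOTAS

    def provision_with_quota(context, do_work, **deltas):
        # Reserve first; commit only once the resource really exists,
        # otherwise roll the reservation back so usage is not leaked.
        reservations = QUOTAS.reserve(context, **deltas)
        try:
            result = do_work(context)
        except Exception:
            QUOTAS.rollback(context, reservations)
            raise
        QUOTAS.commit(context, reservations)
        return result

    # e.g. provision_with_quota(ctxt, build_volume, volumes=1, gigabytes=10)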

@ -46,6 +46,7 @@ from nova import manager
from nova.openstack.common import cfg from nova.openstack.common import cfg
from nova.openstack.common import excutils from nova.openstack.common import excutils
from nova.openstack.common import importutils from nova.openstack.common import importutils
from nova import quota
from nova import rpc from nova import rpc
from nova import utils from nova import utils
from nova.volume import utils as volume_utils from nova.volume import utils as volume_utils
@ -54,6 +55,8 @@ from nova.volume import volume_types
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
volume_manager_opts = [ volume_manager_opts = [
cfg.StrOpt('storage_availability_zone', cfg.StrOpt('storage_availability_zone',
default='nova', default='nova',
@ -103,7 +106,8 @@ class VolumeManager(manager.SchedulerDependentManager):
else: else:
LOG.info(_("volume %s: skipping export"), volume['name']) LOG.info(_("volume %s: skipping export"), volume['name'])
def create_volume(self, context, volume_id, snapshot_id=None): def create_volume(self, context, volume_id, snapshot_id=None,
reservations=None):
"""Creates and exports the volume.""" """Creates and exports the volume."""
context = context.elevated() context = context.elevated()
volume_ref = self.db.volume_get(context, volume_id) volume_ref = self.db.volume_get(context, volume_id)
@ -136,8 +140,14 @@ class VolumeManager(manager.SchedulerDependentManager):
model_update = self.driver.create_export(context, volume_ref) model_update = self.driver.create_export(context, volume_ref)
if model_update: if model_update:
self.db.volume_update(context, volume_ref['id'], model_update) self.db.volume_update(context, volume_ref['id'], model_update)
# Commit the reservation
if reservations:
QUOTAS.commit(context, reservations)
except Exception: except Exception:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
if reservations:
QUOTAS.rollback(context, reservations)
self.db.volume_update(context, self.db.volume_update(context,
volume_ref['id'], {'status': 'error'}) volume_ref['id'], {'status': 'error'})
@ -179,9 +189,22 @@ class VolumeManager(manager.SchedulerDependentManager):
volume_ref['id'], volume_ref['id'],
{'status': 'error_deleting'}) {'status': 'error_deleting'})
# Get reservations
try:
reservations = QUOTAS.reserve(context, volumes=-1,
gigabytes=-volume_ref['size'])
except Exception:
reservations = None
LOG.exception(_("Failed to update usages deleting volume"))
self.db.volume_destroy(context, volume_id) self.db.volume_destroy(context, volume_id)
LOG.debug(_("volume %s: deleted successfully"), volume_ref['name']) LOG.debug(_("volume %s: deleted successfully"), volume_ref['name'])
self._notify_about_volume_usage(context, volume_ref, "delete.end") self._notify_about_volume_usage(context, volume_ref, "delete.end")
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations)
return True return True
def create_snapshot(self, context, volume_id, snapshot_id): def create_snapshot(self, context, volume_id, snapshot_id):
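A hedged sketch of how a unit test could pin down the commit half of this behavior without touching the real quota tables, following the stubbing style used in test_volume above (the test name and reservation id are illustrative):

    def test_create_volume_commits_reservations(self):
        committed = []

        def fake_commit(context, reservations):
            committed.append(reservations)

        self.stubs.Set(QUOTAS, "commit", fake_commit)
        self.stubs.Set(QUOTAS, "rollback",
                       lambda context, reservations: self.fail("rollback"))
        volume = self._create_volume()
        self.volume.create_volume(self.context, volume['id'],
                                  reservations=["RESERVATION"])
        self.assertEqual(committed, [["RESERVATION"]])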