Count instances to check quota

This changes instances, cores, and ram from ReservableResources to
CountableResources and replaces quota reserve/commit/rollback with
check_deltas accordingly.

All of the reservation- and usage-related unit tests are removed because:

  1. They rely on some global QuotaEngine resources being
     ReservableResources, and every ReservableResource has been removed.
  2. Reservations and usages are no longer in use anywhere in the
     codebase.

Part of blueprint cells-count-resources-to-check-quota-in-api

Change-Id: I9269ffa2b80e48db96c622d0dc0817738854f602
Author: melanie witt
Date:   2017-03-29 19:47:53 +00:00
Parent: 430ec6504b
Commit: 5c90b25e49

22 changed files with 1211 additions and 2096 deletions
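
Conceptually, the change swaps Nova's reserve/commit/rollback quota flow
for a count-and-check flow. The sketch below is illustrative only (it is
not code from this commit), but both APIs it uses appear in the diffs that
follow:

    # Old style: create reservation rows up front, then commit or roll back.
    quotas = objects.Quotas(context=context)
    quotas.reserve(instances=max_count, cores=req_cores, ram=req_ram)
    try:
        # ... create the instances ...
        quotas.commit()
    except Exception:
        quotas.rollback()

    # New style: count actual usage and check the proposed deltas. No
    # reservation rows exist, so there is nothing to commit or roll back.
    deltas = {'instances': max_count, 'cores': req_cores, 'ram': req_ram}
    objects.Quotas.check_deltas(context, deltas, project_id,
                                user_id=user_id,
                                check_project_id=project_id,
                                check_user_id=user_id)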


@@ -315,111 +315,6 @@ class API(base.Base):
else: else:
raise exception.OnsetFileContentLimitExceeded() raise exception.OnsetFileContentLimitExceeded()
def _get_headroom(self, quotas, usages, deltas):
headroom = {res: quotas[res] -
(usages[res]['in_use'] + usages[res]['reserved'])
for res in quotas.keys()}
# If quota_cores is unlimited [-1]:
# - set cores headroom based on instances headroom:
if quotas.get('cores') == -1:
if deltas.get('cores'):
hc = headroom.get('instances', 1) * deltas['cores']
headroom['cores'] = hc / deltas.get('instances', 1)
else:
headroom['cores'] = headroom.get('instances', 1)
# If quota_ram is unlimited [-1]:
# - set ram headroom based on instances headroom:
if quotas.get('ram') == -1:
if deltas.get('ram'):
hr = headroom.get('instances', 1) * deltas['ram']
headroom['ram'] = hr / deltas.get('instances', 1)
else:
headroom['ram'] = headroom.get('instances', 1)
return headroom
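
For reference, a worked example of the unlimited-resource branch in the
removed helper above, with hypothetical numbers:

    quotas = {'instances': 10, 'cores': -1}
    usages = {'instances': {'in_use': 4, 'reserved': 0},
              'cores': {'in_use': 8, 'reserved': 0}}
    deltas = {'instances': 2, 'cores': 8}  # two 4-vcpu instances
    # headroom['instances'] = 10 - (4 + 0) = 6
    # cores is unlimited (-1), so it is scaled from the instances headroom:
    # headroom['cores'] = 6 * 8 / 2 = 24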
def _check_num_instances_quota(self, context, instance_type, min_count,
max_count, project_id=None, user_id=None):
"""Enforce quota limits on number of instances created."""
# Determine requested cores and ram
req_cores = max_count * instance_type['vcpus']
req_ram = max_count * instance_type['memory_mb']
# Check the quota
try:
quotas = objects.Quotas(context=context)
quotas.reserve(instances=max_count,
cores=req_cores, ram=req_ram,
project_id=project_id, user_id=user_id)
except exception.OverQuota as exc:
# OK, we exceeded quota; let's figure out why...
quotas = exc.kwargs['quotas']
overs = exc.kwargs['overs']
usages = exc.kwargs['usages']
deltas = {'instances': max_count,
'cores': req_cores, 'ram': req_ram}
headroom = self._get_headroom(quotas, usages, deltas)
allowed = headroom.get('instances', 1)
# Reduce 'allowed' instances in line with the cores & ram headroom
if instance_type['vcpus']:
allowed = min(allowed,
headroom['cores'] // instance_type['vcpus'])
if instance_type['memory_mb']:
allowed = min(allowed,
headroom['ram'] // instance_type['memory_mb'])
# Convert to the appropriate exception message
if allowed <= 0:
msg = _("Cannot run any more instances of this type.")
elif min_count <= allowed <= max_count:
# We're actually OK, but still need reservations
return self._check_num_instances_quota(context, instance_type,
min_count, allowed)
else:
msg = (_("Can only run %s more instances of this type.") %
allowed)
num_instances = (str(min_count) if min_count == max_count else
"%s-%s" % (min_count, max_count))
requested = dict(instances=num_instances, cores=req_cores,
ram=req_ram)
(overs, reqs, total_alloweds, useds) = self._get_over_quota_detail(
headroom, overs, quotas, requested)
params = {'overs': overs, 'pid': context.project_id,
'min_count': min_count, 'max_count': max_count,
'msg': msg}
if min_count == max_count:
LOG.debug(("%(overs)s quota exceeded for %(pid)s,"
" tried to run %(min_count)d instances. "
"%(msg)s"), params)
else:
LOG.debug(("%(overs)s quota exceeded for %(pid)s,"
" tried to run between %(min_count)d and"
" %(max_count)d instances. %(msg)s"),
params)
raise exception.TooManyInstances(overs=overs,
req=reqs,
used=useds,
allowed=total_alloweds)
return max_count, quotas
def _get_over_quota_detail(self, headroom, overs, quotas, requested):
reqs = []
useds = []
total_alloweds = []
for resource in overs:
reqs.append(str(requested[resource]))
useds.append(str(quotas[resource] - headroom[resource]))
total_alloweds.append(str(quotas[resource]))
(overs, reqs, useds, total_alloweds) = map(', '.join, (
overs, reqs, useds, total_alloweds))
return overs, reqs, total_alloweds, useds
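
The removed _get_over_quota_detail packed the per-resource numbers into
comma-joined strings for the TooManyInstances message. A quick illustration
with hypothetical values:

    overs = ['cores', 'ram']
    quotas = {'cores': 20, 'ram': 51200}
    headroom = {'cores': 4, 'ram': 1024}
    requested = {'cores': 8, 'ram': 16384}
    # overs          -> 'cores, ram'
    # reqs           -> '8, 16384'
    # useds          -> '16, 50176'   (quota minus headroom)
    # total_alloweds -> '20, 51200'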
def _check_metadata_properties_quota(self, context, metadata=None): def _check_metadata_properties_quota(self, context, metadata=None):
"""Enforce quota limits on metadata properties.""" """Enforce quota limits on metadata properties."""
if not metadata: if not metadata:
@@ -987,8 +882,8 @@ class API(base.Base):
block_device_mapping, shutdown_terminate, block_device_mapping, shutdown_terminate,
instance_group, check_server_group_quota, filter_properties, instance_group, check_server_group_quota, filter_properties,
key_pair, tags): key_pair, tags):
# Reserve quotas # Check quotas
num_instances, quotas = self._check_num_instances_quota( num_instances = compute_utils.check_num_instances_quota(
context, instance_type, min_count, max_count) context, instance_type, min_count, max_count)
security_groups = self.security_group_api.populate_security_groups( security_groups = self.security_group_api.populate_security_groups(
security_groups) security_groups)
@@ -1086,29 +981,10 @@ class API(base.Base):
# instance # instance
instance_group.members.extend(members) instance_group.members.extend(members)
# In the case of any exceptions, attempt DB cleanup and rollback the # In the case of any exceptions, attempt DB cleanup
# quota reservations.
except Exception: except Exception:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
try: self._cleanup_build_artifacts(None, instances_to_build)
for rs, br, im in instances_to_build:
try:
rs.destroy()
except exception.RequestSpecNotFound:
pass
try:
im.destroy()
except exception.InstanceMappingNotFound:
pass
try:
br.destroy()
except exception.BuildRequestNotFound:
pass
finally:
quotas.rollback()
# Commit the reservations
quotas.commit()
return instances_to_build return instances_to_build
@@ -1263,6 +1139,24 @@ class API(base.Base):
# we stop supporting v1. # we stop supporting v1.
for instance in instances: for instance in instances:
instance.create() instance.create()
# NOTE(melwitt): We recheck the quota after creating the objects
# to prevent users from allocating more resources than their
# allowed quota in the event of a race. This is configurable
# because it can be expensive if strict quota limits are not
# required in a deployment.
if CONF.quota.recheck_quota:
try:
compute_utils.check_num_instances_quota(
context, instance_type, 0, 0,
orig_num_req=len(instances))
except exception.TooManyInstances:
with excutils.save_and_reraise_exception():
# Need to clean up all the instances we created
# along with the build requests, request specs,
# and instance mappings.
self._cleanup_build_artifacts(instances,
instances_to_build)
self.compute_task_api.build_instances(context, self.compute_task_api.build_instances(context,
instances=instances, image=boot_meta, instances=instances, image=boot_meta,
filter_properties=filter_properties, filter_properties=filter_properties,
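
The recheck exists because check_deltas is not atomic with the later
instance creation. A standalone toy simulation (not Nova code) of the race
it closes, assuming a limit of 10 instances with 9 already in use:

    limit = 10
    db = {'instances': 9}

    def check_deltas(delta):
        if db['instances'] + delta > limit:
            raise Exception('OverQuota')

    check_deltas(1)           # request A passes
    check_deltas(1)           # request B also passes: the race
    db['instances'] += 2      # both requests create their instance
    try:
        check_deltas(0)       # recheck with a zero delta recounts usage
    except Exception:
        db['instances'] -= 1  # the loser cleans up what it created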
@@ -1286,6 +1180,31 @@ class API(base.Base):
return (instances, reservation_id) return (instances, reservation_id)
@staticmethod
def _cleanup_build_artifacts(instances, instances_to_build):
# instances_to_build is a list of tuples:
# (RequestSpec, BuildRequest, InstanceMapping)
# Be paranoid about artifacts being deleted underneath us.
for instance in instances or []:
try:
instance.destroy()
except exception.InstanceNotFound:
pass
for rs, build_request, im in instances_to_build or []:
try:
rs.destroy()
except exception.RequestSpecNotFound:
pass
try:
build_request.destroy()
except exception.BuildRequestNotFound:
pass
try:
im.destroy()
except exception.InstanceMappingNotFound:
pass
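
Note that the new helper is invoked with different first arguments from its
two call sites shown above: None during the initial create (the instance
records may not exist yet), and the created instances on a failed recheck:

    self._cleanup_build_artifacts(None, instances_to_build)
    self._cleanup_build_artifacts(instances, instances_to_build)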
@staticmethod @staticmethod
def _volume_size(instance_type, bdm): def _volume_size(instance_type, bdm):
size = bdm.get('volume_size') size = bdm.get('volume_size')
@@ -1815,26 +1734,17 @@ class API(base.Base):
# buildrequest indicates that the build process should be halted by # buildrequest indicates that the build process should be halted by
# the conductor. # the conductor.
# Since conductor has halted the build process no cleanup of the # NOTE(alaski): Though the conductor halts the build process it
# instance is necessary, but quotas must still be decremented. # does not currently delete the instance record. This is
project_id, user_id = quotas_obj.ids_from_instance( # because in the near future the instance record will not be
context, instance) # created if the buildrequest has been deleted here. For now we
# This is confusing but actually decrements quota. # ensure the instance has been set to deleted at this point.
quotas = self._create_reservations(context, # Yes this directly contradicts the comment earlier in this
instance, # method, but this is a temporary measure.
instance.task_state, # Look up the instance because the current instance object was
project_id, user_id) # stashed on the buildrequest and therefore not complete enough
# to run .destroy().
try: try:
# NOTE(alaski): Though the conductor halts the build process it
# does not currently delete the instance record. This is
# because in the near future the instance record will not be
# created if the buildrequest has been deleted here. For now we
# ensure the instance has been set to deleted at this point.
# Yes this directly contradicts the comment earlier in this
# method, but this is a temporary measure.
# Look up the instance because the current instance object was
# stashed on the buildrequest and therefore not complete enough
# to run .destroy().
instance_uuid = instance.uuid instance_uuid = instance.uuid
cell, instance = self._lookup_instance(context, instance_uuid) cell, instance = self._lookup_instance(context, instance_uuid)
if instance is not None: if instance is not None:
@@ -1848,13 +1758,8 @@ class API(base.Base):
instance.destroy() instance.destroy()
else: else:
instance.destroy() instance.destroy()
quotas.commit()
else:
# The instance is already deleted so rollback the quota
# usage decrement reservation in the not found block below.
raise exception.InstanceNotFound(instance_id=instance_uuid)
except exception.InstanceNotFound: except exception.InstanceNotFound:
quotas.rollback() pass
return True return True
return False return False
@@ -1898,41 +1803,13 @@ class API(base.Base):
# acceptable to skip the rest of the delete processing. # acceptable to skip the rest of the delete processing.
cell, instance = self._lookup_instance(context, instance.uuid) cell, instance = self._lookup_instance(context, instance.uuid)
if cell and instance: if cell and instance:
# Conductor may have buried the instance in cell0 but
# quotas must still be decremented in the main cell DB.
project_id, user_id = quotas_obj.ids_from_instance(
context, instance)
# TODO(mriedem): This is a hack until we have quotas in the
# API database. When we looked up the instance in
# _get_instance if the instance has a mapping then the
# context is modified to set the target cell permanently.
# However, if the instance is in cell0 then the context
# is targeting cell0 and the quotas would be decremented
# from cell0 and we actually need them decremented from
# the cell database. So we temporarily untarget the
# context while we do the quota stuff and re-target after
# we're done.
# We have to get the flavor from the instance while the
# context is still targeted to where the instance lives.
quota_flavor = self._get_flavor_for_reservation(instance)
with nova_context.target_cell(context, None) as cctxt:
# This is confusing but actually decrements quota usage
quotas = self._create_reservations(
cctxt, instance, instance.task_state,
project_id, user_id, flavor=quota_flavor)
try: try:
# Now destroy the instance from the cell it lives in. # Now destroy the instance from the cell it lives in.
with compute_utils.notify_about_instance_delete( with compute_utils.notify_about_instance_delete(
self.notifier, context, instance): self.notifier, context, instance):
instance.destroy() instance.destroy()
# Now commit the quota reservation to decrement usage.
quotas.commit()
except exception.InstanceNotFound: except exception.InstanceNotFound:
quotas.rollback() pass
# The instance was deleted or is already gone. # The instance was deleted or is already gone.
return return
if not instance: if not instance:
@@ -1954,8 +1831,6 @@ class API(base.Base):
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid) context, instance.uuid)
project_id, user_id = quotas_obj.ids_from_instance(context, instance)
# At these states an instance has a snapshot associate. # At these states an instance has a snapshot associate.
if instance.vm_state in (vm_states.SHELVED, if instance.vm_state in (vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED): vm_states.SHELVED_OFFLOADED):
@@ -1976,21 +1851,12 @@ class API(base.Base):
instance=instance) instance=instance)
original_task_state = instance.task_state original_task_state = instance.task_state
quotas = None
try: try:
# NOTE(maoy): no expected_task_state needs to be set # NOTE(maoy): no expected_task_state needs to be set
instance.update(instance_attrs) instance.update(instance_attrs)
instance.progress = 0 instance.progress = 0
instance.save() instance.save()
# NOTE(comstud): If we delete the instance locally, we'll
# commit the reservations here. Otherwise, the manager side
# will commit or rollback the reservations based on success.
quotas = self._create_reservations(context,
instance,
original_task_state,
project_id, user_id)
# NOTE(dtp): cells.enable = False means "use cells v2". # NOTE(dtp): cells.enable = False means "use cells v2".
# Run everywhere except v1 compute cells. # Run everywhere except v1 compute cells.
if not CONF.cells.enable or self.cell_type == 'api': if not CONF.cells.enable or self.cell_type == 'api':
@@ -2000,11 +1866,8 @@ class API(base.Base):
if self.cell_type == 'api': if self.cell_type == 'api':
# NOTE(comstud): If we're in the API cell, we need to # NOTE(comstud): If we're in the API cell, we need to
# skip all remaining logic and just call the callback, # skip all remaining logic and just call the callback,
# which will cause a cast to the child cell. Also, # which will cause a cast to the child cell.
# commit reservations here early until we have a better cb(context, instance, bdms)
# way to deal with quotas with cells.
cb(context, instance, bdms, reservations=None)
quotas.commit()
return return
shelved_offloaded = (instance.vm_state shelved_offloaded = (instance.vm_state
== vm_states.SHELVED_OFFLOADED) == vm_states.SHELVED_OFFLOADED)
@@ -2018,7 +1881,6 @@ class API(base.Base):
self.notifier, context, instance, self.notifier, context, instance,
"%s.end" % delete_type, "%s.end" % delete_type,
system_metadata=instance.system_metadata) system_metadata=instance.system_metadata)
quotas.commit()
LOG.info('Instance deleted and does not have host ' LOG.info('Instance deleted and does not have host '
'field, its vm_state is %(state)s.', 'field, its vm_state is %(state)s.',
{'state': instance.vm_state}, {'state': instance.vm_state},
@@ -2043,21 +1905,11 @@ class API(base.Base):
LOG.info('Instance is already in deleting state, ' LOG.info('Instance is already in deleting state, '
'ignoring this request', 'ignoring this request',
instance=instance) instance=instance)
quotas.rollback()
return return
self._record_action_start(context, instance, self._record_action_start(context, instance,
instance_actions.DELETE) instance_actions.DELETE)
# NOTE(snikitin): If instance's vm_state is 'soft-delete', cb(context, instance, bdms)
# we should not count reservations here, because instance
# in soft-delete vm_state have already had quotas
# decremented. More details:
# https://bugs.launchpad.net/nova/+bug/1333145
if instance.vm_state == vm_states.SOFT_DELETED:
quotas.rollback()
cb(context, instance, bdms,
reservations=quotas.reservations)
except exception.ComputeHostNotFound: except exception.ComputeHostNotFound:
pass pass
@@ -2081,16 +1933,10 @@ class API(base.Base):
instance=instance) instance=instance)
with nova_context.target_cell(context, cell) as cctxt: with nova_context.target_cell(context, cell) as cctxt:
self._local_delete(cctxt, instance, bdms, delete_type, cb) self._local_delete(cctxt, instance, bdms, delete_type, cb)
quotas.commit()
except exception.InstanceNotFound: except exception.InstanceNotFound:
# NOTE(comstud): Race condition. Instance already gone. # NOTE(comstud): Race condition. Instance already gone.
if quotas: pass
quotas.rollback()
except Exception:
with excutils.save_and_reraise_exception():
if quotas:
quotas.rollback()
def _confirm_resize_on_deleting(self, context, instance): def _confirm_resize_on_deleting(self, context, instance):
# If in the middle of a resize, use confirm_resize to # If in the middle of a resize, use confirm_resize to
@@ -2115,65 +1961,12 @@ class API(base.Base):
return return
src_host = migration.source_compute src_host = migration.source_compute
# Call since this can race with the terminate_instance.
# The resize is done but awaiting confirmation/reversion,
# so there are two cases:
# 1. up-resize: here -instance['vcpus'/'memory_mb'] match
# the quota usages accounted for this instance,
# so no further quota adjustment is needed
# 2. down-resize: here -instance['vcpus'/'memory_mb'] are
# shy by delta(old, new) from the quota usages accounted
# for this instance, so we must adjust
try:
deltas = compute_utils.downsize_quota_delta(context, instance)
except KeyError:
LOG.info('Migration %s may have been confirmed during delete',
migration.id, instance=instance)
return
quotas = compute_utils.reserve_quota_delta(context, deltas, instance)
self._record_action_start(context, instance, self._record_action_start(context, instance,
instance_actions.CONFIRM_RESIZE) instance_actions.CONFIRM_RESIZE)
self.compute_rpcapi.confirm_resize(context, self.compute_rpcapi.confirm_resize(context,
instance, migration, instance, migration, src_host, cast=False)
src_host, quotas.reservations,
cast=False)
def _get_flavor_for_reservation(self, instance):
"""Returns the flavor needed for _create_reservations.
This is used when the context is targeted to a cell that is
different from the one that the instance lives in.
"""
if instance.task_state in (task_states.RESIZE_MIGRATED,
task_states.RESIZE_FINISH):
return instance.old_flavor
return instance.flavor
def _create_reservations(self, context, instance, original_task_state,
project_id, user_id, flavor=None):
# NOTE(wangpan): if the instance is resizing, and the resources
# are updated to new instance type, we should use
# the old instance type to create reservation.
# see https://bugs.launchpad.net/nova/+bug/1099729 for more details
if original_task_state in (task_states.RESIZE_MIGRATED,
task_states.RESIZE_FINISH):
old_flavor = flavor or instance.old_flavor
instance_vcpus = old_flavor.vcpus
instance_memory_mb = old_flavor.memory_mb
else:
flavor = flavor or instance.flavor
instance_vcpus = flavor.vcpus
instance_memory_mb = flavor.memory_mb
quotas = objects.Quotas(context=context)
quotas.reserve(project_id=project_id,
user_id=user_id,
instances=-1,
cores=-instance_vcpus,
ram=-instance_memory_mb)
return quotas
def _get_stashed_volume_connector(self, bdm, instance): def _get_stashed_volume_connector(self, bdm, instance):
"""Lookup a connector dict from the bdm.connection_info if set """Lookup a connector dict from the bdm.connection_info if set
@@ -2277,8 +2070,7 @@ class API(base.Base):
self.notifier, context, instance, "%s.end" % delete_type, self.notifier, context, instance, "%s.end" % delete_type,
system_metadata=sys_meta) system_metadata=sys_meta)
def _do_delete(self, context, instance, bdms, reservations=None, def _do_delete(self, context, instance, bdms, local=False):
local=False):
if local: if local:
instance.vm_state = vm_states.DELETED instance.vm_state = vm_states.DELETED
instance.task_state = None instance.task_state = None
@@ -2286,11 +2078,9 @@ class API(base.Base):
instance.save() instance.save()
else: else:
self.compute_rpcapi.terminate_instance(context, instance, bdms, self.compute_rpcapi.terminate_instance(context, instance, bdms,
reservations=reservations,
delete_type='delete') delete_type='delete')
def _do_force_delete(self, context, instance, bdms, reservations=None, def _do_force_delete(self, context, instance, bdms, local=False):
local=False):
if local: if local:
instance.vm_state = vm_states.DELETED instance.vm_state = vm_states.DELETED
instance.task_state = None instance.task_state = None
@@ -2298,19 +2088,16 @@ class API(base.Base):
instance.save() instance.save()
else: else:
self.compute_rpcapi.terminate_instance(context, instance, bdms, self.compute_rpcapi.terminate_instance(context, instance, bdms,
reservations=reservations,
delete_type='force_delete') delete_type='force_delete')
def _do_soft_delete(self, context, instance, bdms, reservations=None, def _do_soft_delete(self, context, instance, bdms, local=False):
local=False):
if local: if local:
instance.vm_state = vm_states.SOFT_DELETED instance.vm_state = vm_states.SOFT_DELETED
instance.task_state = None instance.task_state = None
instance.terminated_at = timeutils.utcnow() instance.terminated_at = timeutils.utcnow()
instance.save() instance.save()
else: else:
self.compute_rpcapi.soft_delete_instance(context, instance, self.compute_rpcapi.soft_delete_instance(context, instance)
reservations=reservations)
# NOTE(maoy): we allow delete to be called no matter what vm_state says. # NOTE(maoy): we allow delete to be called no matter what vm_state says.
@check_instance_lock @check_instance_lock
@@ -2343,31 +2130,28 @@ class API(base.Base):
@check_instance_state(vm_state=[vm_states.SOFT_DELETED]) @check_instance_state(vm_state=[vm_states.SOFT_DELETED])
def restore(self, context, instance): def restore(self, context, instance):
"""Restore a previously deleted (but not reclaimed) instance.""" """Restore a previously deleted (but not reclaimed) instance."""
# Reserve quotas # Check quotas
flavor = instance.get_flavor() flavor = instance.get_flavor()
project_id, user_id = quotas_obj.ids_from_instance(context, instance) project_id, user_id = quotas_obj.ids_from_instance(context, instance)
num_instances, quotas = self._check_num_instances_quota( compute_utils.check_num_instances_quota(context, flavor, 1, 1,
context, flavor, 1, 1,
project_id=project_id, user_id=user_id) project_id=project_id, user_id=user_id)
self._record_action_start(context, instance, instance_actions.RESTORE) self._record_action_start(context, instance, instance_actions.RESTORE)
try: if instance.host:
if instance.host: instance.task_state = task_states.RESTORING
instance.task_state = task_states.RESTORING instance.deleted_at = None
instance.deleted_at = None instance.save(expected_task_state=[None])
instance.save(expected_task_state=[None]) # TODO(melwitt): We're not rechecking for strict quota here to
self.compute_rpcapi.restore_instance(context, instance) # guard against going over quota during a race at this time because
else: # the resource consumption for this operation is written to the
instance.vm_state = vm_states.ACTIVE # database by compute.
instance.task_state = None self.compute_rpcapi.restore_instance(context, instance)
instance.deleted_at = None else:
instance.save(expected_task_state=[None]) instance.vm_state = vm_states.ACTIVE
instance.task_state = None
quotas.commit() instance.deleted_at = None
except Exception: instance.save(expected_task_state=[None])
with excutils.save_and_reraise_exception():
quotas.rollback()
@check_instance_lock @check_instance_lock
@check_instance_state(must_have_launched=False) @check_instance_state(must_have_launched=False)
@@ -3210,6 +2994,40 @@ class API(base.Base):
request_spec=request_spec, request_spec=request_spec,
kwargs=kwargs) kwargs=kwargs)
@staticmethod
def _check_quota_for_upsize(context, instance, current_flavor, new_flavor):
project_id, user_id = quotas_obj.ids_from_instance(context,
instance)
# Deltas will be empty if the resize is not an upsize.
deltas = compute_utils.upsize_quota_delta(context, new_flavor,
current_flavor)
if deltas:
try:
res_deltas = {'cores': deltas.get('cores', 0),
'ram': deltas.get('ram', 0)}
objects.Quotas.check_deltas(context, res_deltas,
project_id, user_id=user_id,
check_project_id=project_id,
check_user_id=user_id)
except exception.OverQuota as exc:
quotas = exc.kwargs['quotas']
overs = exc.kwargs['overs']
usages = exc.kwargs['usages']
headroom = compute_utils.get_headroom(quotas, usages,
deltas)
(overs, reqs, total_alloweds,
useds) = compute_utils.get_over_quota_detail(headroom,
overs,
quotas,
deltas)
LOG.warning("%(overs)s quota exceeded for %(pid)s,"
" tried to resize instance.",
{'overs': overs, 'pid': context.project_id})
raise exception.TooManyInstances(overs=overs,
req=reqs,
used=useds,
allowed=total_alloweds)
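
upsize_quota_delta only reports resources that grow, so a same-size or
downsize resize skips the check entirely. An illustrative re-implementation
of that behavior (hypothetical flavors, not Nova's actual helper):

    old = {'vcpus': 2, 'memory_mb': 4096}
    new = {'vcpus': 4, 'memory_mb': 8192}
    deltas = {k: d for k, d in (('cores', new['vcpus'] - old['vcpus']),
                                ('ram', new['memory_mb'] - old['memory_mb']))
              if d > 0}
    # deltas == {'cores': 2, 'ram': 4096}; for a downsize it would be {},
    # and _check_quota_for_upsize above would do nothing.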
@check_instance_lock @check_instance_lock
@check_instance_cell @check_instance_cell
@check_instance_state(vm_state=[vm_states.RESIZED]) @check_instance_state(vm_state=[vm_states.RESIZED])
@@ -3219,31 +3037,26 @@ class API(base.Base):
migration = objects.Migration.get_by_instance_and_status( migration = objects.Migration.get_by_instance_and_status(
elevated, instance.uuid, 'finished') elevated, instance.uuid, 'finished')
# reverse quota reservation for increased resource usage # If this is a resize down, a revert might go over quota.
deltas = compute_utils.reverse_upsize_quota_delta(context, instance) self._check_quota_for_upsize(context, instance, instance.flavor,
quotas = compute_utils.reserve_quota_delta(context, deltas, instance) instance.old_flavor)
instance.task_state = task_states.RESIZE_REVERTING instance.task_state = task_states.RESIZE_REVERTING
try: instance.save(expected_task_state=[None])
instance.save(expected_task_state=[None])
except Exception:
with excutils.save_and_reraise_exception():
quotas.rollback()
migration.status = 'reverting' migration.status = 'reverting'
migration.save() migration.save()
# With cells, the best we can do right now is commit the reservations
# immediately...
if CONF.cells.enable:
quotas.commit()
self._record_action_start(context, instance, self._record_action_start(context, instance,
instance_actions.REVERT_RESIZE) instance_actions.REVERT_RESIZE)
# TODO(melwitt): We're not rechecking for strict quota here to guard
# against going over quota during a race at this time because the
# resource consumption for this operation is written to the database
# by compute.
self.compute_rpcapi.revert_resize(context, instance, self.compute_rpcapi.revert_resize(context, instance,
migration, migration,
migration.dest_compute, migration.dest_compute)
quotas.reservations or [])
@check_instance_lock @check_instance_lock
@check_instance_cell @check_instance_cell
@@ -3251,20 +3064,17 @@ class API(base.Base):
def confirm_resize(self, context, instance, migration=None): def confirm_resize(self, context, instance, migration=None):
"""Confirms a migration/resize and deletes the 'old' instance.""" """Confirms a migration/resize and deletes the 'old' instance."""
elevated = context.elevated() elevated = context.elevated()
# NOTE(melwitt): We're not checking quota here because there isn't a
# change in resource usage when confirming a resize. Resource
# consumption for resizes are written to the database by compute, so
# a confirm resize is just a clean up of the migration objects and a
# state change in compute.
if migration is None: if migration is None:
migration = objects.Migration.get_by_instance_and_status( migration = objects.Migration.get_by_instance_and_status(
elevated, instance.uuid, 'finished') elevated, instance.uuid, 'finished')
# reserve quota only for any decrease in resource usage
deltas = compute_utils.downsize_quota_delta(context, instance)
quotas = compute_utils.reserve_quota_delta(context, deltas, instance)
migration.status = 'confirming' migration.status = 'confirming'
migration.save() migration.save()
# With cells, the best we can do right now is commit the reservations
# immediately...
if CONF.cells.enable:
quotas.commit()
self._record_action_start(context, instance, self._record_action_start(context, instance,
instance_actions.CONFIRM_RESIZE) instance_actions.CONFIRM_RESIZE)
@@ -3272,19 +3082,15 @@ class API(base.Base):
self.compute_rpcapi.confirm_resize(context, self.compute_rpcapi.confirm_resize(context,
instance, instance,
migration, migration,
migration.source_compute, migration.source_compute)
quotas.reservations or [])
@staticmethod @staticmethod
def _resize_cells_support(context, quotas, instance, def _resize_cells_support(context, instance,
current_instance_type, new_instance_type): current_instance_type, new_instance_type):
"""Special API cell logic for resize.""" """Special API cell logic for resize."""
# With cells, the best we can do right now is commit the
# reservations immediately...
quotas.commit()
# NOTE(johannes/comstud): The API cell needs a local migration # NOTE(johannes/comstud): The API cell needs a local migration
# record for later resize_confirm and resize_reverts to deal # record for later resize_confirm and resize_reverts.
# with quotas. We don't need source and/or destination # We don't need source and/or destination
# information, just the old and new flavors. Status is set to # information, just the old and new flavors. Status is set to
# 'finished' since nothing else will update the status along # 'finished' since nothing else will update the status along
# the way. # the way.
@@ -3352,29 +3158,9 @@ class API(base.Base):
# ensure there is sufficient headroom for upsizes # ensure there is sufficient headroom for upsizes
if flavor_id: if flavor_id:
deltas = compute_utils.upsize_quota_delta(context, self._check_quota_for_upsize(context, instance,
new_instance_type, current_instance_type,
current_instance_type) new_instance_type)
try:
quotas = compute_utils.reserve_quota_delta(context, deltas,
instance)
except exception.OverQuota as exc:
quotas = exc.kwargs['quotas']
overs = exc.kwargs['overs']
usages = exc.kwargs['usages']
headroom = self._get_headroom(quotas, usages, deltas)
(overs, reqs, total_alloweds,
useds) = self._get_over_quota_detail(headroom, overs, quotas,
deltas)
LOG.warning("%(overs)s quota exceeded for %(pid)s,"
" tried to resize instance.",
{'overs': overs, 'pid': context.project_id})
raise exception.TooManyInstances(overs=overs,
req=reqs,
used=useds,
allowed=total_alloweds)
else:
quotas = objects.Quotas(context=context)
instance.task_state = task_states.RESIZE_PREP instance.task_state = task_states.RESIZE_PREP
instance.progress = 0 instance.progress = 0
@@ -3387,8 +3173,8 @@ class API(base.Base):
filter_properties['ignore_hosts'].append(instance.host) filter_properties['ignore_hosts'].append(instance.host)
if self.cell_type == 'api': if self.cell_type == 'api':
# Commit reservations early and create migration record. # Create migration record.
self._resize_cells_support(context, quotas, instance, self._resize_cells_support(context, instance,
current_instance_type, current_instance_type,
new_instance_type) new_instance_type)
@@ -3412,11 +3198,14 @@ class API(base.Base):
# to them, we need to support the old way # to them, we need to support the old way
request_spec = None request_spec = None
# TODO(melwitt): We're not rechecking for strict quota here to guard
# against going over quota during a race at this time because the
# resource consumption for this operation is written to the database
# by compute.
scheduler_hint = {'filter_properties': filter_properties} scheduler_hint = {'filter_properties': filter_properties}
self.compute_task_api.resize_instance(context, instance, self.compute_task_api.resize_instance(context, instance,
extra_instance_updates, scheduler_hint=scheduler_hint, extra_instance_updates, scheduler_hint=scheduler_hint,
flavor=new_instance_type, flavor=new_instance_type,
reservations=quotas.reservations or [],
clean_shutdown=clean_shutdown, clean_shutdown=clean_shutdown,
request_spec=request_spec) request_spec=request_spec)


@@ -697,23 +697,13 @@ class ComputeManager(manager.Manager):
instance.destroy() instance.destroy()
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid) context, instance.uuid)
quotas = objects.Quotas(context=context)
project_id, user_id = objects.quotas.ids_from_instance(context,
instance)
quotas.reserve(project_id=project_id, user_id=user_id, instances=-1,
cores=-instance.flavor.vcpus,
ram=-instance.flavor.memory_mb)
self._complete_deletion(context, self._complete_deletion(context,
instance, instance,
bdms, bdms,
quotas,
system_meta) system_meta)
def _complete_deletion(self, context, instance, bdms, def _complete_deletion(self, context, instance, bdms,
quotas, system_meta): system_meta):
if quotas:
quotas.commit()
# ensure block device mappings are not leaked # ensure block device mappings are not leaked
for bdm in bdms: for bdm in bdms:
bdm.destroy() bdm.destroy()
@@ -726,18 +716,6 @@ class ComputeManager(manager.Manager):
phase=fields.NotificationPhase.END) phase=fields.NotificationPhase.END)
self._delete_scheduler_instance_info(context, instance.uuid) self._delete_scheduler_instance_info(context, instance.uuid)
def _create_reservations(self, context, instance, project_id, user_id):
vcpus = instance.flavor.vcpus
mem_mb = instance.flavor.memory_mb
quotas = objects.Quotas(context=context)
quotas.reserve(project_id=project_id,
user_id=user_id,
instances=-1,
cores=-vcpus,
ram=-mem_mb)
return quotas
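
The removed helper decremented usage by "reserving" negative deltas; with a
hypothetical 4-vcpu / 8192 MB flavor it effectively did:

    quotas.reserve(project_id=project_id, user_id=user_id,
                   instances=-1, cores=-4, ram=-8192)

With counting quotas there is no such bookkeeping to perform on delete:
usage drops as soon as the instance record is destroyed.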
def _init_instance(self, context, instance): def _init_instance(self, context, instance):
'''Initialize this instance during service init.''' '''Initialize this instance during service init.'''
@@ -838,12 +816,7 @@ class ComputeManager(manager.Manager):
instance.obj_load_attr('system_metadata') instance.obj_load_attr('system_metadata')
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid) context, instance.uuid)
project_id, user_id = objects.quotas.ids_from_instance( self._delete_instance(context, instance, bdms)
context, instance)
quotas = self._create_reservations(context, instance,
project_id, user_id)
self._delete_instance(context, instance, bdms, quotas)
except Exception: except Exception:
# we don't want that an exception blocks the init_host # we don't want that an exception blocks the init_host
LOG.exception('Failed to complete a deletion', LOG.exception('Failed to complete a deletion',
@@ -2351,78 +2324,62 @@ class ComputeManager(manager.Manager):
six.reraise(exc_info[0], exc_info[1], exc_info[2]) six.reraise(exc_info[0], exc_info[1], exc_info[2])
@hooks.add_hook("delete_instance") @hooks.add_hook("delete_instance")
def _delete_instance(self, context, instance, bdms, quotas): def _delete_instance(self, context, instance, bdms):
"""Delete an instance on this host. Commit or rollback quotas """Delete an instance on this host.
as necessary.
:param context: nova request context :param context: nova request context
:param instance: nova.objects.instance.Instance object :param instance: nova.objects.instance.Instance object
:param bdms: nova.objects.block_device.BlockDeviceMappingList object :param bdms: nova.objects.block_device.BlockDeviceMappingList object
:param quotas: nova.objects.quotas.Quotas object
""" """
was_soft_deleted = instance.vm_state == vm_states.SOFT_DELETED events = self.instance_events.clear_events_for_instance(instance)
if was_soft_deleted: if events:
# Instances in SOFT_DELETED vm_state have already had quotas LOG.debug('Events pending at deletion: %(events)s',
# decremented. {'events': ','.join(events.keys())},
try: instance=instance)
quotas.rollback() self._notify_about_instance_usage(context, instance,
except Exception: "delete.start")
pass compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.DELETE,
phase=fields.NotificationPhase.START)
try: self._shutdown_instance(context, instance, bdms)
events = self.instance_events.clear_events_for_instance(instance) # NOTE(dims): instance.info_cache.delete() should be called after
if events: # _shutdown_instance in the compute manager as shutdown calls
LOG.debug('Events pending at deletion: %(events)s', # deallocate_for_instance so the info_cache is still needed
{'events': ','.join(events.keys())}, # at this point.
instance=instance) if instance.info_cache is not None:
self._notify_about_instance_usage(context, instance, instance.info_cache.delete()
"delete.start") else:
compute_utils.notify_about_instance_action(context, instance, # NOTE(yoshimatsu): Avoid AttributeError if instance.info_cache
self.host, action=fields.NotificationAction.DELETE, # is None. When the root cause that instance.info_cache becomes
phase=fields.NotificationPhase.START) # None is fixed, the log level should be reconsidered.
LOG.warning("Info cache for instance could not be found. "
"Ignore.", instance=instance)
self._shutdown_instance(context, instance, bdms) # NOTE(vish): We have already deleted the instance, so we have
# NOTE(dims): instance.info_cache.delete() should be called after # to ignore problems cleaning up the volumes. It
# _shutdown_instance in the compute manager as shutdown calls # would be nice to let the user know somehow that
# deallocate_for_instance so the info_cache is still needed # the volume deletion failed, but it is not
# at this point. # acceptable to have an instance that can not be
if instance.info_cache is not None: # deleted. Perhaps this could be reworked in the
instance.info_cache.delete() # future to set an instance fault the first time
else: # and to only ignore the failure if the instance
# NOTE(yoshimatsu): Avoid AttributeError if instance.info_cache # is already in ERROR.
# is None. When the root cause that instance.info_cache becomes self._cleanup_volumes(context, instance.uuid, bdms,
# None is fixed, the log level should be reconsidered. raise_exc=False)
LOG.warning("Info cache for instance could not be found. " # if a delete task succeeded, always update vm state and task
"Ignore.", instance=instance) # state without expecting task state to be DELETING
instance.vm_state = vm_states.DELETED
# NOTE(vish): We have already deleted the instance, so we have instance.task_state = None
# to ignore problems cleaning up the volumes. It instance.power_state = power_state.NOSTATE
# would be nice to let the user know somehow that instance.terminated_at = timeutils.utcnow()
# the volume deletion failed, but it is not instance.save()
# acceptable to have an instance that can not be system_meta = instance.system_metadata
# deleted. Perhaps this could be reworked in the instance.destroy()
# future to set an instance fault the first time
# and to only ignore the failure if the instance
# is already in ERROR.
self._cleanup_volumes(context, instance.uuid, bdms,
raise_exc=False)
# if a delete task succeeded, always update vm state and task
# state without expecting task state to be DELETING
instance.vm_state = vm_states.DELETED
instance.task_state = None
instance.power_state = power_state.NOSTATE
instance.terminated_at = timeutils.utcnow()
instance.save()
system_meta = instance.system_metadata
instance.destroy()
except Exception:
with excutils.save_and_reraise_exception():
quotas.rollback()
self._complete_deletion(context, self._complete_deletion(context,
instance, instance,
bdms, bdms,
quotas,
system_meta) system_meta)
@wrap_exception() @wrap_exception()
@@ -2431,10 +2388,6 @@ class ComputeManager(manager.Manager):
@wrap_instance_fault @wrap_instance_fault
def terminate_instance(self, context, instance, bdms, reservations): def terminate_instance(self, context, instance, bdms, reservations):
"""Terminate an instance on this host.""" """Terminate an instance on this host."""
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
@utils.synchronized(instance.uuid) @utils.synchronized(instance.uuid)
def do_terminate_instance(instance, bdms): def do_terminate_instance(instance, bdms):
# NOTE(mriedem): If we are deleting the instance while it was # NOTE(mriedem): If we are deleting the instance while it was
@@ -2454,7 +2407,7 @@ class ComputeManager(manager.Manager):
context, instance.uuid) context, instance.uuid)
break break
try: try:
self._delete_instance(context, instance, bdms, quotas) self._delete_instance(context, instance, bdms)
except exception.InstanceNotFound: except exception.InstanceNotFound:
LOG.info("Instance disappeared during terminate", LOG.info("Instance disappeared during terminate",
instance=instance) instance=instance)
@@ -2607,30 +2560,21 @@ class ComputeManager(manager.Manager):
@wrap_instance_fault @wrap_instance_fault
def soft_delete_instance(self, context, instance, reservations): def soft_delete_instance(self, context, instance, reservations):
"""Soft delete an instance on this host.""" """Soft delete an instance on this host."""
self._notify_about_instance_usage(context, instance,
quotas = objects.Quotas.from_reservations(context, "soft_delete.start")
reservations, compute_utils.notify_about_instance_action(context, instance,
instance=instance) self.host, action=fields.NotificationAction.SOFT_DELETE,
phase=fields.NotificationPhase.START)
try: try:
self._notify_about_instance_usage(context, instance, self.driver.soft_delete(instance)
"soft_delete.start") except NotImplementedError:
compute_utils.notify_about_instance_action(context, instance, # Fallback to just powering off the instance if the
self.host, action=fields.NotificationAction.SOFT_DELETE, # hypervisor doesn't implement the soft_delete method
phase=fields.NotificationPhase.START) self.driver.power_off(instance)
try: instance.power_state = self._get_power_state(context, instance)
self.driver.soft_delete(instance) instance.vm_state = vm_states.SOFT_DELETED
except NotImplementedError: instance.task_state = None
# Fallback to just powering off the instance if the instance.save(expected_task_state=[task_states.SOFT_DELETING])
# hypervisor doesn't implement the soft_delete method
self.driver.power_off(instance)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.SOFT_DELETED
instance.task_state = None
instance.save(expected_task_state=[task_states.SOFT_DELETING])
except Exception:
with excutils.save_and_reraise_exception():
quotas.rollback()
quotas.commit()
self._notify_about_instance_usage(context, instance, "soft_delete.end") self._notify_about_instance_usage(context, instance, "soft_delete.end")
compute_utils.notify_about_instance_action(context, instance, compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.SOFT_DELETE, self.host, action=fields.NotificationAction.SOFT_DELETE,
@@ -3504,11 +3448,6 @@ class ComputeManager(manager.Manager):
@wrap_instance_event(prefix='compute') @wrap_instance_event(prefix='compute')
@wrap_instance_fault @wrap_instance_fault
def confirm_resize(self, context, instance, reservations, migration): def confirm_resize(self, context, instance, reservations, migration):
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
@utils.synchronized(instance.uuid) @utils.synchronized(instance.uuid)
def do_confirm_resize(context, instance, migration_id): def do_confirm_resize(context, instance, migration_id):
# NOTE(wangpan): Get the migration status from db, if it has been # NOTE(wangpan): Get the migration status from db, if it has been
@@ -3523,20 +3462,17 @@ class ComputeManager(manager.Manager):
except exception.MigrationNotFound: except exception.MigrationNotFound:
LOG.error("Migration %s is not found during confirmation", LOG.error("Migration %s is not found during confirmation",
migration_id, instance=instance) migration_id, instance=instance)
quotas.rollback()
return return
if migration.status == 'confirmed': if migration.status == 'confirmed':
LOG.info("Migration %s is already confirmed", LOG.info("Migration %s is already confirmed",
migration_id, instance=instance) migration_id, instance=instance)
quotas.rollback()
return return
elif migration.status not in ('finished', 'confirming'): elif migration.status not in ('finished', 'confirming'):
LOG.warning("Unexpected confirmation status '%(status)s' " LOG.warning("Unexpected confirmation status '%(status)s' "
"of migration %(id)s, exit confirmation process", "of migration %(id)s, exit confirmation process",
{"status": migration.status, "id": migration_id}, {"status": migration.status, "id": migration_id},
instance=instance) instance=instance)
quotas.rollback()
return return
# NOTE(wangpan): Get the instance from db, if it has been # NOTE(wangpan): Get the instance from db, if it has been
@@ -3549,22 +3485,18 @@ class ComputeManager(manager.Manager):
except exception.InstanceNotFound: except exception.InstanceNotFound:
LOG.info("Instance is not found during confirmation", LOG.info("Instance is not found during confirmation",
instance=instance) instance=instance)
quotas.rollback()
return return
self._confirm_resize(context, instance, quotas, self._confirm_resize(context, instance, migration=migration)
migration=migration)
do_confirm_resize(context, instance, migration.id) do_confirm_resize(context, instance, migration.id)
def _confirm_resize(self, context, instance, quotas, def _confirm_resize(self, context, instance, migration=None):
migration=None):
"""Destroys the source instance.""" """Destroys the source instance."""
self._notify_about_instance_usage(context, instance, self._notify_about_instance_usage(context, instance,
"resize.confirm.start") "resize.confirm.start")
with self._error_out_instance_on_exception(context, instance, with self._error_out_instance_on_exception(context, instance):
quotas=quotas):
# NOTE(danms): delete stashed migration information # NOTE(danms): delete stashed migration information
old_instance_type = instance.old_flavor old_instance_type = instance.old_flavor
instance.old_flavor = None instance.old_flavor = None
@@ -3614,8 +3546,6 @@ class ComputeManager(manager.Manager):
context, instance, "resize.confirm.end", context, instance, "resize.confirm.end",
network_info=network_info) network_info=network_info)
quotas.commit()
@wrap_exception() @wrap_exception()
@reverts_task_state @reverts_task_state
@wrap_instance_event(prefix='compute') @wrap_instance_event(prefix='compute')
@@ -3628,18 +3558,12 @@ class ComputeManager(manager.Manager):
source machine. source machine.
""" """
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
# NOTE(comstud): A revert_resize is essentially a resize back to # NOTE(comstud): A revert_resize is essentially a resize back to
# the old size, so we need to send a usage event here. # the old size, so we need to send a usage event here.
compute_utils.notify_usage_exists(self.notifier, context, instance, compute_utils.notify_usage_exists(self.notifier, context, instance,
current_period=True) current_period=True)
with self._error_out_instance_on_exception(context, instance, with self._error_out_instance_on_exception(context, instance):
quotas=quotas):
# NOTE(tr3buchet): tear down networks on destination host # NOTE(tr3buchet): tear down networks on destination host
self.network_api.setup_networks_on_host(context, instance, self.network_api.setup_networks_on_host(context, instance,
teardown=True) teardown=True)
@@ -3681,8 +3605,7 @@ class ComputeManager(manager.Manager):
rt.drop_move_claim(context, instance, instance.node) rt.drop_move_claim(context, instance, instance.node)
self.compute_rpcapi.finish_revert_resize(context, instance, self.compute_rpcapi.finish_revert_resize(context, instance,
migration, migration.source_compute, migration, migration.source_compute)
quotas.reservations)
@wrap_exception() @wrap_exception()
@reverts_task_state @reverts_task_state
@@ -3696,13 +3619,7 @@ class ComputeManager(manager.Manager):
revert the resized attributes in the database. revert the resized attributes in the database.
""" """
with self._error_out_instance_on_exception(context, instance):
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
with self._error_out_instance_on_exception(context, instance,
quotas=quotas):
self._notify_about_instance_usage( self._notify_about_instance_usage(
context, instance, "resize.revert.start") context, instance, "resize.revert.start")
@@ -3762,11 +3679,9 @@ class ComputeManager(manager.Manager):
self._notify_about_instance_usage( self._notify_about_instance_usage(
context, instance, "resize.revert.end") context, instance, "resize.revert.end")
quotas.commit()
def _prep_resize(self, context, image, instance, instance_type, def _prep_resize(self, context, image, instance, instance_type,
quotas, request_spec, filter_properties, node, request_spec, filter_properties, node, clean_shutdown=True):
clean_shutdown=True):
if not filter_properties: if not filter_properties:
filter_properties = {} filter_properties = {}
@@ -3801,8 +3716,7 @@ class ComputeManager(manager.Manager):
LOG.info('Migrating', instance=instance) LOG.info('Migrating', instance=instance)
self.compute_rpcapi.resize_instance( self.compute_rpcapi.resize_instance(
context, instance, claim.migration, image, context, instance, claim.migration, image,
instance_type, quotas.reservations, instance_type, clean_shutdown)
clean_shutdown)
@wrap_exception() @wrap_exception()
@reverts_task_state @reverts_task_state
@@ -3827,21 +3741,15 @@ class ComputeManager(manager.Manager):
if not isinstance(instance_type, objects.Flavor): if not isinstance(instance_type, objects.Flavor):
instance_type = objects.Flavor.get_by_id(context, instance_type = objects.Flavor.get_by_id(context,
instance_type['id']) instance_type['id'])
with self._error_out_instance_on_exception(context, instance):
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
with self._error_out_instance_on_exception(context, instance,
quotas=quotas):
compute_utils.notify_usage_exists(self.notifier, context, instance, compute_utils.notify_usage_exists(self.notifier, context, instance,
current_period=True) current_period=True)
self._notify_about_instance_usage( self._notify_about_instance_usage(
context, instance, "resize.prep.start") context, instance, "resize.prep.start")
try: try:
self._prep_resize(context, image, instance, self._prep_resize(context, image, instance,
instance_type, quotas, instance_type, request_spec,
request_spec, filter_properties, filter_properties, node, clean_shutdown)
node, clean_shutdown)
# NOTE(dgenin): This is thrown in LibvirtDriver when the # NOTE(dgenin): This is thrown in LibvirtDriver when the
# instance to be migrated is backed by LVM. # instance to be migrated is backed by LVM.
# Remove when LVM migration is implemented. # Remove when LVM migration is implemented.
@@ -3851,7 +3759,7 @@ class ComputeManager(manager.Manager):
# try to re-schedule the resize elsewhere: # try to re-schedule the resize elsewhere:
exc_info = sys.exc_info() exc_info = sys.exc_info()
self._reschedule_resize_or_reraise(context, image, instance, self._reschedule_resize_or_reraise(context, image, instance,
exc_info, instance_type, quotas, request_spec, exc_info, instance_type, request_spec,
filter_properties) filter_properties)
finally: finally:
extra_usage_info = dict( extra_usage_info = dict(
@@ -3863,7 +3771,7 @@ class ComputeManager(manager.Manager):
extra_usage_info=extra_usage_info) extra_usage_info=extra_usage_info)
def _reschedule_resize_or_reraise(self, context, image, instance, exc_info, def _reschedule_resize_or_reraise(self, context, image, instance, exc_info,
instance_type, quotas, request_spec, filter_properties): instance_type, request_spec, filter_properties):
"""Try to re-schedule the resize or re-raise the original error to """Try to re-schedule the resize or re-raise the original error to
error out the instance. error out the instance.
""" """
@@ -3878,8 +3786,7 @@ class ComputeManager(manager.Manager):
try: try:
reschedule_method = self.compute_task_api.resize_instance reschedule_method = self.compute_task_api.resize_instance
scheduler_hint = dict(filter_properties=filter_properties) scheduler_hint = dict(filter_properties=filter_properties)
method_args = (instance, None, scheduler_hint, instance_type, method_args = (instance, None, scheduler_hint, instance_type)
quotas.reservations)
task_state = task_states.RESIZE_PREP task_state = task_states.RESIZE_PREP
rescheduled = self._reschedule(context, request_spec, rescheduled = self._reschedule(context, request_spec,
@@ -3914,12 +3821,7 @@ class ComputeManager(manager.Manager):
reservations, migration, instance_type, reservations, migration, instance_type,
clean_shutdown): clean_shutdown):
"""Starts the migration of a running instance to another host.""" """Starts the migration of a running instance to another host."""
with self._error_out_instance_on_exception(context, instance):
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
with self._error_out_instance_on_exception(context, instance,
quotas=quotas):
# TODO(chaochin) Remove this until v5 RPC API # TODO(chaochin) Remove this until v5 RPC API
# Code downstream may expect extra_specs to be populated since it # Code downstream may expect extra_specs to be populated since it
# is receiving an object, so lookup the flavor to ensure this. # is receiving an object, so lookup the flavor to ensure this.
@@ -3975,8 +3877,7 @@ class ComputeManager(manager.Manager):
instance.save(expected_task_state=task_states.RESIZE_MIGRATING) instance.save(expected_task_state=task_states.RESIZE_MIGRATING)
self.compute_rpcapi.finish_resize(context, instance, self.compute_rpcapi.finish_resize(context, instance,
migration, image, disk_info, migration, image, disk_info, migration.dest_compute)
migration.dest_compute, reservations=quotas.reservations)
self._notify_about_instance_usage(context, instance, "resize.end", self._notify_about_instance_usage(context, instance, "resize.end",
network_info=network_info) network_info=network_info)
@@ -4003,8 +3904,6 @@ class ComputeManager(manager.Manager):
@staticmethod @staticmethod
def _set_instance_info(instance, instance_type): def _set_instance_info(instance, instance_type):
instance.instance_type_id = instance_type.id instance.instance_type_id = instance_type.id
# NOTE(danms): These are purely for any legacy code that still
# looks at them.
instance.memory_mb = instance_type.memory_mb instance.memory_mb = instance_type.memory_mb
instance.vcpus = instance_type.vcpus instance.vcpus = instance_type.vcpus
instance.root_gb = instance_type.root_gb instance.root_gb = instance_type.root_gb
@@ -4103,24 +4002,14 @@ class ComputeManager(manager.Manager):
new host machine. new host machine.
""" """
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
try: try:
image_meta = objects.ImageMeta.from_dict(image) image_meta = objects.ImageMeta.from_dict(image)
self._finish_resize(context, instance, migration, self._finish_resize(context, instance, migration,
disk_info, image_meta) disk_info, image_meta)
quotas.commit()
except Exception: except Exception:
LOG.exception('Setting instance vm_state to ERROR', LOG.exception('Setting instance vm_state to ERROR',
instance=instance) instance=instance)
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
try:
quotas.rollback()
except Exception:
LOG.exception("Failed to rollback quota for failed "
"finish_resize",
instance=instance)
self._set_instance_obj_error_state(context, instance) self._set_instance_obj_error_state(context, instance)
@wrap_exception() @wrap_exception()
@@ -6651,14 +6540,6 @@ class ComputeManager(manager.Manager):
LOG.debug("CONF.reclaim_instance_interval <= 0, skipping...") LOG.debug("CONF.reclaim_instance_interval <= 0, skipping...")
return return
# TODO(comstud, jichenjc): Dummy quota object for now See bug 1296414.
# The only case that the quota might be inconsistent is
# the compute node died between set instance state to SOFT_DELETED
# and quota commit to DB. When compute node starts again
# it will have no idea the reservation is committed or not or even
# expired, since it's a rare case, so marked as todo.
quotas = objects.Quotas.from_reservations(context, None)
filters = {'vm_state': vm_states.SOFT_DELETED, filters = {'vm_state': vm_states.SOFT_DELETED,
'task_state': None, 'task_state': None,
'host': self.host} 'host': self.host}
@@ -6672,7 +6553,7 @@ class ComputeManager(manager.Manager):
context, instance.uuid) context, instance.uuid)
LOG.info('Reclaiming deleted instance', instance=instance) LOG.info('Reclaiming deleted instance', instance=instance)
try: try:
self._delete_instance(context, instance, bdms, quotas) self._delete_instance(context, instance, bdms)
except Exception as e: except Exception as e:
LOG.warning("Periodic reclaim failed to delete " LOG.warning("Periodic reclaim failed to delete "
"instance: %s", "instance: %s",
@ -6848,15 +6729,12 @@ class ComputeManager(manager.Manager):
@contextlib.contextmanager @contextlib.contextmanager
def _error_out_instance_on_exception(self, context, instance, def _error_out_instance_on_exception(self, context, instance,
quotas=None,
instance_state=vm_states.ACTIVE): instance_state=vm_states.ACTIVE):
instance_uuid = instance.uuid instance_uuid = instance.uuid
try: try:
yield yield
except NotImplementedError as error: except NotImplementedError as error:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
if quotas:
quotas.rollback()
LOG.info("Setting instance back to %(state)s after: " LOG.info("Setting instance back to %(state)s after: "
"%(error)s", "%(error)s",
{'state': instance_state, 'error': error}, {'state': instance_state, 'error': error},
@ -6865,8 +6743,6 @@ class ComputeManager(manager.Manager):
vm_state=instance_state, vm_state=instance_state,
task_state=None) task_state=None)
except exception.InstanceFaultRollback as error: except exception.InstanceFaultRollback as error:
if quotas:
quotas.rollback()
LOG.info("Setting instance back to ACTIVE after: %s", LOG.info("Setting instance back to ACTIVE after: %s",
error, instance_uuid=instance_uuid) error, instance_uuid=instance_uuid)
self._instance_update(context, instance, self._instance_update(context, instance,
@ -6877,8 +6753,6 @@ class ComputeManager(manager.Manager):
LOG.exception('Setting instance vm_state to ERROR', LOG.exception('Setting instance vm_state to ERROR',
instance_uuid=instance_uuid) instance_uuid=instance_uuid)
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
if quotas:
quotas.rollback()
self._set_instance_obj_error_state(context, instance) self._set_instance_obj_error_state(context, instance)
@wrap_exception() @wrap_exception()
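A side note on the pattern above: every error path in the old code had to pair state cleanup with a quota rollback, because reservations were held open across the operation. With counted quotas there is simply no reservation object left to release. A minimal, self-contained sketch of the simplified shape (a hypothetical helper, not Nova's actual _error_out_instance_on_exception signature):

    import contextlib

    @contextlib.contextmanager
    def error_out_on_exception(set_error_state):
        # With counted quotas, the error path only records the fault;
        # there is no reservation left to roll back.
        try:
            yield
        except Exception:
            set_error_state()
            raise

    # Usage: any exception inside the block marks the instance as errored.
    state = {'vm_state': 'active'}
    try:
        with error_out_on_exception(lambda: state.update(vm_state='error')):
            raise NotImplementedError('resize not supported')
    except NotImplementedError:
        assert state['vm_state'] == 'error'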
View File
@@ -687,6 +687,132 @@ def reserve_quota_delta(context, deltas, instance):
    return quotas


+def get_headroom(quotas, usages, deltas):
+    headroom = {res: quotas[res] - usages[res]
+                for res in quotas.keys()}
+    # If quota_cores is unlimited [-1]:
+    # - set cores headroom based on instances headroom:
+    if quotas.get('cores') == -1:
+        if deltas.get('cores'):
+            hc = headroom.get('instances', 1) * deltas['cores']
+            headroom['cores'] = hc / deltas.get('instances', 1)
+        else:
+            headroom['cores'] = headroom.get('instances', 1)
+
+    # If quota_ram is unlimited [-1]:
+    # - set ram headroom based on instances headroom:
+    if quotas.get('ram') == -1:
+        if deltas.get('ram'):
+            hr = headroom.get('instances', 1) * deltas['ram']
+            headroom['ram'] = hr / deltas.get('instances', 1)
+        else:
+            headroom['ram'] = headroom.get('instances', 1)
+
+    return headroom
+
+
+def check_num_instances_quota(context, instance_type, min_count,
+                              max_count, project_id=None, user_id=None,
+                              orig_num_req=None):
+    """Enforce quota limits on number of instances created."""
+    # project_id is used for the TooManyInstances error message
+    if project_id is None:
+        project_id = context.project_id
+    # Determine requested cores and ram
+    req_cores = max_count * instance_type.vcpus
+    req_ram = max_count * instance_type.memory_mb
+    deltas = {'instances': max_count, 'cores': req_cores, 'ram': req_ram}
+
+    try:
+        objects.Quotas.check_deltas(context, deltas,
+                                    project_id, user_id=user_id,
+                                    check_project_id=project_id,
+                                    check_user_id=user_id)
+    except exception.OverQuota as exc:
+        quotas = exc.kwargs['quotas']
+        overs = exc.kwargs['overs']
+        usages = exc.kwargs['usages']
+        # This is for the recheck quota case where we used a delta of zero.
+        if min_count == max_count == 0:
+            # orig_num_req is the original number of instances requested in the
+            # case of a recheck quota, for use in the over quota exception.
+            req_cores = orig_num_req * instance_type.vcpus
+            req_ram = orig_num_req * instance_type.memory_mb
+            requested = {'instances': orig_num_req, 'cores': req_cores,
+                         'ram': req_ram}
+            (overs, reqs, total_alloweds, useds) = get_over_quota_detail(
+                deltas, overs, quotas, requested)
+            msg = "Cannot run any more instances of this type."
+            params = {'overs': overs, 'pid': project_id, 'msg': msg}
+            LOG.debug("%(overs)s quota exceeded for %(pid)s. %(msg)s",
+                      params)
+            raise exception.TooManyInstances(overs=overs,
+                                             req=reqs,
+                                             used=useds,
+                                             allowed=total_alloweds)
+        # OK, we exceeded quota; let's figure out why...
+        headroom = get_headroom(quotas, usages, deltas)
+
+        allowed = headroom.get('instances', 1)
+        # Reduce 'allowed' instances in line with the cores & ram headroom
+        if instance_type.vcpus:
+            allowed = min(allowed,
+                          headroom['cores'] // instance_type.vcpus)
+        if instance_type.memory_mb:
+            allowed = min(allowed,
+                          headroom['ram'] // instance_type.memory_mb)
+
+        # Convert to the appropriate exception message
+        if allowed <= 0:
+            msg = "Cannot run any more instances of this type."
+        elif min_count <= allowed <= max_count:
+            # We're actually OK, but still need to check against allowed
+            return check_num_instances_quota(context, instance_type, min_count,
+                                             allowed, project_id=project_id,
+                                             user_id=user_id)
+        else:
+            msg = "Can only run %s more instances of this type." % allowed
+
+        num_instances = (str(min_count) if min_count == max_count else
+                         "%s-%s" % (min_count, max_count))
+        requested = dict(instances=num_instances, cores=req_cores,
+                         ram=req_ram)
+        (overs, reqs, total_alloweds, useds) = get_over_quota_detail(
+            headroom, overs, quotas, requested)
+        params = {'overs': overs, 'pid': project_id,
+                  'min_count': min_count, 'max_count': max_count,
+                  'msg': msg}
+
+        if min_count == max_count:
+            LOG.debug("%(overs)s quota exceeded for %(pid)s,"
+                      " tried to run %(min_count)d instances. "
+                      "%(msg)s", params)
+        else:
+            LOG.debug("%(overs)s quota exceeded for %(pid)s,"
+                      " tried to run between %(min_count)d and"
+                      " %(max_count)d instances. %(msg)s",
+                      params)
+        raise exception.TooManyInstances(overs=overs,
+                                         req=reqs,
+                                         used=useds,
+                                         allowed=total_alloweds)
+
+    return max_count
+
+
+def get_over_quota_detail(headroom, overs, quotas, requested):
+    reqs = []
+    useds = []
+    total_alloweds = []
+    for resource in overs:
+        reqs.append(str(requested[resource]))
+        useds.append(str(quotas[resource] - headroom[resource]))
+        total_alloweds.append(str(quotas[resource]))
+    (overs, reqs, useds, total_alloweds) = map(', '.join, (
+        overs, reqs, useds, total_alloweds))
+    return overs, reqs, total_alloweds, useds
+
+
def remove_shelved_keys_from_system_metadata(instance):
    # Delete system_metadata for a shelved instance
    for key in ['shelved_at', 'shelved_image_id', 'shelved_host']:
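The moved check_num_instances_quota never creates reservations: it builds a deltas dict and asks Quotas.check_deltas to recount usage and compare it against the limits. A self-contained sketch of that count-and-compare semantics under simplified assumptions (this is not the real objects.Quotas API; the -1 convention for unlimited mirrors get_headroom above):

    class OverQuota(Exception):
        pass

    def check_deltas(deltas, count_usage, limits):
        # Recount usage from the records, then compare; a failed check
        # leaves nothing behind that would need rolling back.
        usages = count_usage()
        for resource, delta in deltas.items():
            limit = limits[resource]
            if limit >= 0 and usages[resource] + delta > limit:
                raise OverQuota(resource)

    # One instance (2 cores, 512 MB) exists; asking for two more
    # instances at 2 cores each trips the 4-core limit.
    usage = {'instances': 1, 'cores': 2, 'ram': 512}
    limits = {'instances': 10, 'cores': 4, 'ram': -1}  # -1 = unlimited
    try:
        check_deltas({'instances': 2, 'cores': 4, 'ram': 1024},
                     lambda: usage, limits)
    except OverQuota as exc:
        print('over quota on:', exc)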
View File
@@ -925,15 +925,11 @@ class ComputeTaskManager(base.Base):
            return

        host_mapping_cache = {}
+       instances = []

        for (build_request, request_spec, host) in six.moves.zip(
                build_requests, request_specs, hosts):
-           filter_props = request_spec.to_legacy_filter_properties_dict()
            instance = build_request.get_new_instance(context)
-           scheduler_utils.populate_retry(filter_props, instance.uuid)
-           scheduler_utils.populate_filter_properties(filter_props,
-                                                      host)
            # Convert host from the scheduler into a cell record
            if host['host'] not in host_mapping_cache:
                try:
@@ -947,6 +943,8 @@ class ComputeTaskManager(base.Base):
                    self._bury_in_cell0(context, request_spec, exc,
                                        build_requests=[build_request],
                                        instances=[instance])
+                   # This is a placeholder in case the quota recheck fails.
+                   instances.append(None)
                    continue
                else:
                    host_mapping = host_mapping_cache[host['host']]
@@ -963,6 +961,8 @@ class ComputeTaskManager(base.Base):
                # the build request is gone so we're done for this instance
                LOG.debug('While scheduling instance, the build request '
                          'was already deleted.', instance=instance)
+               # This is a placeholder in case the quota recheck fails.
+               instances.append(None)
                continue
            else:
                instance.availability_zone = (
@@ -970,7 +970,34 @@ class ComputeTaskManager(base.Base):
                        context, host['host']))
                with obj_target_cell(instance, cell):
                    instance.create()
+               instances.append(instance)
+
+       # NOTE(melwitt): We recheck the quota after creating the
+       # objects to prevent users from allocating more resources
+       # than their allowed quota in the event of a race. This is
+       # configurable because it can be expensive if strict quota
+       # limits are not required in a deployment.
+       if CONF.quota.recheck_quota:
+           try:
+               compute_utils.check_num_instances_quota(
+                   context, instance.flavor, 0, 0,
+                   orig_num_req=len(build_requests))
+           except exception.TooManyInstances as exc:
+               with excutils.save_and_reraise_exception():
+                   self._cleanup_build_artifacts(context, exc, instances,
+                                                 build_requests,
+                                                 request_specs)
+
+       for (build_request, request_spec, host, instance) in six.moves.zip(
+               build_requests, request_specs, hosts, instances):
+           if instance is None:
+               # Skip placeholders that were buried in cell0 or had their
+               # build requests deleted by the user before instance create.
+               continue
+           filter_props = request_spec.to_legacy_filter_properties_dict()
+           scheduler_utils.populate_retry(filter_props, instance.uuid)
+           scheduler_utils.populate_filter_properties(filter_props,
+                                                      host)
            # send a state update notification for the initial create to
            # show it going from non-existent to BUILDING
            notifications.send_update_with_states(context, instance, None,
@@ -1019,6 +1046,29 @@ class ComputeTaskManager(base.Base):
                    host=host['host'], node=host['nodename'],
                    limits=host['limits'])

+   def _cleanup_build_artifacts(self, context, exc, instances, build_requests,
+                                request_specs):
+       for (instance, build_request, request_spec) in six.moves.zip(
+               instances, build_requests, request_specs):
+           # Skip placeholders that were buried in cell0 or had their
+           # build requests deleted by the user before instance create.
+           if instance is None:
+               continue
+           updates = {'vm_state': vm_states.ERROR, 'task_state': None}
+           legacy_spec = request_spec.to_legacy_request_spec_dict()
+           self._set_vm_state_and_notify(context, instance.uuid,
+                                         'build_instances', updates, exc,
+                                         legacy_spec)
+           # Be paranoid about artifacts being deleted underneath us.
+           try:
+               build_request.destroy()
+           except exception.BuildRequestNotFound:
+               pass
+           try:
+               request_spec.destroy()
+           except exception.RequestSpecNotFound:
+               pass
+
    def _delete_build_request(self, context, build_request, instance, cell,
                              instance_bdms, instance_tags):
        """Delete a build request after creating the instance in the cell.
View File
@@ -30,15 +30,11 @@ class MigrationTask(base.TaskBase):
        self.request_spec = request_spec
        self.reservations = reservations
        self.flavor = flavor
-       self.quotas = None

        self.compute_rpcapi = compute_rpcapi
        self.scheduler_client = scheduler_client

    def _execute(self):
-       self.quotas = objects.Quotas.from_reservations(self.context,
-                                                      self.reservations,
-                                                      instance=self.instance)
        # TODO(sbauza): Remove that once prep_resize() accepts a RequestSpec
        # object in the signature and all the scheduler.utils methods too
        legacy_spec = self.request_spec.to_legacy_request_spec_dict()
@@ -96,5 +92,4 @@ class MigrationTask(base.TaskBase):
                node=node, clean_shutdown=self.clean_shutdown)

    def rollback(self):
-       if self.quotas:
-           self.quotas.rollback()
+       pass
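With no reservations held, the migration task has nothing to release when scheduling fails, which is why rollback() collapses to pass. A sketch of the surrounding contract, with a hypothetical TaskBase shown for shape only:

    class TaskBase:
        def execute(self):
            try:
                return self._execute()
            except Exception:
                self.rollback()
                raise

    class MigrationTask(TaskBase):
        def _execute(self):
            raise RuntimeError('scheduling failed')

        def rollback(self):
            pass  # usage is simply recounted on the next quota check

    try:
        MigrationTask().execute()
    except RuntimeError:
        pass  # instance state handling happens elsewhere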
View File
@@ -20,7 +20,10 @@ from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from oslo_utils import versionutils
+from sqlalchemy import or_
from sqlalchemy.orm import joinedload
+from sqlalchemy.sql import func
+from sqlalchemy.sql import null

from nova.cells import opts as cells_opts
from nova.cells import rpcapi as cells_rpcapi
@@ -1206,7 +1209,8 @@ class InstanceList(base.ObjectListBase, base.NovaObject):
    # Version 2.1: Add get_uuids_by_host()
    # Version 2.2: Pagination for get_active_by_window_joined()
    # Version 2.3: Add get_count_by_vm_state()
-   VERSION = '2.3'
+   # Version 2.4: Add get_counts()
+   VERSION = '2.4'

    fields = {
        'objects': fields.ListOfObjectsField('Instance'),
@@ -1407,6 +1411,55 @@ class InstanceList(base.ObjectListBase, base.NovaObject):
        return cls._get_count_by_vm_state_in_db(context, project_id, user_id,
                                                vm_state)

+   @staticmethod
+   @db_api.pick_context_manager_reader
+   def _get_counts_in_db(context, project_id, user_id=None):
+       # NOTE(melwitt): Copied from nova/db/sqlalchemy/api.py:
+       # It would be better to have vm_state not be nullable
+       # but until then we test it explicitly as a workaround.
+       not_soft_deleted = or_(
+           models.Instance.vm_state != vm_states.SOFT_DELETED,
+           models.Instance.vm_state == null()
+           )
+       project_query = context.session.query(
+           func.count(models.Instance.id),
+           func.sum(models.Instance.vcpus),
+           func.sum(models.Instance.memory_mb)).\
+           filter_by(deleted=0).\
+           filter(not_soft_deleted).\
+           filter_by(project_id=project_id)
+       project_result = project_query.first()
+       fields = ('instances', 'cores', 'ram')
+       project_counts = {field: int(project_result[idx] or 0)
+                         for idx, field in enumerate(fields)}
+       counts = {'project': project_counts}
+       if user_id:
+           user_result = project_query.filter_by(user_id=user_id).first()
+           user_counts = {field: int(user_result[idx] or 0)
+                          for idx, field in enumerate(fields)}
+           counts['user'] = user_counts
+       return counts
+
+   @base.remotable_classmethod
+   def get_counts(cls, context, project_id, user_id=None):
+       """Get the counts of Instance objects in the database.
+
+       :param context: The request context for database access
+       :param project_id: The project_id to count across
+       :param user_id: The user_id to count across
+       :returns: A dict containing the project-scoped counts and user-scoped
+                 counts if user_id is specified. For example:
+
+                   {'project': {'instances': <count across project>,
+                                'cores': <count across project>,
+                                'ram': <count across project>},
+                    'user': {'instances': <count across user>,
+                             'cores': <count across user>,
+                             'ram': <count across user>}}
+       """
+       return cls._get_counts_in_db(context, project_id, user_id=user_id)
+
@db_api.pick_context_manager_writer
def _migrate_instance_keypairs(ctxt, count):
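_get_counts_in_db folds what used to be three tracked usage records into one aggregate query: COUNT for instances, SUM(vcpus) for cores, SUM(memory_mb) for ram. A runnable sketch of the same single-query aggregation against an in-memory SQLite stand-in for the instances table (table and columns here are illustrative, not Nova's full model):

    from sqlalchemy import Column, Integer, String, create_engine, func
    from sqlalchemy.orm import Session, declarative_base

    Base = declarative_base()

    class Instance(Base):
        __tablename__ = 'instances'
        id = Column(Integer, primary_key=True)
        project_id = Column(String)
        user_id = Column(String)
        vcpus = Column(Integer)
        memory_mb = Column(Integer)

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    with Session(engine) as session:
        session.add_all([
            Instance(project_id='p1', user_id='u1', vcpus=2, memory_mb=512),
            Instance(project_id='p1', user_id='u2', vcpus=4, memory_mb=1024)])
        # One round trip returns all three quota-relevant numbers.
        row = session.query(func.count(Instance.id),
                            func.sum(Instance.vcpus),
                            func.sum(Instance.memory_mb)).\
            filter(Instance.project_id == 'p1').first()
        counts = {name: int(row[idx] or 0)
                  for idx, name in enumerate(('instances', 'cores', 'ram'))}
        print(counts)  # {'instances': 2, 'cores': 6, 'ram': 1536}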
View File
@@ -136,7 +136,7 @@ class DbQuotaDriver(object):
                usage = usages.get(resource.name, {})
                modified_quotas[resource.name].update(
                    in_use=usage.get('in_use', 0),
-                   reserved=usage.get('reserved', 0),
+                   reserved=0,
                    )

        # Initialize remains quotas with the default limits.
@@ -238,8 +238,7 @@ class DbQuotaDriver(object):
                        default value, if there is no value from the
                        quota class) will be reported if there is no
                        specific value for the resource.
-       :param usages: If True, the current in_use and reserved counts
-                      will also be returned.
+       :param usages: If True, the current counts will also be returned.
        :param project_quotas: Quotas dictionary for the specified project.
        :param user_quotas: Quotas dictionary for the specified project
                            and user.
@@ -260,17 +259,6 @@ class DbQuotaDriver(object):
        if usages:
            user_usages = self._get_usages(context, resources, project_id,
                                           user_id=user_id)
-           # TODO(melwitt): This is for compat with ReservableResources and
-           # should be removed when all of the ReservableResources have been
-           # removed. ReservableResource usage comes from the quota_usages
-           # table in the database, so we need to query usages from there as
-           # long as we still have ReservableResources.
-           from_usages_table = db.quota_usage_get_all_by_project_and_user(
-               context, project_id, user_id)
-           for k, v in from_usages_table.items():
-               if k in user_usages:
-                   continue
-               user_usages[k] = v
        return self._process_quotas(context, resources, project_id,
                                    user_quotas, quota_class,
                                    defaults=defaults, usages=user_usages)
@@ -293,8 +281,7 @@ class DbQuotaDriver(object):
                        default value, if there is no value from the
                        quota class) will be reported if there is no
                        specific value for the resource.
-       :param usages: If True, the current in_use and reserved counts
-                      will also be returned.
+       :param usages: If True, the current counts will also be returned.
        :param remains: If True, the current remains of the project will
                        will be returned.
        :param project_quotas: Quotas dictionary for the specified project.
@@ -304,17 +291,6 @@ class DbQuotaDriver(object):
        project_usages = {}
        if usages:
            project_usages = self._get_usages(context, resources, project_id)
-           # TODO(melwitt): This is for compat with ReservableResources and
-           # should be removed when all of the ReservableResources have been
-           # removed. ReservableResource usage comes from the quota_usages
-           # table in the database, so we need to query usages from there as
-           # long as we still have ReservableResources.
-           from_usages_table = db.quota_usage_get_all_by_project(context,
-                                                                 project_id)
-           for k, v in from_usages_table.items():
-               if k in project_usages:
-                   continue
-               project_usages[k] = v
        return self._process_quotas(context, resources, project_id,
                                    project_quotas, quota_class,
                                    defaults=defaults, usages=project_usages,
@@ -385,14 +361,14 @@ class DbQuotaDriver(object):
                # attempts to update a quota, the value chosen must be at least
                # as much as the current usage and less than or equal to the
                # project limit less the sum of existing per user limits.
-               minimum = value['in_use'] + value['reserved']
+               minimum = value['in_use']
                settable_quotas[key] = {'minimum': minimum, 'maximum': maximum}
        else:
            for key, value in project_quotas.items():
                minimum = \
                    max(int(self._sub_quota_values(value['limit'],
                                                   value['remains'])),
-                       int(value['in_use'] + value['reserved']))
+                       int(value['in_use']))
                settable_quotas[key] = {'minimum': minimum, 'maximum': -1}
        return settable_quotas
@@ -423,7 +399,7 @@ class DbQuotaDriver(object):
                    syncable_resources.append(key)
        return syncable_resources

-   def _get_quotas(self, context, resources, keys, has_sync, project_id=None,
+   def _get_quotas(self, context, resources, keys, project_id=None,
                    user_id=None, project_quotas=None):
        """A helper method which retrieves the quotas for the specific
           resources identified by keys, and which apply to the current
@@ -432,10 +408,6 @@ class DbQuotaDriver(object):
        :param context: The request context, for access checks.
        :param resources: A dictionary of the registered resources.
        :param keys: A list of the desired quotas to retrieve.
-       :param has_sync: If True, indicates that the resource must
-                        have a sync function; if False, indicates
-                        that the resource must NOT have a sync
-                        function.
        :param project_id: Specify the project_id if current context
                           is admin and admin wants to impact on
                           common user's tenant.
@@ -446,13 +418,8 @@ class DbQuotaDriver(object):
        """

        # Filter resources
-       if has_sync:
-           sync_filt = lambda x: hasattr(x, 'sync')
-       else:
-           sync_filt = lambda x: not hasattr(x, 'sync')
        desired = set(keys)
-       sub_resources = {k: v for k, v in resources.items()
-                        if k in desired and sync_filt(v)}
+       sub_resources = {k: v for k, v in resources.items() if k in desired}

        # Make sure we accounted for all of them...
        if len(keys) != len(sub_resources):
@@ -526,10 +493,10 @@ class DbQuotaDriver(object):
        # Get the applicable quotas
        project_quotas = db.quota_get_all_by_project(context, project_id)
        quotas = self._get_quotas(context, resources, values.keys(),
-                                 has_sync=False, project_id=project_id,
+                                 project_id=project_id,
                                  project_quotas=project_quotas)
        user_quotas = self._get_quotas(context, resources, values.keys(),
-                                      has_sync=False, project_id=project_id,
+                                      project_id=project_id,
                                       user_id=user_id,
                                       project_quotas=project_quotas)
@@ -635,12 +602,12 @@ class DbQuotaDriver(object):
        # per user quotas, project quota limits (for quotas that have
        # user-scoping, limits for the project)
        quotas = self._get_quotas(context, resources, all_keys,
-                                 has_sync=False, project_id=project_id,
+                                 project_id=project_id,
                                  project_quotas=project_quotas)

        # per user quotas, user quota limits (for quotas that have
        # user-scoping, the limits for the user)
        user_quotas = self._get_quotas(context, resources, all_keys,
-                                      has_sync=False, project_id=project_id,
+                                      project_id=project_id,
                                       user_id=user_id,
                                       project_quotas=project_quotas)
@@ -770,12 +737,12 @@ class DbQuotaDriver(object):
                   'project_quotas': project_quotas})

        quotas = self._get_quotas(context, resources, deltas.keys(),
-                                 has_sync=True, project_id=project_id,
+                                 project_id=project_id,
                                  project_quotas=project_quotas)
        LOG.debug('Quotas for project %(project_id)s after resource sync: '
                  '%(quotas)s', {'project_id': project_id, 'quotas': quotas})
        user_quotas = self._get_quotas(context, resources, deltas.keys(),
-                                      has_sync=True, project_id=project_id,
+                                      project_id=project_id,
                                       user_id=user_id,
                                       project_quotas=project_quotas)
        LOG.debug('Quotas for project %(project_id)s and user %(user_id)s '
@@ -1031,8 +998,7 @@ class NoopQuotaDriver(object):
                        default value, if there is no value from the
                        quota class) will be reported if there is no
                        specific value for the resource.
-       :param usages: If True, the current in_use and reserved counts
-                      will also be returned.
+       :param usages: If True, the current counts will also be returned.
        """
        return self._get_noop_quotas(resources, usages=usages)
@@ -1054,8 +1020,7 @@ class NoopQuotaDriver(object):
                        default value, if there is no value from the
                        quota class) will be reported if there is no
                        specific value for the resource.
-       :param usages: If True, the current in_use and reserved counts
-                      will also be returned.
+       :param usages: If True, the current counts will also be returned.
        :param remains: If True, the current remains of the project will
                        will be returned.
        """
@@ -1536,8 +1501,7 @@ class QuotaEngine(object):
                        default value, if there is no value from the
                        quota class) will be reported if there is no
                        specific value for the resource.
-       :param usages: If True, the current in_use and reserved counts
-                      will also be returned.
+       :param usages: If True, the current counts will also be returned.
        """

        return self._driver.get_user_quotas(context, self._resources,
@@ -1559,8 +1523,7 @@ class QuotaEngine(object):
                        default value, if there is no value from the
                        quota class) will be reported if there is no
                        specific value for the resource.
-       :param usages: If True, the current in_use and reserved counts
-                      will also be returned.
+       :param usages: If True, the current counts will also be returned.
        :param remains: If True, the current remains of the project will
                        will be returned.
        """
@@ -1906,6 +1869,42 @@ def _floating_ip_count(context, project_id):
    return {'project': {'floating_ips': count}}


+def _instances_cores_ram_count(context, project_id, user_id=None):
+    """Get the counts of instances, cores, and ram in the database.
+
+    :param context: The request context for database access
+    :param project_id: The project_id to count across
+    :param user_id: The user_id to count across
+    :returns: A dict containing the project-scoped counts and user-scoped
+              counts if user_id is specified. For example:
+
+                {'project': {'instances': <count across project>,
+                             'cores': <count across project>,
+                             'ram': <count across project>},
+                 'user': {'instances': <count across user>,
+                          'cores': <count across user>,
+                          'ram': <count across user>}}
+    """
+    # TODO(melwitt): Counting across cells for instances means we will miss
+    # counting resources if a cell is down. In the future, we should query
+    # placement for cores/ram and InstanceMappings for instances (once we are
+    # deleting InstanceMappings when we delete instances).
+    results = nova_context.scatter_gather_all_cells(
+        context, objects.InstanceList.get_counts, project_id, user_id=user_id)
+    total_counts = {'project': {'instances': 0, 'cores': 0, 'ram': 0}}
+    if user_id:
+        total_counts['user'] = {'instances': 0, 'cores': 0, 'ram': 0}
+    for cell_uuid, result in results.items():
+        if result not in (nova_context.did_not_respond_sentinel,
+                          nova_context.raised_exception_sentinel):
+            for resource, count in result['project'].items():
+                total_counts['project'][resource] += count
+            if user_id:
+                for resource, count in result['user'].items():
+                    total_counts['user'][resource] += count
+    return total_counts
+
+
def _server_group_count(context, project_id, user_id=None):
    """Get the counts of server groups in the database.
@@ -1935,9 +1934,9 @@ QUOTAS = QuotaEngine()

resources = [
-   ReservableResource('instances', '_sync_instances', 'instances'),
-   ReservableResource('cores', '_sync_instances', 'cores'),
-   ReservableResource('ram', '_sync_instances', 'ram'),
+   CountableResource('instances', _instances_cores_ram_count, 'instances'),
+   CountableResource('cores', _instances_cores_ram_count, 'cores'),
+   CountableResource('ram', _instances_cores_ram_count, 'ram'),
    CountableResource('security_groups', _security_group_count,
                      'security_groups'),
    CountableResource('fixed_ips', _fixed_ip_count, 'fixed_ips'),
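The resource swap above is the crux of the whole change: instances, cores, and ram no longer sync usage rows via a '_sync_instances' function; they carry a count function that is consulted at check time. A rough sketch of the difference in shape (illustrative classes, not Nova's exact definitions):

    class ReservableResource:      # old: usage kept in quota_usages rows
        def __init__(self, name, sync, flag=None):
            self.name, self.sync, self.flag = name, sync, flag

    class CountableResource:       # new: usage counted on demand
        def __init__(self, name, count, flag=None):
            self.name, self.count, self.flag = name, count, flag

    def fake_count(context, project_id, user_id=None):
        # stand-in for _instances_cores_ram_count's scatter-gather
        return {'project': {'instances': 1, 'cores': 2, 'ram': 512}}

    ram = CountableResource('ram', fake_count, 'ram')
    print(ram.count(None, 'fake-project')['project']['ram'])  # 512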
View File
@@ -75,3 +75,51 @@ class QuotaTestCase(test.NoDBTestCase):
                                                 'fake-user')
        self.assertEqual(2, count['user']['server_group_members'])

+   def test_instances_cores_ram_count(self):
+       ctxt = context.RequestContext('fake-user', 'fake-project')
+       mapping1 = objects.CellMapping(context=ctxt,
+                                      uuid=uuidutils.generate_uuid(),
+                                      database_connection='cell1',
+                                      transport_url='none:///')
+       mapping2 = objects.CellMapping(context=ctxt,
+                                      uuid=uuidutils.generate_uuid(),
+                                      database_connection='cell2',
+                                      transport_url='none:///')
+       mapping1.create()
+       mapping2.create()
+
+       # Create an instance in cell1
+       with context.target_cell(ctxt, mapping1) as cctxt:
+           instance = objects.Instance(context=cctxt,
+                                       project_id='fake-project',
+                                       user_id='fake-user',
+                                       vcpus=2, memory_mb=512)
+           instance.create()
+
+       # Create an instance in cell2
+       with context.target_cell(ctxt, mapping2) as cctxt:
+           instance = objects.Instance(context=cctxt,
+                                       project_id='fake-project',
+                                       user_id='fake-user',
+                                       vcpus=4, memory_mb=1024)
+           instance.create()
+
+       # Create an instance in cell2 for a different user
+       with context.target_cell(ctxt, mapping2) as cctxt:
+           instance = objects.Instance(context=cctxt,
+                                       project_id='fake-project',
+                                       user_id='other-fake-user',
+                                       vcpus=4, memory_mb=1024)
+           instance.create()
+
+       # Count instances, cores, and ram across cells
+       count = quota._instances_cores_ram_count(ctxt, 'fake-project',
+                                                user_id='fake-user')
+
+       self.assertEqual(3, count['project']['instances'])
+       self.assertEqual(10, count['project']['cores'])
+       self.assertEqual(2560, count['project']['ram'])
+       self.assertEqual(2, count['user']['instances'])
+       self.assertEqual(6, count['user']['cores'])
+       self.assertEqual(1536, count['user']['ram'])
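The expected numbers follow directly from the three instances created: the project scope sums all of them across both cells, while the user scope includes only fake-user's two. As a quick check of the arithmetic:

    vcpus = [2, 4, 4]        # cell1/fake-user, cell2/fake-user, cell2/other
    ram = [512, 1024, 1024]
    assert (len(vcpus), sum(vcpus), sum(ram)) == (3, 10, 2560)  # project
    assert (sum(vcpus[:2]), sum(ram[:2])) == (6, 1536)          # fake-user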
View File
@@ -274,6 +274,53 @@ class ServersTest(ServersTestBase):
        found_server = self._wait_for_state_change(found_server, 'DELETED')
        self.assertEqual('ACTIVE', found_server['status'])

+   def test_deferred_delete_restore_overquota(self):
+       # Test that a restore that would put the user over quota fails
+       self.flags(instances=1, group='quota')
+       # Creates, deletes and restores a server.
+       self.flags(reclaim_instance_interval=3600)
+
+       # Create server
+       server = self._build_minimal_create_server_request()
+
+       created_server1 = self.api.post_server({'server': server})
+       LOG.debug("created_server: %s", created_server1)
+       self.assertTrue(created_server1['id'])
+       created_server_id1 = created_server1['id']
+
+       # Wait for it to finish being created
+       found_server1 = self._wait_for_state_change(created_server1, 'BUILD')
+
+       # It should be available...
+       self.assertEqual('ACTIVE', found_server1['status'])
+
+       # Delete the server
+       self.api.delete_server(created_server_id1)
+
+       # Wait for queued deletion
+       found_server1 = self._wait_for_state_change(found_server1, 'ACTIVE')
+       self.assertEqual('SOFT_DELETED', found_server1['status'])
+
+       # Create a second server
+       server = self._build_minimal_create_server_request()
+
+       created_server2 = self.api.post_server({'server': server})
+       LOG.debug("created_server: %s", created_server2)
+       self.assertTrue(created_server2['id'])
+
+       # Wait for it to finish being created
+       found_server2 = self._wait_for_state_change(created_server2, 'BUILD')
+
+       # It should be available...
+       self.assertEqual('ACTIVE', found_server2['status'])
+
+       # Try to restore the first server, it should fail
+       ex = self.assertRaises(client.OpenStackApiException,
+                              self.api.post_server_action,
+                              created_server_id1, {'restore': {}})
+       self.assertEqual(403, ex.response.status_code)
+       self.assertEqual('SOFT_DELETED', found_server1['status'])
+
    def test_deferred_delete_force(self):
        # Creates, deletes and force deletes a server.
        self.flags(reclaim_instance_interval=3600)
@@ -668,6 +715,24 @@ class ServersTest(ServersTestBase):
        # Cleanup
        self._delete_server(created_server_id)

+   def test_resize_server_overquota(self):
+       self.flags(cores=1, group='quota')
+       self.flags(ram=512, group='quota')
+       # Create server with default flavor, 1 core, 512 ram
+       server = self._build_minimal_create_server_request()
+       created_server = self.api.post_server({"server": server})
+       created_server_id = created_server['id']
+
+       found_server = self._wait_for_state_change(created_server, 'BUILD')
+       self.assertEqual('ACTIVE', found_server['status'])
+
+       # Try to resize to flavorid 2, 1 core, 2048 ram
+       post = {'resize': {'flavorRef': '2'}}
+       ex = self.assertRaises(client.OpenStackApiException,
+                              self.api.post_server_action,
+                              created_server_id, post)
+       self.assertEqual(403, ex.response.status_code)
+

class ServersTestV21(ServersTest):
    api_major_version = 'v2.1'
View File
@@ -431,3 +431,36 @@ class MultiCreateExtensionTestV21(test.TestCase):
        self.assertRaises(self.validation_error,
                          self.controller.create, self.req, body=body)

+   def test_create_multiple_instance_max_count_overquota_min_count_ok(self):
+       self.flags(instances=3, group='quota')
+       image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+       flavor_ref = 'http://localhost/123/flavors/3'
+       body = {
+           'server': {
+               multiple_create_v21.MIN_ATTRIBUTE_NAME: 2,
+               multiple_create_v21.MAX_ATTRIBUTE_NAME: 5,
+               'name': 'server_test',
+               'imageRef': image_href,
+               'flavorRef': flavor_ref,
+           }
+       }
+       res = self.controller.create(self.req, body=body).obj
+       instance_uuids = self.instance_cache_by_uuid.keys()
+       self.assertIn(res["server"]["id"], instance_uuids)
+
+   def test_create_multiple_instance_max_count_overquota_min_count_over(self):
+       self.flags(instances=3, group='quota')
+       image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+       flavor_ref = 'http://localhost/123/flavors/3'
+       body = {
+           'server': {
+               multiple_create_v21.MIN_ATTRIBUTE_NAME: 4,
+               multiple_create_v21.MAX_ATTRIBUTE_NAME: 5,
+               'name': 'server_test',
+               'imageRef': image_href,
+               'flavorRef': flavor_ref,
+           }
+       }
+       self.assertRaises(webob.exc.HTTPForbidden, self.controller.create,
+                         self.req, body=body)
View File
@@ -51,6 +51,7 @@ from nova.compute import vm_states
import nova.conf
from nova import context
from nova import db
+from nova.db.sqlalchemy import api as db_api
from nova.db.sqlalchemy import models
from nova import exception
from nova.image import glance
@@ -3231,8 +3232,27 @@ class ServersControllerCreateTest(test.TestCase):
        self.assertEqual(encodeutils.safe_decode(robj['Location']), selfhref)

-   def _do_test_create_instance_above_quota(self, resource, allowed, quota,
-                                            expected_msg):
+   @mock.patch('nova.db.quota_get_all_by_project')
+   @mock.patch('nova.db.quota_get_all_by_project_and_user')
+   @mock.patch('nova.objects.Quotas.count_as_dict')
+   def _do_test_create_instance_above_quota(self, resource, allowed,
+           quota, expected_msg, mock_count, mock_get_all_pu,
+           mock_get_all_p):
+       count = {'project': {}, 'user': {}}
+       for res in ('instances', 'ram', 'cores'):
+           if res == resource:
+               value = quota - allowed
+               count['project'][res] = count['user'][res] = value
+           else:
+               count['project'][res] = count['user'][res] = 0
+       mock_count.return_value = count
+       mock_get_all_p.return_value = {'project_id': 'fake'}
+       mock_get_all_pu.return_value = {'project_id': 'fake',
+                                       'user_id': 'fake_user'}
+       if resource in db_api.PER_PROJECT_QUOTAS:
+           mock_get_all_p.return_value[resource] = quota
+       else:
+           mock_get_all_pu.return_value[resource] = quota
        fakes.stub_out_instance_quota(self, allowed, quota, resource)
        self.body['server']['flavorRef'] = 3
        self.req.body = jsonutils.dump_as_bytes(self.body)
@@ -3264,12 +3284,16 @@ class ServersControllerCreateTest(test.TestCase):
        fake_group.user_id = ctxt.user_id
        fake_group.create()

+       real_count = fakes.QUOTAS.count_as_dict
+
        def fake_count(context, name, group, user_id):
-           self.assertEqual(name, "server_group_members")
-           self.assertEqual(group.uuid, fake_group.uuid)
-           self.assertEqual(user_id,
-                            self.req.environ['nova.context'].user_id)
-           return {'user': {'server_group_members': 10}}
+           if name == 'server_group_members':
+               self.assertEqual(group.uuid, fake_group.uuid)
+               self.assertEqual(user_id,
+                                self.req.environ['nova.context'].user_id)
+               return {'user': {'server_group_members': 10}}
+           else:
+               return real_count(context, name, group, user_id)

        def fake_limit_check(context, **kwargs):
            if 'server_group_members' in kwargs:
View File
@@ -65,7 +65,6 @@ from nova.objects import block_device as block_device_obj
from nova.objects import fields as obj_fields
from nova.objects import instance as instance_obj
from nova.objects import migrate_data as migrate_data_obj
-from nova import quota
from nova.scheduler import client as scheduler_client
from nova import test
from nova.tests import fixtures
@@ -94,7 +93,6 @@ from nova.virt import fake
from nova.virt import hardware
from nova.volume import cinder

-QUOTAS = quota.QUOTAS
LOG = logging.getLogger(__name__)

CONF = nova.conf.CONF
@@ -216,8 +214,6 @@ class BaseTestCase(test.TestCase):
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id,
                                              self.project_id)
-       self.none_quotas = objects.Quotas.from_reservations(
-           self.context, None)

        def fake_show(meh, context, id, **kwargs):
            if id:
@@ -4322,8 +4318,7 @@ class ComputeTestCase(BaseTestCase,
                          self.compute._delete_instance,
                          self.context,
                          instance,
-                         [],
-                         self.none_quotas)
+                         [])
        mock_destroy.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY,
                                             mock.ANY)
@@ -4340,8 +4335,7 @@ class ComputeTestCase(BaseTestCase,
                          self.compute._delete_instance,
                          self.context,
                          instance,
-                         [],
-                         self.none_quotas)
+                         [])
        mock_destroy.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY,
                                             mock.ANY)
@@ -4350,8 +4344,7 @@ class ComputeTestCase(BaseTestCase,
    def test_delete_instance_changes_power_state(self):
        """Test that the power state is NOSTATE after deleting an instance."""
        instance = self._create_fake_instance_obj()
-       self.compute._delete_instance(self.context, instance, [],
-                                     self.none_quotas)
+       self.compute._delete_instance(self.context, instance, [])
        self.assertEqual(power_state.NOSTATE, instance.power_state)

    def test_instance_termination_exception_sets_error(self):
@@ -4360,8 +4353,7 @@ class ComputeTestCase(BaseTestCase,
        """
        instance = self._create_fake_instance_obj()

-       def fake_delete_instance(self, context, instance, bdms,
-                                reservations=None):
+       def fake_delete_instance(self, context, instance, bdms):
            raise exception.InstanceTerminationFailure(reason='')

        self.stub_out('nova.compute.manager.ComputeManager._delete_instance',
@@ -4495,23 +4487,12 @@ class ComputeTestCase(BaseTestCase,
                      fake_migration_save)
        self._test_state_revert(instance, *operation)

-   def _ensure_quota_reservations(self, instance,
-                                  reservations, mock_quota):
-       """Mock up commit/rollback of quota reservations."""
-       mock_quota.assert_called_once_with(mock.ANY, reservations,
-                                          project_id=instance['project_id'],
-                                          user_id=instance['user_id'])
-
-   @mock.patch.object(nova.quota.QUOTAS, 'commit')
-   def test_quotas_successful_delete(self, mock_commit):
+   def test_quotas_successful_delete(self):
        instance = self._create_fake_instance_obj()
-       resvs = list('fake_res')
        self.compute.terminate_instance(self.context, instance,
-                                       bdms=[], reservations=resvs)
-       self._ensure_quota_reservations(instance, resvs, mock_commit)
+                                       bdms=[], reservations=[])

-   @mock.patch.object(nova.quota.QUOTAS, 'rollback')
-   def test_quotas_failed_delete(self, mock_rollback):
+   def test_quotas_failed_delete(self):
        instance = self._create_fake_instance_obj()

        def fake_shutdown_instance(*args, **kwargs):
@@ -4520,25 +4501,18 @@ class ComputeTestCase(BaseTestCase,
        self.stub_out('nova.compute.manager.ComputeManager._shutdown_instance',
                      fake_shutdown_instance)

-       resvs = list('fake_res')
        self.assertRaises(test.TestingException,
                          self.compute.terminate_instance,
                          self.context, instance,
-                         bdms=[], reservations=resvs)
-       self._ensure_quota_reservations(instance,
-                                       resvs, mock_rollback)
+                         bdms=[], reservations=[])

-   @mock.patch.object(nova.quota.QUOTAS, 'commit')
-   def test_quotas_successful_soft_delete(self, mock_commit):
+   def test_quotas_successful_soft_delete(self):
        instance = self._create_fake_instance_obj(
            params=dict(task_state=task_states.SOFT_DELETING))
-       resvs = list('fake_res')
        self.compute.soft_delete_instance(self.context, instance,
-                                         reservations=resvs)
-       self._ensure_quota_reservations(instance, resvs, mock_commit)
+                                         reservations=[])

-   @mock.patch.object(nova.quota.QUOTAS, 'rollback')
-   def test_quotas_failed_soft_delete(self, mock_rollback):
+   def test_quotas_failed_soft_delete(self):
        instance = self._create_fake_instance_obj(
            params=dict(task_state=task_states.SOFT_DELETING))
@@ -4548,29 +4522,17 @@ class ComputeTestCase(BaseTestCase,
        self.stub_out('nova.virt.fake.FakeDriver.soft_delete',
                      fake_soft_delete)

-       resvs = list('fake_res')
-
        self.assertRaises(test.TestingException,
                          self.compute.soft_delete_instance,
                          self.context, instance,
-                         reservations=resvs)
-       self._ensure_quota_reservations(instance,
-                                       resvs, mock_rollback)
+                         reservations=[])

-   @mock.patch.object(nova.quota.QUOTAS, 'rollback')
-   def test_quotas_destroy_of_soft_deleted_instance(self, mock_rollback):
+   def test_quotas_destroy_of_soft_deleted_instance(self):
        instance = self._create_fake_instance_obj(
            params=dict(vm_state=vm_states.SOFT_DELETED))
-       # Termination should be successful, but quota reservations
-       # rolled back because the instance was in SOFT_DELETED state.
-       resvs = list('fake_res')
-
        self.compute.terminate_instance(self.context, instance,
-                                       bdms=[], reservations=resvs)
-       self._ensure_quota_reservations(instance,
-                                       resvs, mock_rollback)
+                                       bdms=[], reservations=[])

    def _stub_out_resize_network_methods(self):
        def fake(cls, ctxt, instance, *args, **kwargs):
@@ -4637,10 +4599,9 @@ class ComputeTestCase(BaseTestCase,
            mock.patch.object(self.compute, '_get_instance_block_device_info'),
            mock.patch.object(migration, 'save'),
            mock.patch.object(instance, 'save'),
-           mock.patch.object(nova.quota.QUOTAS, 'commit')
        ) as (mock_setup, mock_net_mig, mock_get_nw, mock_notify,
              mock_notify_action, mock_virt_mig, mock_get_blk, mock_mig_save,
-             mock_inst_save, mock_commit):
+             mock_inst_save):
            def _mig_save():
                self.assertEqual(migration.status, 'finished')
                self.assertEqual(vm_state, instance.vm_state)
@@ -4715,8 +4676,6 @@ class ComputeTestCase(BaseTestCase,
                refresh_conn_info=True)
            mock_inst_save.assert_has_calls(inst_call_list)
            mock_mig_save.assert_called_once_with()
-           self._ensure_quota_reservations(instance, reservations,
-                                           mock_commit)

    def test_finish_resize_from_active(self):
        self._test_finish_resize(power_on=True)
@@ -4727,8 +4686,7 @@ class ComputeTestCase(BaseTestCase,
    def test_finish_resize_without_resize_instance(self):
        self._test_finish_resize(power_on=True, resize_instance=False)

-   @mock.patch.object(nova.quota.QUOTAS, 'commit')
-   def test_finish_resize_with_volumes(self, mock_commit):
+   def test_finish_resize_with_volumes(self):
        """Contrived test to ensure finish_resize doesn't raise anything."""

        # create instance
@@ -4888,14 +4846,11 @@ class ComputeTestCase(BaseTestCase,
        volume['device_path'] = None
        volume['instance_uuid'] = None
        self.stub_out('nova.volume.cinder.API.detach', fake_detach)
-       self._ensure_quota_reservations(instance,
-                                       reservations, mock_commit)

        # clean up
        self.compute.terminate_instance(self.context, instance, [], [])

-   @mock.patch.object(nova.quota.QUOTAS, 'rollback')
-   def test_finish_resize_handles_error(self, mock_rollback):
+   def test_finish_resize_handles_error(self):
        # Make sure we don't leave the instance in RESIZE on error.

        def throw_up(*args, **kwargs):
@@ -4907,13 +4862,12 @@ class ComputeTestCase(BaseTestCase,
        old_flavor_name = 'm1.tiny'
        instance = self._create_fake_instance_obj(type_name=old_flavor_name)

-       reservations = list('fake_res')
        instance_type = flavors.get_flavor_by_name('m1.small')

        self.compute.prep_resize(self.context, instance=instance,
                                 instance_type=instance_type,
-                                image={}, reservations=reservations,
+                                image={}, reservations=[],
                                 request_spec={}, filter_properties={},
                                 node=None, clean_shutdown=True)
@@ -4928,7 +4882,7 @@ class ComputeTestCase(BaseTestCase,
                          self.context,
                          migration=migration,
                          disk_info={}, image={}, instance=instance,
-                         reservations=reservations)
+                         reservations=[])

        instance.refresh()
        self.assertEqual(vm_states.ERROR, instance.vm_state)
@@ -4939,8 +4893,6 @@ class ComputeTestCase(BaseTestCase,
        self.assertEqual(old_flavor['ephemeral_gb'], instance.ephemeral_gb)
        self.assertEqual(old_flavor['id'], instance.instance_type_id)
        self.assertNotEqual(instance_type['id'], instance.instance_type_id)
-       self._ensure_quota_reservations(instance, reservations,
-                                       mock_rollback)

    def test_set_instance_info(self):
        old_flavor_name = 'm1.tiny'
@@ -5135,16 +5087,12 @@ class ComputeTestCase(BaseTestCase,
            self.assertEqual(payload['image_ref_url'], image_ref_url)
        self.compute.terminate_instance(self.context, instance, [], [])

-   @mock.patch.object(nova.quota.QUOTAS, 'rollback')
-   def test_prep_resize_instance_migration_error_on_none_host(self,
-                                                              mock_rollback):
+   def test_prep_resize_instance_migration_error_on_none_host(self):
        """Ensure prep_resize raises a migration error if destination host is
        not defined
        """
        instance = self._create_fake_instance_obj()

-       reservations = list('fake_res')
        self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
                                            block_device_mapping=[])
        instance.host = None
@@ -5154,15 +5102,12 @@ class ComputeTestCase(BaseTestCase,
        self.assertRaises(exception.MigrationError, self.compute.prep_resize,
                          self.context, instance=instance,
                          instance_type=instance_type, image={},
-                         reservations=reservations, request_spec={},
+                         reservations=[], request_spec={},
                          filter_properties={}, node=None,
                          clean_shutdown=True)
        self.compute.terminate_instance(self.context, instance, [], [])
-       self._ensure_quota_reservations(instance, reservations,
-                                       mock_rollback)

-   @mock.patch.object(nova.quota.QUOTAS, 'rollback')
-   def test_resize_instance_driver_error(self, mock_rollback):
+   def test_resize_instance_driver_error(self):
        # Ensure instance status set to Error on resize error.

        def throw_up(*args, **kwargs):
@@ -5174,15 +5119,13 @@ class ComputeTestCase(BaseTestCase,
        instance = self._create_fake_instance_obj()
        instance_type = flavors.get_default_flavor()

-       reservations = list('fake_res')
        self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
                                            block_device_mapping=[])
        instance.host = 'foo'
        instance.save()
        self.compute.prep_resize(self.context, instance=instance,
                                 instance_type=instance_type, image={},
-                                reservations=reservations, request_spec={},
+                                reservations=[], request_spec={},
                                 filter_properties={}, node=None,
                                 clean_shutdown=True)
        instance.task_state = task_states.RESIZE_PREP
@@ -5195,7 +5138,7 @@ class ComputeTestCase(BaseTestCase,
        self.assertRaises(test.TestingException, self.compute.resize_instance,
                          self.context, instance=instance,
                          migration=migration, image={},
-                         reservations=reservations,
+                         reservations=[],
                          instance_type=jsonutils.to_primitive(instance_type),
                          clean_shutdown=True)
        # NOTE(comstud): error path doesn't use objects, so our object
@@ -5203,11 +5146,8 @@ class ComputeTestCase(BaseTestCase,
        instance.refresh()
        self.assertEqual(instance.vm_state, vm_states.ERROR)
        self.compute.terminate_instance(self.context, instance, [], [])
-       self._ensure_quota_reservations(instance, reservations,
-                                       mock_rollback)

-   @mock.patch.object(nova.quota.QUOTAS, 'rollback')
-   def test_resize_instance_driver_rollback(self, mock_rollback):
+   def test_resize_instance_driver_rollback(self):
        # Ensure instance status set to Running after rollback.

        def throw_up(*args, **kwargs):
@@ -5218,14 +5158,13 @@ class ComputeTestCase(BaseTestCase,
        instance = self._create_fake_instance_obj()
        instance_type = flavors.get_default_flavor()

-       reservations = list('fake_res')
        self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
                                            block_device_mapping=[])
        instance.host = 'foo'
        instance.save()
        self.compute.prep_resize(self.context, instance=instance,
                                 instance_type=instance_type, image={},
-                                reservations=reservations, request_spec={},
+                                reservations=[], request_spec={},
                                 filter_properties={}, node=None,
                                 clean_shutdown=True)
        instance.task_state = task_states.RESIZE_PREP
@@ -5238,7 +5177,7 @@ class ComputeTestCase(BaseTestCase,
        self.assertRaises(test.TestingException, self.compute.resize_instance,
                          self.context, instance=instance,
                          migration=migration, image={},
-                         reservations=reservations,
+                         reservations=[],
                          instance_type=jsonutils.to_primitive(instance_type),
                          clean_shutdown=True)
        # NOTE(comstud): error path doesn't use objects, so our object
@@ -5247,8 +5186,6 @@ class ComputeTestCase(BaseTestCase,
        self.assertEqual(instance.vm_state, vm_states.ACTIVE)
        self.assertIsNone(instance.task_state)
        self.compute.terminate_instance(self.context, instance, [], [])
-       self._ensure_quota_reservations(instance, reservations,
-                                       mock_rollback)

    def _test_resize_instance(self, clean_shutdown=True):
        # Ensure instance can be migrated/resized.
@ -5316,8 +5253,7 @@ class ComputeTestCase(BaseTestCase,
def test_resize_instance_forced_shutdown(self): def test_resize_instance_forced_shutdown(self):
self._test_resize_instance(clean_shutdown=False) self._test_resize_instance(clean_shutdown=False)
@mock.patch.object(nova.quota.QUOTAS, 'commit') def _test_confirm_resize(self, power_on, numa_topology=None):
def _test_confirm_resize(self, mock_commit, power_on, numa_topology=None):
# Common test case method for confirm_resize # Common test case method for confirm_resize
def fake(*args, **kwargs): def fake(*args, **kwargs):
pass pass
@ -5344,8 +5280,6 @@ class ComputeTestCase(BaseTestCase,
self._stub_out_resize_network_methods() self._stub_out_resize_network_methods()
reservations = list('fake_res')
# Get initial memory usage # Get initial memory usage
memory_mb_used = self.rt.compute_nodes[NODENAME].memory_mb_used memory_mb_used = self.rt.compute_nodes[NODENAME].memory_mb_used
@@ -5371,7 +5305,7 @@ class ComputeTestCase(BaseTestCase,
         self.compute.prep_resize(self.context,
                 instance=instance,
                 instance_type=new_instance_type_ref,
-                image={}, reservations=reservations, request_spec={},
+                image={}, reservations=[], request_spec={},
                 filter_properties={}, node=None, clean_shutdown=True)

         # Memory usage should increase after the resize as well

@@ -5420,7 +5354,7 @@ class ComputeTestCase(BaseTestCase,
         instance.task_state = None
         instance.save()
         self.compute.confirm_resize(self.context, instance=instance,
-                                    reservations=reservations,
+                                    reservations=[],
                                     migration=migration)

         # Resources from the migration (based on initial flavor) should

@@ -5439,8 +5373,6 @@ class ComputeTestCase(BaseTestCase,
         self.assertIsNone(instance.migration_context)
         self.assertEqual(p_state, instance.power_state)
         self.compute.terminate_instance(self.context, instance, [], [])
-        self._ensure_quota_reservations(instance, reservations,
-                                        mock_commit)

     def test_confirm_resize_from_active(self):
         self._test_confirm_resize(power_on=True)

@@ -5600,8 +5532,7 @@ class ComputeTestCase(BaseTestCase,
         self._test_resize_with_pci(
             self.compute.revert_resize, '0000:0b:00.1')

-    @mock.patch.object(nova.quota.QUOTAS, 'commit')
-    def _test_finish_revert_resize(self, mock_commit, power_on,
+    def _test_finish_revert_resize(self, power_on,
                                    remove_old_vm_state=False,
                                    numa_topology=None):
         """Convenience method that does most of the work for the

@@ -5634,8 +5565,6 @@ class ComputeTestCase(BaseTestCase,
         self._stub_out_resize_network_methods()

-        reservations = list('fake_res')

         # Get initial memory usage
         memory_mb_used = self.rt.compute_nodes[NODENAME].memory_mb_used

@@ -5661,7 +5590,7 @@ class ComputeTestCase(BaseTestCase,
         self.compute.prep_resize(self.context,
                 instance=instance,
                 instance_type=new_instance_type_ref,
-                image={}, reservations=reservations, request_spec={},
+                image={}, reservations=[], request_spec={},
                 filter_properties={}, node=None,
                 clean_shutdown=True)

@@ -5710,7 +5639,7 @@ class ComputeTestCase(BaseTestCase,
         self.compute.revert_resize(self.context,
                 migration=migration, instance=instance,
-                reservations=reservations)
+                reservations=[])

         # Resources from the migration (based on initial flavor) should
         # be freed now

@@ -5729,7 +5658,7 @@ class ComputeTestCase(BaseTestCase,
         self.compute.finish_revert_resize(self.context,
                 migration=migration,
-                instance=instance, reservations=reservations)
+                instance=instance, reservations=[])

         self.assertIsNone(instance.task_state)

@@ -5744,9 +5673,6 @@ class ComputeTestCase(BaseTestCase,
         else:
             self.assertEqual(old_vm_state, instance.vm_state)

-        self._ensure_quota_reservations(instance, reservations,
-                                        mock_commit)

     def test_finish_revert_resize_from_active(self):
         self._test_finish_revert_resize(power_on=True)
@@ -5844,8 +5770,7 @@ class ComputeTestCase(BaseTestCase,
         flavor_type = flavors.get_flavor_by_flavor_id(1)
         self.assertEqual(flavor_type['name'], 'm1.tiny')

-    @mock.patch.object(nova.quota.QUOTAS, 'rollback')
-    def test_resize_instance_handles_migration_error(self, mock_rollback):
+    def test_resize_instance_handles_migration_error(self):
         # Ensure vm_state is ERROR when error occurs.
         def raise_migration_failure(*args):
             raise test.TestingException()

@@ -5853,7 +5778,6 @@ class ComputeTestCase(BaseTestCase,
                        raise_migration_failure)

         instance = self._create_fake_instance_obj()
-        reservations = list('fake_res')

         instance_type = flavors.get_default_flavor()

@@ -5863,7 +5787,7 @@ class ComputeTestCase(BaseTestCase,
         instance.save()
         self.compute.prep_resize(self.context, instance=instance,
                                  instance_type=instance_type,
-                                 image={}, reservations=reservations,
+                                 image={}, reservations=[],
                                  request_spec={}, filter_properties={},
                                  node=None, clean_shutdown=True)
         migration = objects.Migration.get_by_instance_and_status(

@@ -5874,7 +5798,7 @@ class ComputeTestCase(BaseTestCase,
         self.assertRaises(test.TestingException, self.compute.resize_instance,
                           self.context, instance=instance,
                           migration=migration, image={},
-                          reservations=reservations,
+                          reservations=[],
                           instance_type=jsonutils.to_primitive(instance_type),
                           clean_shutdown=True)
         # NOTE(comstud): error path doesn't use objects, so our object

@@ -5882,8 +5806,6 @@ class ComputeTestCase(BaseTestCase,
         instance.refresh()
         self.assertEqual(instance.vm_state, vm_states.ERROR)
         self.compute.terminate_instance(self.context, instance, [], [])
-        self._ensure_quota_reservations(instance, reservations,
-                                        mock_rollback)

     def test_pre_live_migration_instance_has_no_fixed_ip(self):
         # Confirm that no exception is raised if there is no fixed ip on
@@ -7485,17 +7407,15 @@ class ComputeTestCase(BaseTestCase,
         self.compute._reclaim_queued_deletes(ctxt)

         mock_delete.assert_called_once_with(
-            ctxt, test.MatchType(objects.Instance), [],
-            test.MatchType(objects.Quotas))
+            ctxt, test.MatchType(objects.Instance), [])
         mock_bdms.assert_called_once_with(ctxt, mock.ANY)

-    @mock.patch.object(objects.Quotas, 'from_reservations')
     @mock.patch.object(objects.InstanceList, 'get_by_filters')
     @mock.patch.object(compute_manager.ComputeManager, '_deleted_old_enough')
     @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
     @mock.patch.object(compute_manager.ComputeManager, '_delete_instance')
     def test_reclaim_queued_deletes_continue_on_error(self, mock_delete_inst,
-            mock_get_uuid, mock_delete_old, mock_get_filter, mock_quota):
+            mock_get_uuid, mock_delete_old, mock_get_filter):
         # Verify that reclaim continues on error.
         self.flags(reclaim_instance_interval=3600)
         ctxt = context.get_admin_context()

@@ -7515,7 +7435,6 @@ class ComputeTestCase(BaseTestCase,
         mock_delete_old.side_effect = (True, True)
         mock_get_uuid.side_effect = ([], [])
         mock_delete_inst.side_effect = (test.TestingException, None)
-        mock_quota.return_value = self.none_quotas

         self.compute._reclaim_queued_deletes(ctxt)

@@ -7527,9 +7446,8 @@ class ComputeTestCase(BaseTestCase,
         mock_get_uuid.assert_has_calls([mock.call(ctxt, instance1.uuid),
                                         mock.call(ctxt, instance2.uuid)])
         mock_delete_inst.assert_has_calls([
-            mock.call(ctxt, instance1, [], self.none_quotas),
-            mock.call(ctxt, instance2, [], self.none_quotas)])
-        mock_quota.assert_called_once_with(ctxt, None)
+            mock.call(ctxt, instance1, []),
+            mock.call(ctxt, instance2, [])])

     @mock.patch.object(fake.FakeDriver, 'get_info')
     @mock.patch.object(compute_manager.ComputeManager,
@@ -7600,9 +7518,8 @@ class ComputeTestCase(BaseTestCase,
         self.compute.handle_events(event_instance)

     @mock.patch.object(objects.Migration, 'get_by_id')
-    @mock.patch.object(objects.Quotas, 'rollback')
     def test_confirm_resize_roll_back_quota_migration_not_found(self,
-            mock_rollback, mock_get_by_id):
+            mock_get_by_id):
         instance = self._create_fake_instance_obj()

         migration = objects.Migration()

@@ -7614,12 +7531,10 @@ class ComputeTestCase(BaseTestCase,
             migration_id=0)
         self.compute.confirm_resize(self.context, instance=instance,
                                     migration=migration, reservations=[])
-        self.assertTrue(mock_rollback.called)

     @mock.patch.object(instance_obj.Instance, 'get_by_uuid')
-    @mock.patch.object(objects.Quotas, 'rollback')
     def test_confirm_resize_roll_back_quota_instance_not_found(self,
-            mock_rollback, mock_get_by_id):
+            mock_get_by_id):
         instance = self._create_fake_instance_obj()

         migration = objects.Migration()

@@ -7631,12 +7546,10 @@ class ComputeTestCase(BaseTestCase,
             instance_id=instance.uuid)
         self.compute.confirm_resize(self.context, instance=instance,
                                     migration=migration, reservations=[])
-        self.assertTrue(mock_rollback.called)

     @mock.patch.object(objects.Migration, 'get_by_id')
-    @mock.patch.object(objects.Quotas, 'rollback')
     def test_confirm_resize_roll_back_quota_status_confirmed(self,
-            mock_rollback, mock_get_by_id):
+            mock_get_by_id):
         instance = self._create_fake_instance_obj()

         migration = objects.Migration()

@@ -7647,12 +7560,10 @@ class ComputeTestCase(BaseTestCase,
         mock_get_by_id.return_value = migration
         self.compute.confirm_resize(self.context, instance=instance,
                                     migration=migration, reservations=[])
-        self.assertTrue(mock_rollback.called)

     @mock.patch.object(objects.Migration, 'get_by_id')
-    @mock.patch.object(objects.Quotas, 'rollback')
     def test_confirm_resize_roll_back_quota_status_dummy(self,
-            mock_rollback, mock_get_by_id):
+            mock_get_by_id):
         instance = self._create_fake_instance_obj()

         migration = objects.Migration()

@@ -7663,7 +7574,6 @@ class ComputeTestCase(BaseTestCase,
         mock_get_by_id.return_value = migration
         self.compute.confirm_resize(self.context, instance=instance,
                                     migration=migration, reservations=[])
-        self.assertTrue(mock_rollback.called)

     def test_allow_confirm_resize_on_instance_in_deleting_task_state(self):
         instance = self._create_fake_instance_obj()
@@ -8450,8 +8360,10 @@ class ComputeAPITestCase(BaseTestCase):
                        self.fake_show)

         # Simulate a race where the first check passes and the recheck fails.
+        # The first call is for checking instances/cores/ram, second and third
+        # calls are for checking server group members.
         check_deltas_mock.side_effect = [
-            None, exception.OverQuota(overs='server_group_members')]
+            None, None, exception.OverQuota(overs='server_group_members')]

         group = objects.InstanceGroup(self.context)
         group.uuid = uuids.fake

@@ -8466,7 +8378,8 @@ class ComputeAPITestCase(BaseTestCase):
                           scheduler_hints={'group': group.uuid},
                           check_server_group_quota=True)

-        self.assertEqual(2, check_deltas_mock.call_count)
+        # The first call was for the instances/cores/ram check.
+        self.assertEqual(3, check_deltas_mock.call_count)
         call1 = mock.call(self.context, {'server_group_members': 1}, group,
                           self.context.user_id)
         call2 = mock.call(self.context, {'server_group_members': 0}, group,

@@ -8502,10 +8415,19 @@ class ComputeAPITestCase(BaseTestCase):
                 check_server_group_quota=True)
             self.assertEqual(len(refs), len(group.members))

-        # check_deltas should have been called only once.
-        check_deltas_mock.assert_called_once_with(
-            self.context, {'server_group_members': 1}, group,
-            self.context.user_id)
+        # check_deltas should have been called only once for server group
+        # members.
+        self.assertEqual(2, check_deltas_mock.call_count)
+        call1 = mock.call(self.context, {'instances': 1,
+                                         'cores': inst_type.vcpus,
+                                         'ram': inst_type.memory_mb},
+                          self.context.project_id,
+                          user_id=None,
+                          check_project_id=self.context.project_id,
+                          check_user_id=None)
+        call2 = mock.call(self.context, {'server_group_members': 1}, group,
+                          self.context.user_id)
+        check_deltas_mock.assert_has_calls([call1, call2])

         group = objects.InstanceGroup.get_by_uuid(self.context, group.uuid)
         self.assertIn(refs[0]['uuid'], group.members)
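
Aside: the check/recheck pattern these tests exercise can be sketched in a
few lines. This is a self-contained toy, not Nova's implementation; the
Quota class and create_instance helper below are illustrative stand-ins,
while the idea (count usage, check usage + delta, then recheck with a delta
of 0 after the record is created) and the OverQuota failure mode come from
the calls asserted above.

    class OverQuota(Exception):
        pass

    class Quota:
        def __init__(self, limits, count_usage):
            self.limits = limits            # e.g. {'instances': 10}
            self.count_usage = count_usage  # callable returning live counts

        def check_deltas(self, deltas):
            usage = self.count_usage()
            for resource, delta in deltas.items():
                if usage.get(resource, 0) + delta > self.limits[resource]:
                    raise OverQuota(resource)

    def create_instance(quota, db):
        # First check: would one more instance exceed the limit?
        quota.check_deltas({'instances': 1})
        db.append('instance')
        # Recheck with a delta of 0: a racing request may have pushed usage
        # past the limit between the first check and our insert.
        try:
            quota.check_deltas({'instances': 0})
        except OverQuota:
            db.pop()  # remove our own record, then re-raise
            raise

    db = []
    q = Quota({'instances': 1}, lambda: {'instances': len(db)})
    create_instance(q, db)      # succeeds
    try:
        create_instance(q, db)  # now over quota on the first check
    except OverQuota as exc:
        print('over quota:', exc)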
@@ -11913,7 +11835,7 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
         mock_mig.assert_called_once_with(mock.ANY, mock.ANY)
         mock_res.assert_called_once_with(mock.ANY, None, inst_obj, mock.ANY,
-                                         self.instance_type, mock.ANY, {}, {})
+                                         self.instance_type, {}, {})

     @mock.patch.object(compute_manager.ComputeManager, "_reschedule")
     def test_reschedule_fails_with_exception(self, mock_res):

@@ -11922,8 +11844,7 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
         """
         instance = self._create_fake_instance_obj()
         scheduler_hint = dict(filter_properties={})
-        method_args = (instance, None, scheduler_hint, self.instance_type,
-                       None)
+        method_args = (instance, None, scheduler_hint, self.instance_type)
         mock_res.side_effect = InnerTestingException("Inner")

         try:

@@ -11932,8 +11853,7 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
             exc_info = sys.exc_info()
             self.assertRaises(test.TestingException,
                     self.compute._reschedule_resize_or_reraise, self.context,
-                    None, instance, exc_info, self.instance_type,
-                    self.none_quotas, {}, {})
+                    None, instance, exc_info, self.instance_type, {}, {})

             mock_res.assert_called_once_with(
                 self.context, {}, {}, instance,

@@ -11947,8 +11867,7 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
         """
         instance = self._create_fake_instance_obj()
         scheduler_hint = dict(filter_properties={})
-        method_args = (instance, None, scheduler_hint, self.instance_type,
-                       None)
+        method_args = (instance, None, scheduler_hint, self.instance_type)
         mock_res.return_value = False

         try:

@@ -11957,8 +11876,7 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
             exc_info = sys.exc_info()
             self.assertRaises(test.TestingException,
                     self.compute._reschedule_resize_or_reraise, self.context,
-                    None, instance, exc_info, self.instance_type,
-                    self.none_quotas, {}, {})
+                    None, instance, exc_info, self.instance_type, {}, {})

             mock_res.assert_called_once_with(
                 self.context, {}, {}, instance,

@@ -11971,8 +11889,7 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
         # If rescheduled, the original resize exception should be logged.
         instance = self._create_fake_instance_obj()
         scheduler_hint = dict(filter_properties={})
-        method_args = (instance, None, scheduler_hint, self.instance_type,
-                       None)
+        method_args = (instance, None, scheduler_hint, self.instance_type)

         try:
             raise test.TestingException("Original")

@@ -11982,7 +11899,7 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
             self.compute._reschedule_resize_or_reraise(
                 self.context, None, instance, exc_info,
-                self.instance_type, self.none_quotas, {}, {})
+                self.instance_type, {}, {})

             mock_res.assert_called_once_with(self.context, {}, {},
                 instance, self.compute.compute_task_api.resize_instance,
@@ -45,7 +45,6 @@ from nova.objects import block_device as block_device_obj
 from nova.objects import fields as fields_obj
 from nova.objects import quotas as quotas_obj
 from nova.objects import security_group as secgroup_obj
-from nova import quota
 from nova import test
 from nova.tests import fixtures
 from nova.tests.unit import fake_block_device

@@ -168,17 +167,19 @@ class _ComputeAPIUnitTestMixIn(object):
         list_obj.obj_reset_changes()
         return list_obj

+    @mock.patch('nova.objects.Quotas.check_deltas')
     @mock.patch('nova.conductor.conductor_api.ComputeTaskAPI.build_instances')
     @mock.patch('nova.compute.api.API._record_action_start')
     @mock.patch('nova.compute.api.API._check_requested_networks')
-    @mock.patch('nova.quota.QUOTAS.limit_check')
+    @mock.patch('nova.objects.Quotas.limit_check')
     @mock.patch('nova.compute.api.API._get_image')
     @mock.patch('nova.compute.api.API._provision_instances')
     def test_create_with_networks_max_count_none(self, provision_instances,
                                                  get_image, check_limit,
                                                  check_requested_networks,
                                                  record_action_start,
-                                                 build_instances):
+                                                 build_instances,
+                                                 check_deltas):
         # Make sure max_count is checked for None, as Python3 doesn't allow
         # comparison between NoneType and Integer, something that's allowed in
         # Python 2.
@@ -200,21 +201,25 @@ class _ComputeAPIUnitTestMixIn(object):
                           requested_networks=requested_networks,
                           max_count=None)

-    @mock.patch('nova.quota.QUOTAS.reserve')
-    @mock.patch('nova.quota.QUOTAS.limit_check')
-    def test_create_quota_exceeded_messages(self, mock_limit_check,
-                                            mock_reserve):
+    @mock.patch('nova.objects.Quotas.count_as_dict')
+    @mock.patch('nova.objects.Quotas.limit_check')
+    @mock.patch('nova.objects.Quotas.limit_check_project_and_user')
+    def test_create_quota_exceeded_messages(self, mock_limit_check_pu,
+                                            mock_limit_check, mock_count):
         image_href = "image_href"
         image_id = 0
         instance_type = self._create_flavor()

         quotas = {'instances': 1, 'cores': 1, 'ram': 1}
-        usages = {r: {'in_use': 1, 'reserved': 1} for r in
-                  ['instances', 'cores', 'ram']}
         quota_exception = exception.OverQuota(quotas=quotas,
-            usages=usages, overs=['instances'])
+            usages={'instances': 1, 'cores': 1, 'ram': 1},
+            overs=['instances'])

-        mock_reserve.side_effect = quota_exception
+        proj_count = {'instances': 1, 'cores': 1, 'ram': 1}
+        user_count = proj_count.copy()
+        mock_count.return_value = {'project': proj_count, 'user': user_count}
+        # instances/cores/ram quota
+        mock_limit_check_pu.side_effect = quota_exception

         # We don't care about network validation in this test.
         self.compute_api.network_api.validate_networks = (

@@ -232,8 +237,7 @@ class _ComputeAPIUnitTestMixIn(object):
             self.fail("Exception not raised")
         mock_get_image.assert_called_with(self.context, image_href)
         self.assertEqual(2, mock_get_image.call_count)
-        self.assertEqual(2, mock_reserve.call_count)
+        self.assertEqual(2, mock_limit_check_pu.call_count)
         self.assertEqual(2, mock_limit_check.call_count)
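
Aside: a rough, self-contained sketch of the two primitives this test mocks,
assuming only the shapes visible in the diff: count_as_dict() returns
per-project and per-user usage dicts, and limit_check_project_and_user()
compares usage + delta against the limits and raises OverQuota on violation.
The in-memory "db" and the LIMITS values are invented for the example.

    class OverQuota(Exception):
        def __init__(self, overs):
            super().__init__(overs)
            self.overs = overs

    LIMITS = {'instances': 10, 'cores': 20, 'ram': 51200}

    def count_as_dict(db, project_id, user_id):
        # In Nova this counts rows in the database; here we fake it.
        proj = [i for i in db if i['project_id'] == project_id]
        user = [i for i in proj if i['user_id'] == user_id]
        def tally(insts):
            return {'instances': len(insts),
                    'cores': sum(i['vcpus'] for i in insts),
                    'ram': sum(i['memory_mb'] for i in insts)}
        return {'project': tally(proj), 'user': tally(user)}

    def limit_check_project_and_user(counts, deltas):
        overs = [r for r, d in deltas.items()
                 if counts['project'][r] + d > LIMITS[r]
                 or counts['user'][r] + d > LIMITS[r]]
        if overs:
            raise OverQuota(overs)

    db = [{'project_id': 'p1', 'user_id': 'u1',
           'vcpus': 2, 'memory_mb': 2048}]
    counts = count_as_dict(db, 'p1', 'u1')
    limit_check_project_and_user(counts, {'instances': 1, 'cores': 2,
                                          'ram': 2048})  # within limits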
     def _test_create_max_net_count(self, max_net_count, min_count, max_count):
         with test.nested(
@@ -825,17 +829,11 @@ class _ComputeAPIUnitTestMixIn(object):
             self.context.elevated().AndReturn(self.context)
             objects.Migration.get_by_instance_and_status(
                 self.context, inst.uuid, 'finished').AndReturn(migration)
-            compute_utils.downsize_quota_delta(self.context,
-                                               inst).AndReturn('deltas')
-            fake_quotas = objects.Quotas.from_reservations(self.context,
-                                                           ['rsvs'])
-            compute_utils.reserve_quota_delta(self.context, 'deltas',
-                                              inst).AndReturn(fake_quotas)
             self.compute_api._record_action_start(
                 self.context, inst, instance_actions.CONFIRM_RESIZE)
             self.compute_api.compute_rpcapi.confirm_resize(
                 self.context, inst, migration,
-                migration['source_compute'], fake_quotas.reservations,
-                cast=False)
+                migration['source_compute'], cast=False)

     def _test_delete_shelved_part(self, inst):
         image_api = self.compute_api.image_api

@@ -886,7 +884,6 @@ class _ComputeAPIUnitTestMixIn(object):
             self.context, inst.uuid).AndReturn(im)

     def _test_delete(self, delete_type, **attrs):
-        reservations = ['fake-resv']
         inst = self._create_instance_obj()
         inst.update(attrs)
         inst._context = self.context
@@ -904,13 +901,10 @@ class _ComputeAPIUnitTestMixIn(object):
         self.mox.StubOutWithMock(inst, 'save')
         self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
                                  'get_by_instance_uuid')
-        self.mox.StubOutWithMock(quota.QUOTAS, 'reserve')
         self.mox.StubOutWithMock(self.context, 'elevated')
         self.mox.StubOutWithMock(objects.Service, 'get_by_compute_host')
         self.mox.StubOutWithMock(self.compute_api.servicegroup_api,
                                  'service_is_up')
-        self.mox.StubOutWithMock(compute_utils, 'downsize_quota_delta')
-        self.mox.StubOutWithMock(compute_utils, 'reserve_quota_delta')
         self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
         self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
         self.mox.StubOutWithMock(self.compute_api.network_api,

@@ -919,8 +913,6 @@ class _ComputeAPIUnitTestMixIn(object):
         self.mox.StubOutWithMock(db, 'instance_destroy')
         self.mox.StubOutWithMock(compute_utils,
                                  'notify_about_instance_usage')
-        self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
-        self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
         rpcapi = self.compute_api.compute_rpcapi
         self.mox.StubOutWithMock(rpcapi, 'confirm_resize')
         self.mox.StubOutWithMock(self.compute_api.consoleauth_rpcapi,
@@ -942,34 +934,24 @@ class _ComputeAPIUnitTestMixIn(object):
         inst.save()
         if inst.task_state == task_states.RESIZE_FINISH:
             self._test_delete_resizing_part(inst, deltas)
-        quota.QUOTAS.reserve(self.context, project_id=inst.project_id,
-                             user_id=inst.user_id,
-                             expire=mox.IgnoreArg(),
-                             **deltas).AndReturn(reservations)

         # NOTE(comstud): This is getting messy. But what we are wanting
         # to test is:
         # If cells is enabled and we're the API cell:
-        #   * Cast to cells_rpcapi.<method> with reservations=None
-        #   * Commit reservations
+        #   * Cast to cells_rpcapi.<method>
         # Otherwise:
         #   * Check for downed host
         #   * If downed host:
         #     * Clean up instance, destroying it, sending notifications.
         #       (Tested in _test_downed_host_part())
-        #     * Commit reservations
         #   * If not downed host:
         #     * Record the action start.
-        #     * Cast to compute_rpcapi.<method> with the reservations
+        #     * Cast to compute_rpcapi.<method>

         cast = True
-        commit_quotas = True
-        soft_delete = False
         if self.cell_type != 'api':
             if inst.vm_state == vm_states.RESIZED:
                 self._test_delete_resized_part(inst)
-            if inst.vm_state == vm_states.SOFT_DELETED:
-                soft_delete = True
             if inst.vm_state != vm_states.SHELVED_OFFLOADED:
                 self.context.elevated().AndReturn(self.context)
                 objects.Service.get_by_compute_host(self.context,
@@ -984,37 +966,17 @@ class _ComputeAPIUnitTestMixIn(object):
                     self._test_downed_host_part(inst, updates, delete_time,
                                                 delete_type)
                     cast = False
-                else:
-                    # Happens on the manager side
-                    commit_quotas = False

         if cast:
             if self.cell_type != 'api':
                 self.compute_api._record_action_start(self.context, inst,
                                                       instance_actions.DELETE)
-            if commit_quotas or soft_delete:
-                cast_reservations = None
-            else:
-                cast_reservations = reservations
             if delete_type == 'soft_delete':
-                rpcapi.soft_delete_instance(self.context, inst,
-                                            reservations=cast_reservations)
+                rpcapi.soft_delete_instance(self.context, inst)
             elif delete_type in ['delete', 'force_delete']:
                 rpcapi.terminate_instance(self.context, inst, [],
-                                          reservations=cast_reservations,
                                           delete_type=delete_type)

-        if soft_delete:
-            quota.QUOTAS.rollback(self.context, reservations,
-                                  project_id=inst.project_id,
-                                  user_id=inst.user_id)
-        if commit_quotas:
-            # Local delete or when we're testing API cell.
-            quota.QUOTAS.commit(self.context, reservations,
-                                project_id=inst.project_id,
-                                user_id=inst.user_id)

         if self.cell_type is None or self.cell_type == 'api':
             self.compute_api.consoleauth_rpcapi.delete_tokens_for_instance(
                 self.context, inst.uuid)
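
Aside: a toy rendering of the branching the NOTE(comstud) comment above
describes, once quota commit/rollback is gone. FakeRpcAPI, dispatch_delete,
and the method names on it are illustrative only, not Nova's API; the point
is that every branch now just casts or cleans up, with no reservation to
commit or roll back afterwards.

    class FakeRpcAPI:
        def __getattr__(self, name):
            def call(*args, **kwargs):
                print(name, kwargs or '')
            return call

    def dispatch_delete(rpcapi, cell_type, host_is_up, delete_type,
                        ctx, inst):
        if cell_type == 'api':
            # API cell: cast down to the child cell.
            rpcapi.cast_to_child_cell(ctx, inst, delete_type=delete_type)
            return
        if not host_is_up:
            # Downed host: local cleanup, destroy + notifications; with
            # counting quotas there is nothing left to commit.
            rpcapi.local_delete(ctx, inst)
            return
        # Healthy host: record the action and cast to the compute manager.
        rpcapi.record_action_start(ctx, inst)
        if delete_type == 'soft_delete':
            rpcapi.soft_delete_instance(ctx, inst)
        else:
            rpcapi.terminate_instance(ctx, inst, [], delete_type=delete_type)

    dispatch_delete(FakeRpcAPI(), None, True, 'delete', 'ctx', 'inst')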
@@ -1094,7 +1056,6 @@ class _ComputeAPIUnitTestMixIn(object):
         self.useFixture(fixtures.AllServicesCurrent())
         inst = self._create_instance_obj()
         inst.host = ''
-        quotas = quotas_obj.Quotas(self.context)
         updates = {'progress': 0, 'task_state': task_states.DELETING}

         self.mox.StubOutWithMock(objects.BuildRequest,

@@ -1105,7 +1066,6 @@ class _ComputeAPIUnitTestMixIn(object):
         self.mox.StubOutWithMock(db, 'constraint')
         self.mox.StubOutWithMock(db, 'instance_destroy')
-        self.mox.StubOutWithMock(self.compute_api, '_create_reservations')
         self.mox.StubOutWithMock(self.compute_api, '_lookup_instance')
         self.mox.StubOutWithMock(compute_utils,
                                  'notify_about_instance_usage')

@@ -1124,16 +1084,12 @@ class _ComputeAPIUnitTestMixIn(object):
             self.context, inst.uuid).AndRaise(
                 exception.BuildRequestNotFound(uuid=inst.uuid))
         inst.save()
-        self.compute_api._create_reservations(self.context,
-                                              inst, inst.task_state,
-                                              inst.project_id, inst.user_id
-                                              ).AndReturn(quotas)

         if self.cell_type == 'api':
             rpcapi.terminate_instance(
                 self.context, inst,
                 mox.IsA(objects.BlockDeviceMappingList),
-                reservations=None, delete_type='delete')
+                delete_type='delete')
         else:
             compute_utils.notify_about_instance_usage(
                 self.compute_api.notifier, self.context,
@@ -1418,81 +1374,51 @@ class _ComputeAPIUnitTestMixIn(object):
     def test_delete_while_booting_buildreq_deleted_instance_none(self):
         self.useFixture(fixtures.AllServicesCurrent())
         inst = self._create_instance_obj()
-        quota_mock = mock.MagicMock()

         @mock.patch.object(self.compute_api,
                            '_attempt_delete_of_buildrequest',
                            return_value=True)
         @mock.patch.object(self.compute_api, '_lookup_instance',
                            return_value=(None, None))
-        @mock.patch.object(self.compute_api, '_create_reservations',
-                           return_value=quota_mock)
-        def test(mock_create_res, mock_lookup, mock_attempt):
+        def test(mock_lookup, mock_attempt):
             self.assertTrue(
                 self.compute_api._delete_while_booting(self.context,
                                                        inst))
-            self.assertFalse(quota_mock.commit.called)
-            quota_mock.rollback.assert_called_once_with()

         test()

     def test_delete_while_booting_buildreq_deleted_instance_not_found(self):
         self.useFixture(fixtures.AllServicesCurrent())
         inst = self._create_instance_obj()
-        quota_mock = mock.MagicMock()

         @mock.patch.object(self.compute_api,
                            '_attempt_delete_of_buildrequest',
                            return_value=True)
         @mock.patch.object(self.compute_api, '_lookup_instance',
                            side_effect=exception.InstanceNotFound(
                                instance_id='fake'))
-        @mock.patch.object(self.compute_api, '_create_reservations',
-                           return_value=quota_mock)
-        def test(mock_create_res, mock_lookup, mock_attempt):
+        def test(mock_lookup, mock_attempt):
             self.assertTrue(
                 self.compute_api._delete_while_booting(self.context,
                                                        inst))
-            self.assertFalse(quota_mock.commit.called)
-            self.assertTrue(quota_mock.rollback.called)

         test()

-    @ddt.data((task_states.RESIZE_MIGRATED, True),
-              (task_states.RESIZE_FINISH, True),
-              (None, False))
-    @ddt.unpack
-    def test_get_flavor_for_reservation(self, task_state, is_old):
-        instance = self._create_instance_obj({'task_state': task_state})
-        flavor = self.compute_api._get_flavor_for_reservation(instance)
-        expected_flavor = instance.old_flavor if is_old else instance.flavor
-        self.assertEqual(expected_flavor, flavor)

-    @mock.patch('nova.context.target_cell')
     @mock.patch('nova.compute.utils.notify_about_instance_delete')
     @mock.patch('nova.objects.Instance.destroy')
-    def test_delete_instance_from_cell0(self, destroy_mock, notify_mock,
-                                        target_cell_mock):
+    def test_delete_instance_from_cell0(self, destroy_mock, notify_mock):
         """Tests the case that the instance does not have a host and was not
         deleted while building, so conductor put it into cell0 so the API has
-        to delete the instance from cell0 and decrement the quota from the
-        main cell.
+        to delete the instance from cell0.
         """
         instance = self._create_instance_obj({'host': None})
         cell0 = objects.CellMapping(uuid=objects.CellMapping.CELL0_UUID)
-        quota_mock = mock.MagicMock()
-        target_cell_mock().__enter__.return_value = mock.sentinel.cctxt

         with test.nested(
             mock.patch.object(self.compute_api, '_delete_while_booting',
                               return_value=False),
             mock.patch.object(self.compute_api, '_lookup_instance',
                               return_value=(cell0, instance)),
-            mock.patch.object(self.compute_api, '_get_flavor_for_reservation',
-                              return_value=instance.flavor),
-            mock.patch.object(self.compute_api, '_create_reservations',
-                              return_value=quota_mock)
         ) as (
             _delete_while_booting, _lookup_instance,
-            _get_flavor_for_reservation, _create_reservations
         ):
             self.compute_api._delete(
                 self.context, instance, 'delete', mock.NonCallableMock())
@@ -1500,81 +1426,10 @@ class _ComputeAPIUnitTestMixIn(object):
                 self.context, instance)
             _lookup_instance.assert_called_once_with(
                 self.context, instance.uuid)
-            _get_flavor_for_reservation.assert_called_once_with(instance)
-            _create_reservations.assert_called_once_with(
-                mock.sentinel.cctxt, instance, instance.task_state,
-                self.context.project_id, instance.user_id,
-                flavor=instance.flavor)
-            quota_mock.commit.assert_called_once_with()
-            expected_target_cell_calls = [
-                # Create the quota reservation.
-                mock.call(self.context, None),
-                mock.call().__enter__(),
-                mock.call().__exit__(None, None, None),
-            ]
-            target_cell_mock.assert_has_calls(expected_target_cell_calls)
             notify_mock.assert_called_once_with(
                 self.compute_api.notifier, self.context, instance)
             destroy_mock.assert_called_once_with()

-    @mock.patch('nova.context.target_cell')
-    @mock.patch('nova.compute.utils.notify_about_instance_delete')
-    @mock.patch('nova.objects.Instance.destroy')
-    @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
-    def test_delete_instance_from_cell0_rollback_quota(
-            self, bdms_get_by_instance_uuid, destroy_mock, notify_mock,
-            target_cell_mock):
-        """Tests the case that the instance does not have a host and was not
-        deleted while building, so conductor put it into cell0 so the API has
-        to delete the instance from cell0 and decrement the quota from the
-        main cell. When we go to delete the instance, it's already gone so we
-        rollback the quota change.
-        """
-        instance = self._create_instance_obj({'host': None})
-        cell0 = objects.CellMapping(uuid=objects.CellMapping.CELL0_UUID)
-        quota_mock = mock.MagicMock()
-        destroy_mock.side_effect = exception.InstanceNotFound(
-            instance_id=instance.uuid)
-        target_cell_mock().__enter__.return_value = mock.sentinel.cctxt
-
-        with test.nested(
-            mock.patch.object(self.compute_api, '_delete_while_booting',
-                              return_value=False),
-            mock.patch.object(self.compute_api, '_lookup_instance',
-                              return_value=(cell0, instance)),
-            mock.patch.object(self.compute_api, '_get_flavor_for_reservation',
-                              return_value=instance.flavor),
-            mock.patch.object(self.compute_api, '_create_reservations',
-                              return_value=quota_mock)
-        ) as (
-            _delete_while_booting, _lookup_instance,
-            _get_flavor_for_reservation, _create_reservations
-        ):
-            self.compute_api._delete(
-                self.context, instance, 'delete', mock.NonCallableMock())
-            _delete_while_booting.assert_called_once_with(
-                self.context, instance)
-            _lookup_instance.assert_called_once_with(
-                self.context, instance.uuid)
-            _get_flavor_for_reservation.assert_called_once_with(instance)
-            _create_reservations.assert_called_once_with(
-                mock.sentinel.cctxt, instance, instance.task_state,
-                self.context.project_id, instance.user_id,
-                flavor=instance.flavor)
-            notify_mock.assert_called_once_with(
-                self.compute_api.notifier, self.context, instance)
-            destroy_mock.assert_called_once_with()
-            expected_target_cell_calls = [
-                # Create the quota reservation.
-                mock.call(self.context, None),
-                mock.call().__enter__(),
-                mock.call().__exit__(None, None, None),
-            ]
-            target_cell_mock.assert_has_calls(expected_target_cell_calls)
-            quota_mock.rollback.assert_called_once_with()
-            # Make sure we short-circuited and returned.
-            bdms_get_by_instance_uuid.assert_not_called()

     @mock.patch.object(context, 'target_cell')
     @mock.patch.object(objects.InstanceMapping, 'get_by_instance_uuid',
                        side_effect=exception.InstanceMappingNotFound(
@@ -1645,10 +1500,7 @@ class _ComputeAPIUnitTestMixIn(object):
         self.mox.StubOutWithMock(self.context, 'elevated')
         self.mox.StubOutWithMock(objects.Migration,
                                  'get_by_instance_and_status')
-        self.mox.StubOutWithMock(compute_utils, 'downsize_quota_delta')
-        self.mox.StubOutWithMock(compute_utils, 'reserve_quota_delta')
         self.mox.StubOutWithMock(fake_mig, 'save')
-        self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
         self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
         self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
                                  'confirm_resize')

@@ -1658,30 +1510,17 @@ class _ComputeAPIUnitTestMixIn(object):
         objects.Migration.get_by_instance_and_status(
                 self.context, fake_inst['uuid'], 'finished').AndReturn(
                         fake_mig)
-        compute_utils.downsize_quota_delta(self.context,
-                                           fake_inst).AndReturn('deltas')
-        resvs = ['resvs']
-        fake_quotas = objects.Quotas.from_reservations(self.context, resvs)
-        compute_utils.reserve_quota_delta(self.context, 'deltas',
-                                          fake_inst).AndReturn(fake_quotas)

         def _check_mig(expected_task_state=None):
             self.assertEqual('confirming', fake_mig.status)

         fake_mig.save().WithSideEffects(_check_mig)

-        if self.cell_type:
-            quota.QUOTAS.commit(self.context, resvs, project_id=None,
-                                user_id=None)

         self.compute_api._record_action_start(self.context, fake_inst,
                                               'confirmResize')

         self.compute_api.compute_rpcapi.confirm_resize(
-                self.context, fake_inst, fake_mig, 'compute-source',
-                [] if self.cell_type else fake_quotas.reservations)
+                self.context, fake_inst, fake_mig, 'compute-source')

         self.mox.ReplayAll()
@@ -1697,28 +1536,20 @@ class _ComputeAPIUnitTestMixIn(object):
     def test_confirm_resize_with_migration_ref(self):
         self._test_confirm_resize(mig_ref_passed=True)

-    @mock.patch('nova.quota.QUOTAS.commit')
-    @mock.patch.object(compute_utils, 'reserve_quota_delta')
-    @mock.patch.object(compute_utils, 'reverse_upsize_quota_delta')
+    @mock.patch('nova.objects.Quotas.check_deltas')
     @mock.patch('nova.objects.Migration.get_by_instance_and_status')
     @mock.patch('nova.context.RequestContext.elevated')
     def _test_revert_resize(self, mock_elevated, mock_get_migration,
-                            mock_reverse_upsize_quota_delta,
-                            mock_reserve_quota_delta,
-                            mock_quota_commit):
+                            mock_check):
         params = dict(vm_state=vm_states.RESIZED)
         fake_inst = self._create_instance_obj(params=params)
+        fake_inst.old_flavor = fake_inst.flavor
         fake_mig = objects.Migration._from_db_object(
                 self.context, objects.Migration(),
                 test_migration.fake_db_migration())
-        resvs = ['resvs']
-        fake_quotas = objects.Quotas.from_reservations(self.context, resvs)

         mock_elevated.return_value = self.context
         mock_get_migration.return_value = fake_mig
-        mock_reverse_upsize_quota_delta.return_value = 'deltas'
-        mock_reserve_quota_delta.return_value = fake_quotas

         def _check_state(expected_task_state=None):
             self.assertEqual(task_states.RESIZE_REVERTING,
@@ -1739,47 +1570,30 @@ class _ComputeAPIUnitTestMixIn(object):
         mock_elevated.assert_called_once_with()
         mock_get_migration.assert_called_once_with(
             self.context, fake_inst['uuid'], 'finished')
-        mock_reverse_upsize_quota_delta.assert_called_once_with(
-            self.context, fake_inst)
-        mock_reserve_quota_delta.assert_called_once_with(
-            self.context, 'deltas', fake_inst)
         mock_inst_save.assert_called_once_with(expected_task_state=[None])
         mock_mig_save.assert_called_once_with()
-        if self.cell_type:
-            mock_quota_commit.assert_called_once_with(
-                self.context, resvs, project_id=None, user_id=None)
         mock_record_action.assert_called_once_with(self.context, fake_inst,
                                                    'revertResize')
         mock_revert_resize.assert_called_once_with(
-            self.context, fake_inst, fake_mig, 'compute-dest',
-            [] if self.cell_type else fake_quotas.reservations)
+            self.context, fake_inst, fake_mig, 'compute-dest')

     def test_revert_resize(self):
         self._test_revert_resize()

-    @mock.patch('nova.quota.QUOTAS.rollback')
-    @mock.patch.object(compute_utils, 'reserve_quota_delta')
-    @mock.patch.object(compute_utils, 'reverse_upsize_quota_delta')
+    @mock.patch('nova.objects.Quotas.check_deltas')
     @mock.patch('nova.objects.Migration.get_by_instance_and_status')
     @mock.patch('nova.context.RequestContext.elevated')
     def test_revert_resize_concurrent_fail(self, mock_elevated,
-                                           mock_get_migration,
-                                           mock_reverse_upsize_quota_delta,
-                                           mock_reserve_quota_delta,
-                                           mock_quota_rollback):
+                                           mock_get_migration, mock_check):
         params = dict(vm_state=vm_states.RESIZED)
         fake_inst = self._create_instance_obj(params=params)
+        fake_inst.old_flavor = fake_inst.flavor
         fake_mig = objects.Migration._from_db_object(
                 self.context, objects.Migration(),
                 test_migration.fake_db_migration())

         mock_elevated.return_value = self.context
         mock_get_migration.return_value = fake_mig
-        delta = ['delta']
-        mock_reverse_upsize_quota_delta.return_value = delta
-        resvs = ['resvs']
-        fake_quotas = objects.Quotas.from_reservations(self.context, resvs)
-        mock_reserve_quota_delta.return_value = fake_quotas

         exc = exception.UnexpectedTaskStateError(
             instance_uuid=fake_inst['uuid'],
@@ -1796,13 +1610,7 @@ class _ComputeAPIUnitTestMixIn(object):
         mock_elevated.assert_called_once_with()
         mock_get_migration.assert_called_once_with(
             self.context, fake_inst['uuid'], 'finished')
-        mock_reverse_upsize_quota_delta.assert_called_once_with(
-            self.context, fake_inst)
-        mock_reserve_quota_delta.assert_called_once_with(
-            self.context, delta, fake_inst)
         mock_inst_save.assert_called_once_with(expected_task_state=[None])
-        mock_quota_rollback.assert_called_once_with(
-            self.context, resvs, project_id=None, user_id=None)

     def _test_resize(self, flavor_id_passed=True,
                      same_host=False, allow_same_host=False,
@@ -1823,9 +1631,10 @@ class _ComputeAPIUnitTestMixIn(object):
         self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
         self.mox.StubOutWithMock(compute_utils, 'upsize_quota_delta')
-        self.mox.StubOutWithMock(compute_utils, 'reserve_quota_delta')
         self.mox.StubOutWithMock(fake_inst, 'save')
-        self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
+        self.mox.StubOutWithMock(quotas_obj.Quotas, 'count_as_dict')
+        self.mox.StubOutWithMock(quotas_obj.Quotas,
+                                 'limit_check_project_and_user')
         self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
         self.mox.StubOutWithMock(objects.RequestSpec, 'get_by_instance_uuid')
         self.mox.StubOutWithMock(self.compute_api.compute_task_api,
@@ -1845,17 +1654,30 @@ class _ComputeAPIUnitTestMixIn(object):
         if (self.cell_type == 'compute' or
                 not (flavor_id_passed and same_flavor)):
-            resvs = ['resvs']
             project_id, user_id = quotas_obj.ids_from_instance(self.context,
                                                                fake_inst)
-            fake_quotas = objects.Quotas.from_reservations(self.context,
-                                                           resvs)
             if flavor_id_passed:
                 compute_utils.upsize_quota_delta(
                     self.context, mox.IsA(objects.Flavor),
-                    mox.IsA(objects.Flavor)).AndReturn('deltas')
-                compute_utils.reserve_quota_delta(
-                    self.context, 'deltas', fake_inst).AndReturn(fake_quotas)
+                    mox.IsA(objects.Flavor)).AndReturn({'cores': 0, 'ram': 0})
+
+                proj_count = {'instances': 1, 'cores': current_flavor.vcpus,
+                              'ram': current_flavor.memory_mb}
+                user_count = proj_count.copy()
+                # mox.IgnoreArg() might be 'instances', 'cores', or 'ram'
+                # depending on how the deltas dict is iterated in check_deltas
+                quotas_obj.Quotas.count_as_dict(self.context, mox.IgnoreArg(),
+                                                project_id,
+                                                user_id=user_id).AndReturn(
+                                                    {'project': proj_count,
+                                                     'user': user_count})
+                # The current and new flavor have the same cores/ram
+                req_cores = current_flavor.vcpus
+                req_ram = current_flavor.memory_mb
+                values = {'cores': req_cores, 'ram': req_ram}
+                quotas_obj.Quotas.limit_check_project_and_user(
+                    self.context, user_values=values, project_values=values,
+                    project_id=project_id, user_id=user_id)

         def _check_state(expected_task_state=None):
             self.assertEqual(task_states.RESIZE_PREP,
@@ -1872,15 +1694,7 @@ class _ComputeAPIUnitTestMixIn(object):
         else:
             filter_properties = {'ignore_hosts': [fake_inst['host']]}

-        if flavor_id_passed:
-            expected_reservations = fake_quotas.reservations
-        else:
-            expected_reservations = []
         if self.cell_type == 'api':
-            if flavor_id_passed:
-                quota.QUOTAS.commit(self.context, resvs, project_id=None,
-                                    user_id=None)
-                expected_reservations = []
             mig = objects.Migration()

             def _get_migration(context=None):
@@ -1922,7 +1736,6 @@ class _ComputeAPIUnitTestMixIn(object):
                 self.context, fake_inst, extra_kwargs,
                 scheduler_hint=scheduler_hint,
                 flavor=mox.IsA(objects.Flavor),
-                reservations=expected_reservations,
                 clean_shutdown=clean_shutdown,
                 request_spec=fake_spec)
@@ -1964,6 +1777,37 @@ class _ComputeAPIUnitTestMixIn(object):
     def test_resize_forced_shutdown(self):
         self._test_resize(clean_shutdown=False)
+
+    @mock.patch('nova.compute.flavors.get_flavor_by_flavor_id')
+    @mock.patch('nova.objects.Quotas.count_as_dict')
+    @mock.patch('nova.objects.Quotas.limit_check_project_and_user')
+    def test_resize_quota_check(self, mock_check, mock_count, mock_get):
+        self.flags(cores=1, group='quota')
+        self.flags(ram=2048, group='quota')
+        proj_count = {'instances': 1, 'cores': 1, 'ram': 1024}
+        user_count = proj_count.copy()
+        mock_count.return_value = {'project': proj_count,
+                                   'user': user_count}
+        cur_flavor = objects.Flavor(id=1, name='foo', vcpus=1, memory_mb=512,
+                                    root_gb=10, disabled=False)
+        fake_inst = self._create_instance_obj()
+        fake_inst.flavor = cur_flavor
+        new_flavor = objects.Flavor(id=2, name='bar', vcpus=1, memory_mb=2048,
+                                    root_gb=10, disabled=False)
+        mock_get.return_value = new_flavor
+        mock_check.side_effect = exception.OverQuota(
+            overs=['ram'], quotas={'cores': 1, 'ram': 2048},
+            usages={'instances': 1, 'cores': 1, 'ram': 2048},
+            headroom={'ram': 2048})
+        self.assertRaises(exception.TooManyInstances, self.compute_api.resize,
+                          self.context, fake_inst, flavor_id='new')
+        mock_check.assert_called_once_with(
+            self.context,
+            user_values={'cores': 1, 'ram': 2560},
+            project_values={'cores': 1, 'ram': 2560},
+            project_id=fake_inst.project_id, user_id=fake_inst.user_id)
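The 2560 MB expected above is count-plus-delta arithmetic: 1024 MB already counted in use plus the 1536 MB upsize delta (2048 new minus 512 old); the cores delta is zero, so cores stays at the counted 1. A quick check of that arithmetic:

    counted = {'cores': 1, 'ram': 1024}           # from mock_count above
    delta = {'cores': 1 - 1, 'ram': 2048 - 512}   # new_flavor minus cur_flavor
    expected = {k: counted[k] + delta[k] for k in counted}
    assert expected == {'cores': 1, 'ram': 2560}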
     def test_migrate(self):
         self._test_migrate()
@@ -1982,8 +1826,9 @@ class _ComputeAPIUnitTestMixIn(object):
     def test_resize_invalid_flavor_fails(self):
         self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
         # Should never reach these.
-        self.mox.StubOutWithMock(compute_utils, 'reserve_quota_delta')
-        self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
+        self.mox.StubOutWithMock(quotas_obj.Quotas, 'count_as_dict')
+        self.mox.StubOutWithMock(quotas_obj.Quotas,
+                                 'limit_check_project_and_user')
         self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
         self.mox.StubOutWithMock(self.compute_api.compute_task_api,
                                  'resize_instance')
@@ -2005,8 +1850,9 @@ class _ComputeAPIUnitTestMixIn(object):
     def test_resize_disabled_flavor_fails(self):
         self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
         # Should never reach these.
-        self.mox.StubOutWithMock(compute_utils, 'reserve_quota_delta')
-        self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
+        self.mox.StubOutWithMock(quotas_obj.Quotas, 'count_as_dict')
+        self.mox.StubOutWithMock(quotas_obj.Quotas,
+                                 'limit_check_project_and_user')
         self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
         self.mox.StubOutWithMock(self.compute_api.compute_task_api,
                                  'resize_instance')
@@ -2072,9 +1918,10 @@ class _ComputeAPIUnitTestMixIn(object):
     def test_resize_quota_exceeds_fails(self):
         self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
         self.mox.StubOutWithMock(compute_utils, 'upsize_quota_delta')
-        self.mox.StubOutWithMock(compute_utils, 'reserve_quota_delta')
+        self.mox.StubOutWithMock(quotas_obj.Quotas, 'count_as_dict')
+        self.mox.StubOutWithMock(quotas_obj.Quotas,
+                                 'limit_check_project_and_user')
         # Should never reach these.
-        self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
         self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
         self.mox.StubOutWithMock(self.compute_api.compute_task_api,
                                  'resize_instance')
@@ -2084,21 +1931,34 @@ class _ComputeAPIUnitTestMixIn(object):
                                           name='foo', disabled=False)
         flavors.get_flavor_by_flavor_id(
                 'flavor-id', read_deleted='no').AndReturn(fake_flavor)
-        deltas = dict(resource=0)
+        deltas = dict(cores=0)
         compute_utils.upsize_quota_delta(
             self.context, mox.IsA(objects.Flavor),
             mox.IsA(objects.Flavor)).AndReturn(deltas)
-        usage = dict(in_use=0, reserved=0)
-        quotas = {'resource': 0}
-        usages = {'resource': usage}
-        overs = ['resource']
+        quotas = {'cores': 0}
+        overs = ['cores']
         over_quota_args = dict(quotas=quotas,
-                               usages=usages,
+                               usages={'instances': 1, 'cores': 1, 'ram': 512},
                                overs=overs)
-        compute_utils.reserve_quota_delta(self.context, deltas,
-                                          fake_inst).AndRaise(
-            exception.OverQuota(**over_quota_args))
+        proj_count = {'instances': 1, 'cores': fake_inst.flavor.vcpus,
+                      'ram': fake_inst.flavor.memory_mb}
+        user_count = proj_count.copy()
+        # mox.IgnoreArg() might be 'instances', 'cores', or 'ram'
+        # depending on how the deltas dict is iterated in check_deltas
+        quotas_obj.Quotas.count_as_dict(self.context, mox.IgnoreArg(),
+                                        fake_inst.project_id,
+                                        user_id=fake_inst.user_id).AndReturn(
+                                            {'project': proj_count,
+                                             'user': user_count})
+        req_cores = fake_inst.flavor.vcpus
+        req_ram = fake_inst.flavor.memory_mb
+        values = {'cores': req_cores, 'ram': req_ram}
+        quotas_obj.Quotas.limit_check_project_and_user(
+            self.context, user_values=values, project_values=values,
+            project_id=fake_inst.project_id,
+            user_id=fake_inst.user_id).AndRaise(
+                exception.OverQuota(**over_quota_args))

         self.mox.ReplayAll()
@@ -2110,8 +1970,9 @@ class _ComputeAPIUnitTestMixIn(object):
     @mock.patch.object(flavors, 'get_flavor_by_flavor_id')
     @mock.patch.object(compute_utils, 'upsize_quota_delta')
-    @mock.patch.object(compute_utils, 'reserve_quota_delta')
-    def test_resize_quota_exceeds_fails_instance(self, mock_reserve,
+    @mock.patch.object(quotas_obj.Quotas, 'count_as_dict')
+    @mock.patch.object(quotas_obj.Quotas, 'limit_check_project_and_user')
+    def test_resize_quota_exceeds_fails_instance(self, mock_check, mock_count,
                                                  mock_upsize, mock_flavor):
         fake_inst = self._create_instance_obj()
         fake_flavor = self._create_flavor(id=200, flavorid='flavor-id',
@@ -2119,14 +1980,12 @@ class _ComputeAPIUnitTestMixIn(object):
         mock_flavor.return_value = fake_flavor
         deltas = dict(cores=1, ram=1)
         mock_upsize.return_value = deltas
-        usage = dict(in_use=0, reserved=0)
         quotas = {'instances': 1, 'cores': -1, 'ram': -1}
-        usages = {'instances': usage, 'cores': usage, 'ram': usage}
         overs = ['ram']
         over_quota_args = dict(quotas=quotas,
-                               usages=usages,
+                               usages={'instances': 1, 'cores': 1, 'ram': 512},
                                overs=overs)
-        mock_reserve.side_effect = exception.OverQuota(**over_quota_args)
+        mock_check.side_effect = exception.OverQuota(**over_quota_args)

         with mock.patch.object(fake_inst, 'save') as mock_save:
             self.assertRaises(exception.TooManyInstances,
@@ -2134,45 +1993,21 @@ class _ComputeAPIUnitTestMixIn(object):
                               fake_inst, flavor_id='flavor-id')
             self.assertFalse(mock_save.called)

-    def test_check_instance_quota_exceeds_with_multiple_resources(self):
-        quotas = {'cores': 1, 'instances': 1, 'ram': 512}
-        usages = {'cores': dict(in_use=1, reserved=0),
-                  'instances': dict(in_use=1, reserved=0),
-                  'ram': dict(in_use=512, reserved=0)}
-        overs = ['cores', 'instances', 'ram']
-        over_quota_args = dict(quotas=quotas,
-                               usages=usages,
-                               overs=overs)
-        e = exception.OverQuota(**over_quota_args)
-        fake_flavor = self._create_flavor()
-        instance_num = 1
-        with mock.patch.object(objects.Quotas, 'reserve', side_effect=e):
-            try:
-                self.compute_api._check_num_instances_quota(self.context,
-                                                            fake_flavor,
-                                                            instance_num,
-                                                            instance_num)
-            except exception.TooManyInstances as e:
-                self.assertEqual('cores, instances, ram', e.kwargs['overs'])
-                self.assertEqual('1, 1, 512', e.kwargs['req'])
-                self.assertEqual('1, 1, 512', e.kwargs['used'])
-                self.assertEqual('1, 1, 512', e.kwargs['allowed'])
-            else:
-                self.fail("Exception not raised")
-
     @mock.patch.object(flavors, 'get_flavor_by_flavor_id')
-    @mock.patch.object(objects.Quotas, 'reserve')
+    @mock.patch.object(objects.Quotas, 'count_as_dict')
+    @mock.patch.object(objects.Quotas, 'limit_check_project_and_user')
     def test_resize_instance_quota_exceeds_with_multiple_resources(
-            self, mock_reserve, mock_get_flavor):
+            self, mock_check, mock_count, mock_get_flavor):
         quotas = {'cores': 1, 'ram': 512}
-        usages = {'cores': dict(in_use=1, reserved=0),
-                  'ram': dict(in_use=512, reserved=0)}
         overs = ['cores', 'ram']
         over_quota_args = dict(quotas=quotas,
-                               usages=usages,
+                               usages={'instances': 1, 'cores': 1, 'ram': 512},
                                overs=overs)
-        mock_reserve.side_effect = exception.OverQuota(**over_quota_args)
+        proj_count = {'instances': 1, 'cores': 1, 'ram': 512}
+        user_count = proj_count.copy()
+        mock_count.return_value = {'project': proj_count, 'user': user_count}
+        mock_check.side_effect = exception.OverQuota(**over_quota_args)
         mock_get_flavor.return_value = self._create_flavor(id=333,
                                                            vcpus=3,
                                                            memory_mb=1536)
@@ -3365,8 +3200,8 @@ class _ComputeAPIUnitTestMixIn(object):
                 "contents": "foo"
             }
         ]
-        with mock.patch.object(quota.QUOTAS, 'limit_check',
-                               side_effect=side_effect):
+        with mock.patch('nova.objects.Quotas.limit_check',
+                        side_effect=side_effect):
             self.compute_api._check_injected_file_quota(
                 self.context, injected_files)
@@ -3393,15 +3228,18 @@ class _ComputeAPIUnitTestMixIn(object):
             self._test_check_injected_file_quota_onset_file_limit_exceeded,
             side_effect)

-    @mock.patch('nova.objects.Quotas.commit')
-    @mock.patch('nova.objects.Quotas.reserve')
+    @mock.patch('nova.objects.Quotas.count_as_dict')
+    @mock.patch('nova.objects.Quotas.limit_check_project_and_user')
    @mock.patch('nova.objects.Instance.save')
    @mock.patch('nova.objects.InstanceAction.action_start')
    def test_restore_by_admin(self, action_start, instance_save,
-                              quota_reserve, quota_commit):
+                              quota_check, quota_count):
         admin_context = context.RequestContext('admin_user',
                                                'admin_project',
                                                True)
+        proj_count = {'instances': 1, 'cores': 1, 'ram': 512}
+        user_count = proj_count.copy()
+        quota_count.return_value = {'project': proj_count, 'user': user_count}
         instance = self._create_instance_obj()
         instance.vm_state = vm_states.SOFT_DELETED
         instance.task_state = None
@@ -3411,17 +3249,30 @@ class _ComputeAPIUnitTestMixIn(object):
             rpc.restore_instance.assert_called_once_with(admin_context,
                                                          instance)
         self.assertEqual(instance.task_state, task_states.RESTORING)
-        self.assertEqual(1, quota_commit.call_count)
-        quota_reserve.assert_called_once_with(instances=1,
-            cores=instance.flavor.vcpus, ram=instance.flavor.memory_mb,
+        # mock.ANY might be 'instances', 'cores', or 'ram' depending on how the
+        # deltas dict is iterated in check_deltas
+        quota_count.assert_called_once_with(admin_context, mock.ANY,
+                                            instance.project_id,
+                                            user_id=instance.user_id)
+        quota_check.assert_called_once_with(
+            admin_context,
+            user_values={'instances': 2,
+                         'cores': 1 + instance.flavor.vcpus,
+                         'ram': 512 + instance.flavor.memory_mb},
+            project_values={'instances': 2,
+                            'cores': 1 + instance.flavor.vcpus,
+                            'ram': 512 + instance.flavor.memory_mb},
             project_id=instance.project_id, user_id=instance.user_id)
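The expected values follow the same count-plus-delta pattern: restoring a soft-deleted instance re-consumes one instance's worth of resources on top of the counted usage. A small illustrative helper (hypothetical, not Nova code):

    def restore_values(counts, flavor_vcpus, flavor_ram_mb):
        # Current counted usage plus one restored instance's resources.
        return {'instances': counts['instances'] + 1,
                'cores': counts['cores'] + flavor_vcpus,
                'ram': counts['ram'] + flavor_ram_mb}

    assert restore_values({'instances': 1, 'cores': 1, 'ram': 512}, 1, 512) == \
        {'instances': 2, 'cores': 2, 'ram': 1024}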
-    @mock.patch('nova.objects.Quotas.commit')
-    @mock.patch('nova.objects.Quotas.reserve')
+    @mock.patch('nova.objects.Quotas.count_as_dict')
+    @mock.patch('nova.objects.Quotas.limit_check_project_and_user')
    @mock.patch('nova.objects.Instance.save')
    @mock.patch('nova.objects.InstanceAction.action_start')
    def test_restore_by_instance_owner(self, action_start, instance_save,
-                                       quota_reserve, quota_commit):
+                                       quota_check, quota_count):
+        proj_count = {'instances': 1, 'cores': 1, 'ram': 512}
+        user_count = proj_count.copy()
+        quota_count.return_value = {'project': proj_count, 'user': user_count}
         instance = self._create_instance_obj()
         instance.vm_state = vm_states.SOFT_DELETED
         instance.task_state = None
@@ -3432,9 +3283,19 @@ class _ComputeAPIUnitTestMixIn(object):
                                                          instance)
         self.assertEqual(instance.project_id, self.context.project_id)
         self.assertEqual(instance.task_state, task_states.RESTORING)
-        self.assertEqual(1, quota_commit.call_count)
-        quota_reserve.assert_called_once_with(instances=1,
-            cores=instance.flavor.vcpus, ram=instance.flavor.memory_mb,
+        # mock.ANY might be 'instances', 'cores', or 'ram' depending on how the
+        # deltas dict is iterated in check_deltas
+        quota_count.assert_called_once_with(self.context, mock.ANY,
+                                            instance.project_id,
+                                            user_id=instance.user_id)
+        quota_check.assert_called_once_with(
+            self.context,
+            user_values={'instances': 2,
+                         'cores': 1 + instance.flavor.vcpus,
+                         'ram': 512 + instance.flavor.memory_mb},
+            project_values={'instances': 2,
+                            'cores': 1 + instance.flavor.vcpus,
+                            'ram': 512 + instance.flavor.memory_mb},
             project_id=instance.project_id, user_id=instance.user_id)

     @mock.patch.object(objects.InstanceAction, 'action_start')
@@ -3649,7 +3510,7 @@ class _ComputeAPIUnitTestMixIn(object):
     def _test_provision_instances_with_cinder_error(self,
                                                     expected_exception):
-        @mock.patch.object(self.compute_api, '_check_num_instances_quota')
+        @mock.patch('nova.compute.utils.check_num_instances_quota')
         @mock.patch.object(objects.Instance, 'create')
         @mock.patch.object(self.compute_api.security_group_api,
                            'ensure_default')
@@ -3658,10 +3519,9 @@ class _ComputeAPIUnitTestMixIn(object):
         def do_test(
                 mock_req_spec_from_components, _mock_create_bdm,
                 _mock_ensure_default, _mock_create, mock_check_num_inst_quota):
-            quota_mock = mock.MagicMock()
             req_spec_mock = mock.MagicMock()
-            mock_check_num_inst_quota.return_value = (1, quota_mock)
+            mock_check_num_inst_quota.return_value = 1
             mock_req_spec_from_components.return_value = req_spec_mock

             ctxt = context.RequestContext('fake-user', 'fake-project')
@@ -3746,7 +3606,7 @@ class _ComputeAPIUnitTestMixIn(object):
                     mock_br, mock_rs):
         fake_keypair = objects.KeyPair(name='test')

-        @mock.patch.object(self.compute_api, '_check_num_instances_quota')
+        @mock.patch('nova.compute.utils.check_num_instances_quota')
         @mock.patch.object(self.compute_api, 'security_group_api')
         @mock.patch.object(self.compute_api,
                            'create_db_entry_for_new_instance')
@@ -3754,7 +3614,7 @@ class _ComputeAPIUnitTestMixIn(object):
                            '_bdm_validate_set_size_and_instance')
         @mock.patch.object(self.compute_api, '_create_block_device_mapping')
         def do_test(mock_cbdm, mock_bdm_v, mock_cdb, mock_sg, mock_cniq):
-            mock_cniq.return_value = 1, mock.MagicMock()
+            mock_cniq.return_value = 1
             self.compute_api._provision_instances(self.context,
                                                   mock.sentinel.flavor,
                                                   1, 1, mock.MagicMock(),
@@ -3780,7 +3640,7 @@ class _ComputeAPIUnitTestMixIn(object):
     def test_provision_instances_creates_build_request(self):
         @mock.patch.object(objects.Instance, 'create')
         @mock.patch.object(self.compute_api, 'volume_api')
-        @mock.patch.object(self.compute_api, '_check_num_instances_quota')
+        @mock.patch('nova.compute.utils.check_num_instances_quota')
         @mock.patch.object(self.compute_api.security_group_api,
                            'ensure_default')
         @mock.patch.object(objects.RequestSpec, 'from_components')
@@ -3795,8 +3655,7 @@ class _ComputeAPIUnitTestMixIn(object):
             min_count = 1
             max_count = 2
-            quota_mock = mock.MagicMock()
-            mock_check_num_inst_quota.return_value = (2, quota_mock)
+            mock_check_num_inst_quota.return_value = 2
             ctxt = context.RequestContext('fake-user', 'fake-project')
             flavor = self._create_flavor()
@@ -3859,7 +3718,7 @@ class _ComputeAPIUnitTestMixIn(object):
         do_test()

     def test_provision_instances_creates_instance_mapping(self):
-        @mock.patch.object(self.compute_api, '_check_num_instances_quota')
+        @mock.patch('nova.compute.utils.check_num_instances_quota')
         @mock.patch.object(objects.Instance, 'create', new=mock.MagicMock())
         @mock.patch.object(self.compute_api.security_group_api,
                            'ensure_default', new=mock.MagicMock())
@@ -3873,10 +3732,9 @@ class _ComputeAPIUnitTestMixIn(object):
                     new=mock.MagicMock())
         @mock.patch('nova.objects.InstanceMapping')
         def do_test(mock_inst_mapping, mock_check_num_inst_quota):
-            quota_mock = mock.MagicMock()
             inst_mapping_mock = mock.MagicMock()
-            mock_check_num_inst_quota.return_value = (1, quota_mock)
+            mock_check_num_inst_quota.return_value = 1
             mock_inst_mapping.return_value = inst_mapping_mock

             ctxt = context.RequestContext('fake-user', 'fake-project')
@@ -3947,7 +3805,7 @@ class _ComputeAPIUnitTestMixIn(object):
             _mock_cinder_reserve_volume,
             _mock_cinder_check_availability_zone, _mock_cinder_get,
             _mock_get_min_ver):
-        @mock.patch.object(self.compute_api, '_check_num_instances_quota')
+        @mock.patch('nova.compute.utils.check_num_instances_quota')
         @mock.patch.object(objects, 'Instance')
         @mock.patch.object(self.compute_api.security_group_api,
                            'ensure_default')
@@ -3958,11 +3816,10 @@ class _ComputeAPIUnitTestMixIn(object):
         def do_test(mock_inst_mapping, mock_build_req,
                     mock_req_spec_from_components, _mock_create_bdm,
                     _mock_ensure_default, mock_inst, mock_check_num_inst_quota):
-            quota_mock = mock.MagicMock()
             min_count = 1
             max_count = 2
-            mock_check_num_inst_quota.return_value = (2, quota_mock)
+            mock_check_num_inst_quota.return_value = 2
             req_spec_mock = mock.MagicMock()
             mock_req_spec_from_components.return_value = req_spec_mock
             inst_mocks = [mock.MagicMock() for i in range(max_count)]
@@ -4036,7 +3893,7 @@ class _ComputeAPIUnitTestMixIn(object):
         do_test()

     def test_provision_instances_creates_reqspec_with_secgroups(self):
-        @mock.patch.object(self.compute_api, '_check_num_instances_quota')
+        @mock.patch('nova.compute.utils.check_num_instances_quota')
         @mock.patch.object(self.compute_api, 'security_group_api')
         @mock.patch.object(compute_api, 'objects')
         @mock.patch.object(self.compute_api, '_create_block_device_mapping',
@@ -4049,7 +3906,7 @@ class _ComputeAPIUnitTestMixIn(object):
                     new=mock.MagicMock())
         def test(mock_objects, mock_secgroup, mock_cniq):
             ctxt = context.RequestContext('fake-user', 'fake-project')
-            mock_cniq.return_value = (1, mock.MagicMock())
+            mock_cniq.return_value = 1
             self.compute_api._provision_instances(ctxt, None, None, None,
                                                   mock.MagicMock(), None, None,
                                                   [], None, None, None, None,


@@ -33,6 +33,8 @@ from nova.compute import vm_states
 import nova.conf
 from nova import context
 from nova import db
+from nova.db.sqlalchemy import api as db_api
+from nova.db.sqlalchemy import api_models
 from nova import exception
 from nova import objects
 from nova.objects import fields as obj_fields
@@ -160,6 +162,81 @@ class CellsComputeAPITestCase(test_compute.ComputeAPITestCase):
     def test_create_instance_associates_security_groups(self):
         self.skipTest("Test is incompatible with cells.")

+    @mock.patch('nova.objects.quotas.Quotas.check_deltas')
+    def test_create_instance_over_quota_during_recheck(
+            self, check_deltas_mock):
+        self.stub_out('nova.tests.unit.image.fake._FakeImageService.show',
+                      self.fake_show)
+        # Simulate a race where the first check passes and the recheck fails.
+        fake_quotas = {'instances': 5, 'cores': 10, 'ram': 4096}
+        fake_headroom = {'instances': 5, 'cores': 10, 'ram': 4096}
+        fake_usages = {'instances': 5, 'cores': 10, 'ram': 4096}
+        exc = exception.OverQuota(overs=['instances'], quotas=fake_quotas,
+                                  headroom=fake_headroom, usages=fake_usages)
+        check_deltas_mock.side_effect = [None, exc]
+
+        inst_type = flavors.get_default_flavor()
+        # Try to create 3 instances.
+        self.assertRaises(exception.QuotaError, self.compute_api.create,
+                          self.context, inst_type, self.fake_image['id'],
+                          min_count=3)
+
+        project_id = self.context.project_id
+
+        self.assertEqual(2, check_deltas_mock.call_count)
+        call1 = mock.call(self.context,
+                          {'instances': 3, 'cores': inst_type.vcpus * 3,
+                           'ram': inst_type.memory_mb * 3},
+                          project_id, user_id=None,
+                          check_project_id=project_id, check_user_id=None)
+        call2 = mock.call(self.context,
+                          {'instances': 0, 'cores': 0, 'ram': 0},
+                          project_id, user_id=None,
+                          check_project_id=project_id, check_user_id=None)
+        check_deltas_mock.assert_has_calls([call1, call2])
+
+        # Verify we removed the artifacts that were added after the first
+        # quota check passed.
+        instances = objects.InstanceList.get_all(self.context)
+        self.assertEqual(0, len(instances))
+        build_requests = objects.BuildRequestList.get_all(self.context)
+        self.assertEqual(0, len(build_requests))
+
+        @db_api.api_context_manager.reader
+        def request_spec_get_all(context):
+            return context.session.query(api_models.RequestSpec).all()
+
+        request_specs = request_spec_get_all(self.context)
+        self.assertEqual(0, len(request_specs))
+
+        instance_mappings = objects.InstanceMappingList.get_by_project_id(
+            self.context, project_id)
+        self.assertEqual(0, len(instance_mappings))
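The two calls asserted above illustrate the check/recheck pattern that replaces reservations: check with the requested deltas, create the records, then recheck with zero deltas and clean up if a racing request won. A sketch of that pattern, with hypothetical helper names:

    class OverQuota(Exception):
        """Stand-in for nova.exception.OverQuota in this sketch."""

    def create_with_recheck(quotas, context, deltas, project_id, build):
        # First check: would usage plus the requested deltas exceed a limit?
        quotas.check_deltas(context, deltas, project_id,
                            check_project_id=project_id)
        artifacts = build()  # instance record, build request, mappings...
        try:
            # Recheck with zero deltas: fails only if a parallel request
            # pushed usage over the limit between the check and the create.
            quotas.check_deltas(context, {r: 0 for r in deltas}, project_id,
                                check_project_id=project_id)
        except OverQuota:
            for artifact in artifacts:
                artifact.destroy()  # roll back what this request created
            raise
        return artifacts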
+    @mock.patch('nova.objects.quotas.Quotas.check_deltas')
+    def test_create_instance_no_quota_recheck(
+            self, check_deltas_mock):
+        self.stub_out('nova.tests.unit.image.fake._FakeImageService.show',
+                      self.fake_show)
+        # Disable recheck_quota.
+        self.flags(recheck_quota=False, group='quota')
+
+        inst_type = flavors.get_default_flavor()
+        (refs, resv_id) = self.compute_api.create(self.context,
+                                                  inst_type,
+                                                  self.fake_image['id'])
+        self.assertEqual(1, len(refs))
+
+        project_id = self.context.project_id
+
+        # check_deltas should have been called only once.
+        check_deltas_mock.assert_called_once_with(self.context,
+                                                  {'instances': 1,
+                                                   'cores': inst_type.vcpus,
+                                                   'ram': inst_type.memory_mb},
+                                                  project_id, user_id=None,
+                                                  check_project_id=project_id,
+                                                  check_user_id=None)
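The `self.flags(recheck_quota=False, group='quota')` call above is the test-side equivalent of an operator setting the `recheck_quota` option in the `[quota]` configuration group to False, trading the race-detecting second check for one fewer quota check per boot.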
     @mock.patch.object(compute_api.API, '_local_delete')
     @mock.patch.object(compute_api.API, '_lookup_instance',
                        return_value=(None, None))


@@ -154,7 +154,6 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
         specd_compute._delete_instance(specd_compute,
                                        self.context,
                                        mock_inst,
-                                       mock.Mock(),
                                        mock.Mock())

         methods_called = [n for n, a, k in call_tracker.mock_calls]
@@ -277,7 +276,6 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
                 vm_state=vm_states.ERROR,
                 host=self.compute.host,
                 expected_attrs=['system_metadata'])
-        quotas = mock.create_autospec(objects.Quotas, spec_set=True)

         with test.nested(
             mock.patch.object(self.compute, '_notify_about_instance_usage'),
@@ -290,7 +288,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
             instance_obj_load_attr, instance_save, instance_destroy
         ):
             instance.info_cache = None
-            self.compute._delete_instance(self.context, instance, [], quotas)
+            self.compute._delete_instance(self.context, instance, [])

         mock_notify.assert_has_calls([
             mock.call(self.context, instance, 'fake-mini',
@@ -779,11 +777,9 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
     @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
     @mock.patch.object(objects.Instance, 'destroy')
     @mock.patch.object(objects.Instance, 'obj_load_attr')
-    @mock.patch.object(objects.quotas.Quotas, 'commit')
-    @mock.patch.object(objects.quotas.Quotas, 'reserve')
     @mock.patch.object(objects.quotas, 'ids_from_instance')
     def test_init_instance_complete_partial_deletion(
-            self, mock_ids_from_instance, mock_reserve, mock_commit,
+            self, mock_ids_from_instance,
             mock_inst_destroy, mock_obj_load_attr, mock_get_by_instance_uuid,
             mock_bdm_destroy):
         """Test to complete deletion for instances in DELETED status but not
@@ -810,10 +806,6 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
         self.assertEqual(vm_states.DELETED, instance.vm_state)
         self.assertFalse(instance.deleted)

-        deltas = {'instances': -1,
-                  'cores': -instance.flavor.vcpus,
-                  'ram': -instance.flavor.memory_mb}
-
         def fake_inst_destroy():
             instance.deleted = True
             instance.deleted_at = timeutils.utcnow()
@@ -826,12 +818,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
         # Make sure that instance.destroy method was called and
         # instance was deleted from db.
-        self.assertTrue(mock_reserve.called)
-        self.assertTrue(mock_commit.called)
         self.assertNotEqual(0, instance.deleted)
-        mock_reserve.assert_called_once_with(project_id=instance.project_id,
-                                             user_id=instance.user_id,
-                                             **deltas)

     @mock.patch('nova.compute.manager.LOG')
     def test_init_instance_complete_partial_deletion_raises_exception(
@@ -872,24 +859,18 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
             task_state=task_states.DELETING)
         bdms = []
-        quotas = objects.quotas.Quotas(self.context)

         with test.nested(
                 mock.patch.object(objects.BlockDeviceMappingList,
                                   'get_by_instance_uuid',
                                   return_value=bdms),
                 mock.patch.object(self.compute, '_delete_instance'),
-                mock.patch.object(instance, 'obj_load_attr'),
-                mock.patch.object(self.compute, '_create_reservations',
-                                  return_value=quotas)
-        ) as (mock_get, mock_delete, mock_load, mock_create):
+                mock.patch.object(instance, 'obj_load_attr')
+        ) as (mock_get, mock_delete, mock_load):
             self.compute._init_instance(self.context, instance)
             mock_get.assert_called_once_with(self.context, instance.uuid)
-            mock_create.assert_called_once_with(self.context, instance,
-                                                instance.project_id,
-                                                instance.user_id)
             mock_delete.assert_called_once_with(self.context, instance,
-                                                bdms, mock.ANY)
+                                                bdms)
     @mock.patch.object(objects.Instance, 'get_by_uuid')
     @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
@@ -910,7 +891,6 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
             expected_attrs=['metadata', 'system_metadata'])
         bdms = []
-        reservations = ['fake-resv']

         def _create_patch(name, attr):
             patcher = mock.patch.object(name, attr)
@@ -921,10 +901,6 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
         mock_delete_instance = _create_patch(self.compute, '_delete_instance')
         mock_set_instance_error_state = _create_patch(
             self.compute, '_set_instance_obj_error_state')
-        mock_create_reservations = _create_patch(self.compute,
-                                                 '_create_reservations')

-        mock_create_reservations.return_value = reservations
         mock_get_by_instance_uuid.return_value = bdms
         mock_get_by_uuid.return_value = instance
         mock_delete_instance.side_effect = test.TestingException('test')
@@ -1173,28 +1149,18 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
             host=self.compute.host,
             task_state=task_states.DELETING)
         bdms = []
-        quotas = objects.quotas.Quotas(self.context)

         with test.nested(
                 mock.patch.object(objects.BlockDeviceMappingList,
                                   'get_by_instance_uuid',
                                   return_value=bdms),
                 mock.patch.object(self.compute, '_delete_instance'),
-                mock.patch.object(instance, 'obj_load_attr'),
-                mock.patch.object(self.compute, '_create_reservations',
-                                  return_value=quotas),
-                mock.patch.object(objects.quotas, 'ids_from_instance',
-                                  return_value=(instance.project_id,
-                                                instance.user_id))
-        ) as (mock_get, mock_delete, mock_load, mock_create, mock_ids):
+                mock.patch.object(instance, 'obj_load_attr')
+        ) as (mock_get, mock_delete, mock_load):
             self.compute._init_instance(self.context, instance)
             mock_get.assert_called_once_with(self.context, instance.uuid)
-            mock_create.assert_called_once_with(self.context, instance,
-                                                instance.project_id,
-                                                instance.user_id)
             mock_delete.assert_called_once_with(self.context, instance,
-                                                bdms, mock.ANY)
-            mock_ids.assert_called_once_with(self.context, instance)
+                                                bdms)
     def test_init_instance_resize_prep(self):
         instance = fake_instance.fake_instance_obj(
@@ -3425,16 +3391,13 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
     def test_error_out_instance_on_exception_unknown_with_quotas(self,
                                                                  set_error):
         instance = fake_instance.fake_instance_obj(self.context)
-        quotas = mock.create_autospec(objects.Quotas, spec_set=True)

         def do_test():
             with self.compute._error_out_instance_on_exception(
-                    self.context, instance, quotas):
+                    self.context, instance):
                 raise test.TestingException('test')

         self.assertRaises(test.TestingException, do_test)
-        self.assertEqual(1, len(quotas.method_calls))
-        self.assertEqual(mock.call.rollback(), quotas.method_calls[0])
         set_error.assert_called_once_with(self.context, instance)

     def test_cleanup_volumes(self):
@@ -3871,7 +3834,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
         mock_bdm_get_by_inst.assert_called_once_with(
             self.context, instance.uuid)
         mock_delete_instance.assert_called_once_with(
-            self.context, instance, bdms, mock.ANY)
+            self.context, instance, bdms)

     @mock.patch.object(nova.compute.manager.ComputeManager,
                        '_notify_about_instance_usage')
@@ -5501,7 +5464,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
                 mock.ANY, mock.ANY, not is_shared)
             mock_finish_revert.assert_called_once_with(
                 self.context, self.instance, self.migration,
-                self.migration.source_compute, mock.ANY)
+                self.migration.source_compute)

         do_test()


@@ -928,9 +928,9 @@ class ComputeUtilsTestCase(test.NoDBTestCase):
         mock_notify_usage.assert_has_calls(expected_notify_calls)


-class ComputeUtilsQuotaDeltaTestCase(test.TestCase):
+class ComputeUtilsQuotaTestCase(test.TestCase):
     def setUp(self):
-        super(ComputeUtilsQuotaDeltaTestCase, self).setUp()
+        super(ComputeUtilsQuotaTestCase, self).setUp()
         self.context = context.RequestContext('fake', 'fake')

     def test_upsize_quota_delta(self):
@@ -995,6 +995,35 @@ class ComputeUtilsQuotaDeltaTestCase(test.TestCase):
         mock_reserve.assert_called_once_with(project_id=inst.project_id,
                                              user_id=inst.user_id, **deltas)

+    @mock.patch('nova.objects.Quotas.count_as_dict')
+    def test_check_instance_quota_exceeds_with_multiple_resources(self,
+                                                                  mock_count):
+        quotas = {'cores': 1, 'instances': 1, 'ram': 512}
+        overs = ['cores', 'instances', 'ram']
+        over_quota_args = dict(quotas=quotas,
+                               usages={'instances': 1, 'cores': 1, 'ram': 512},
+                               overs=overs)
+        e = exception.OverQuota(**over_quota_args)
+        fake_flavor = objects.Flavor(vcpus=1, memory_mb=512)
+        instance_num = 1
+        proj_count = {'instances': 1, 'cores': 1, 'ram': 512}
+        user_count = proj_count.copy()
+        mock_count.return_value = {'project': proj_count, 'user': user_count}
+        with mock.patch.object(objects.Quotas, 'limit_check_project_and_user',
+                               side_effect=e):
+            try:
+                compute_utils.check_num_instances_quota(self.context,
+                                                        fake_flavor,
+                                                        instance_num,
+                                                        instance_num)
+            except exception.TooManyInstances as e:
+                self.assertEqual('cores, instances, ram', e.kwargs['overs'])
+                self.assertEqual('1, 1, 512', e.kwargs['req'])
+                self.assertEqual('1, 1, 512', e.kwargs['used'])
+                self.assertEqual('1, 1, 512', e.kwargs['allowed'])
+            else:
+                self.fail("Exception not raised")
 class IsVolumeBackedInstanceTestCase(test.TestCase):
     def setUp(self):


@@ -58,17 +58,13 @@ class MigrationTaskTestCase(test.NoDBTestCase):
     @mock.patch.object(scheduler_utils, 'setup_instance_group')
     @mock.patch.object(scheduler_client.SchedulerClient, 'select_destinations')
     @mock.patch.object(compute_rpcapi.ComputeAPI, 'prep_resize')
-    @mock.patch.object(objects.Quotas, 'from_reservations')
-    def test_execute(self, quotas_mock, prep_resize_mock,
-                     sel_dest_mock, sig_mock, az_mock):
+    def test_execute(self, prep_resize_mock, sel_dest_mock, sig_mock, az_mock):
         sel_dest_mock.return_value = self.hosts
         az_mock.return_value = 'myaz'
         task = self._generate_task()
         legacy_request_spec = self.request_spec.to_legacy_request_spec_dict()
         task.execute()

-        quotas_mock.assert_called_once_with(self.context, self.reservations,
-                                            instance=self.instance)
         sig_mock.assert_called_once_with(self.context, self.request_spec)
         task.scheduler_client.select_destinations.assert_called_once_with(
             self.context, self.request_spec, [self.instance.uuid])
@@ -78,11 +74,4 @@ class MigrationTaskTestCase(test.NoDBTestCase):
             request_spec=legacy_request_spec,
             filter_properties=self.filter_properties,
             node=self.hosts[0]['nodename'], clean_shutdown=self.clean_shutdown)
-        self.assertFalse(quotas_mock.return_value.rollback.called)
         az_mock.assert_called_once_with(self.context, 'host1')
-
-    def test_rollback(self):
-        task = self._generate_task()
-        task.quotas = mock.MagicMock()
-        task.rollback()
-        task.quotas.rollback.assert_called_once_with()


@@ -38,6 +38,8 @@ from nova.conductor.tasks import migrate
 from nova import conf
 from nova import context
 from nova import db
+from nova.db.sqlalchemy import api as db_api
+from nova.db.sqlalchemy import api_models
 from nova import exception as exc
 from nova.image import api as image_api
 from nova import objects
@@ -1337,9 +1339,14 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
             self.ctxt, instance_uuid=build_request.instance.uuid,
             cell_mapping=None, project_id=self.ctxt.project_id)
         im.create()
-        params['request_specs'] = [objects.RequestSpec(
-            instance_uuid=build_request.instance_uuid,
-            instance_group=None)]
+        rs = fake_request_spec.fake_spec_obj(remove_id=True)
+        rs._context = self.ctxt
+        rs.instance_uuid = build_request.instance_uuid
+        rs.instance_group = None
+        rs.retry = None
+        rs.limits = None
+        rs.create()
+        params['request_specs'] = [rs]
         params['image'] = {'fake_data': 'should_pass_silently'}
         params['admin_password'] = 'admin_password',
         params['injected_files'] = 'injected_files'
@@ -1677,6 +1684,73 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
         self.assertTrue(bury.called)
         self.assertFalse(build_and_run.called)

+    @mock.patch('nova.objects.quotas.Quotas.check_deltas')
+    @mock.patch('nova.scheduler.rpcapi.SchedulerAPI.select_destinations')
+    def test_schedule_and_build_over_quota_during_recheck(self, mock_select,
+                                                          mock_check):
+        mock_select.return_value = [{'host': 'fake-host',
+                                     'nodename': 'fake-nodename',
+                                     'limits': None}]
+        # Simulate a race where the first check passes and the recheck fails.
+        # First check occurs in compute/api.
+        fake_quotas = {'instances': 5, 'cores': 10, 'ram': 4096}
+        fake_headroom = {'instances': 5, 'cores': 10, 'ram': 4096}
+        fake_usages = {'instances': 5, 'cores': 10, 'ram': 4096}
+        e = exc.OverQuota(overs=['instances'], quotas=fake_quotas,
+                          headroom=fake_headroom, usages=fake_usages)
+        mock_check.side_effect = e
+
+        # This is needed to register the compute node in a cell.
+        self.start_service('compute', host='fake-host')
+        self.assertRaises(
+            exc.TooManyInstances,
+            self.conductor.schedule_and_build_instances, **self.params)
+
+        project_id = self.params['context'].project_id
+
+        mock_check.assert_called_once_with(
+            self.params['context'], {'instances': 0, 'cores': 0, 'ram': 0},
+            project_id, user_id=None, check_project_id=project_id,
+            check_user_id=None)
+
+        # Verify we set the instance to ERROR state and set the fault message.
+        instances = objects.InstanceList.get_all(self.ctxt)
+        self.assertEqual(1, len(instances))
+        instance = instances[0]
+        self.assertEqual(vm_states.ERROR, instance.vm_state)
+        self.assertIsNone(instance.task_state)
+        self.assertIn('Quota exceeded', instance.fault.message)
+
+        # Verify we removed the build objects.
+        build_requests = objects.BuildRequestList.get_all(self.ctxt)
+        self.assertEqual(0, len(build_requests))
+
+        @db_api.api_context_manager.reader
+        def request_spec_get_all(context):
+            return context.session.query(api_models.RequestSpec).all()
+
+        request_specs = request_spec_get_all(self.ctxt)
+        self.assertEqual(0, len(request_specs))
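The all-zero deltas asserted above make the conductor-side recheck a pure usage-versus-limit validation; nothing new is claimed, so it trips only if a racing request already consumed the headroom. A sketch of the idea (illustrative, not Nova code):

    def recheck_overs(usage, limits):
        # With zero deltas, only already-exceeded limits can fail the check.
        return [res for res in limits if usage[res] > limits[res]]

    assert recheck_overs({'instances': 6}, {'instances': 5}) == ['instances']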
+    @mock.patch('nova.compute.rpcapi.ComputeAPI.build_and_run_instance')
+    @mock.patch('nova.objects.quotas.Quotas.check_deltas')
+    @mock.patch('nova.scheduler.rpcapi.SchedulerAPI.select_destinations')
+    def test_schedule_and_build_no_quota_recheck(self, mock_select,
+                                                 mock_check, mock_build):
+        mock_select.return_value = [{'host': 'fake-host',
+                                     'nodename': 'fake-nodename',
+                                     'limits': None}]
+        # Disable recheck_quota.
+        self.flags(recheck_quota=False, group='quota')
+        # This is needed to register the compute node in a cell.
+        self.start_service('compute', host='fake-host')
+        self.conductor.schedule_and_build_instances(**self.params)
+
+        # check_deltas should not have been called a second time. The first
+        # check occurs in compute/api.
+        mock_check.assert_not_called()
+
+        self.assertTrue(mock_build.called)
     @mock.patch('nova.objects.CellMapping.get_by_uuid')
     def test_bury_in_cell0_no_cell0(self, mock_cm_get):
         mock_cm_get.side_effect = exc.CellMappingNotFound(uuid='0')
@@ -1893,13 +1967,12 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
     @mock.patch.object(objects.RequestSpec, 'from_components')
     @mock.patch.object(scheduler_utils, 'setup_instance_group')
     @mock.patch.object(utils, 'get_image_from_system_metadata')
-    @mock.patch.object(objects.Quotas, 'from_reservations')
     @mock.patch.object(scheduler_client.SchedulerClient, 'select_destinations')
     @mock.patch.object(conductor_manager.ComputeTaskManager,
                        '_set_vm_state_and_notify')
     @mock.patch.object(migrate.MigrationTask, 'rollback')
     def test_cold_migrate_no_valid_host_back_in_active_state(
-            self, rollback_mock, notify_mock, select_dest_mock, quotas_mock,
+            self, rollback_mock, notify_mock, select_dest_mock,
             metadata_mock, sig_mock, spec_fc_mock, im_mock):
         flavor = flavors.get_flavor_by_name('m1.tiny')
         inst_obj = objects.Instance(
@@ -1934,8 +2007,6 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
                                          flavor, {}, [resvs],
                                          True, None)
         metadata_mock.assert_called_with({})
-        quotas_mock.assert_called_once_with(self.context, [resvs],
-                                            instance=inst_obj)
         sig_mock.assert_called_once_with(self.context, fake_spec)
         notify_mock.assert_called_once_with(self.context, inst_obj.uuid,
                                             'migrate_server', updates,
@@ -1946,13 +2017,12 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
     @mock.patch.object(scheduler_utils, 'setup_instance_group')
     @mock.patch.object(objects.RequestSpec, 'from_components')
     @mock.patch.object(utils, 'get_image_from_system_metadata')
-    @mock.patch.object(objects.Quotas, 'from_reservations')
     @mock.patch.object(scheduler_client.SchedulerClient, 'select_destinations')
     @mock.patch.object(conductor_manager.ComputeTaskManager,
                        '_set_vm_state_and_notify')
     @mock.patch.object(migrate.MigrationTask, 'rollback')
     def test_cold_migrate_no_valid_host_back_in_stopped_state(
-            self, rollback_mock, notify_mock, select_dest_mock, quotas_mock,
+            self, rollback_mock, notify_mock, select_dest_mock,
             metadata_mock, spec_fc_mock, sig_mock, im_mock):
         flavor = flavors.get_flavor_by_name('m1.tiny')
         inst_obj = objects.Instance(
@@ -1988,8 +2058,6 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
                                          flavor, {}, [resvs],
                                          True, None)
         metadata_mock.assert_called_with({})
-        quotas_mock.assert_called_once_with(self.context, [resvs],
-                                            instance=inst_obj)
         sig_mock.assert_called_once_with(self.context, fake_spec)
         notify_mock.assert_called_once_with(self.context, inst_obj.uuid,
                                             'migrate_server', updates,
@@ -2072,7 +2140,6 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
     @mock.patch.object(scheduler_utils, 'setup_instance_group')
     @mock.patch.object(objects.RequestSpec, 'from_components')
     @mock.patch.object(utils, 'get_image_from_system_metadata')
-    @mock.patch.object(objects.Quotas, 'from_reservations')
     @mock.patch.object(scheduler_client.SchedulerClient, 'select_destinations')
     @mock.patch.object(conductor_manager.ComputeTaskManager,
                        '_set_vm_state_and_notify')
@@ -2080,7 +2147,7 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
     @mock.patch.object(compute_rpcapi.ComputeAPI, 'prep_resize')
     def test_cold_migrate_exception_host_in_error_state_and_raise(
             self, prep_resize_mock, rollback_mock, notify_mock,
-            select_dest_mock, quotas_mock, metadata_mock, spec_fc_mock,
+            select_dest_mock, metadata_mock, spec_fc_mock,
             sig_mock, im_mock):
         flavor = flavors.get_flavor_by_name('m1.tiny')
         inst_obj = objects.Instance(
@@ -2123,8 +2190,6 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
                        'limits': {}}

         metadata_mock.assert_called_with({})
-        quotas_mock.assert_called_once_with(self.context, [resvs],
-                                            instance=inst_obj)
         sig_mock.assert_called_once_with(self.context, fake_spec)
         select_dest_mock.assert_called_once_with(
             self.context, fake_spec, [inst_obj.uuid])


@@ -124,6 +124,12 @@ def fake_instance_obj(context, obj_instance_class=None, **updates):
     inst.keypairs = objects.KeyPairList(objects=[])
     if flavor:
         inst.flavor = flavor
+        # This is needed for instance quota counting until we have the
+        # ability to count allocations in placement.
+        if 'vcpus' in flavor and 'vcpus' not in updates:
+            inst.vcpus = flavor.vcpus
+        if 'memory_mb' in flavor and 'memory_mb' not in updates:
+            inst.memory_mb = flavor.memory_mb
     inst.old_flavor = None
     inst.new_flavor = None
     inst.obj_reset_changes()
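With vcpus and memory_mb materialized on each instance, usage can be counted straight from instance records instead of being tracked in reservation/usage rows; an illustrative (non-Nova) counting helper:

    def count_usage(instances):
        # Usage derived by counting rows, not by maintaining usage records.
        return {'instances': len(instances),
                'cores': sum(inst['vcpus'] for inst in instances),
                'ram': sum(inst['memory_mb'] for inst in instances)}

    assert count_usage([{'vcpus': 1, 'memory_mb': 512}]) == \
        {'instances': 1, 'cores': 1, 'ram': 512}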


@@ -1112,7 +1112,7 @@ object_data = {
     'InstanceGroup': '1.10-1a0c8c7447dc7ecb9da53849430c4a5f',
     'InstanceGroupList': '1.8-90f8f1a445552bb3bbc9fa1ae7da27d4',
     'InstanceInfoCache': '1.5-cd8b96fefe0fc8d4d337243ba0bf0e1e',
-    'InstanceList': '2.3-7a3c541e6e7b5a75afe7afe125f9712d',
+    'InstanceList': '2.4-d2c5723da8c1d08e07cb00160edfd292',
     'InstanceMapping': '1.0-65de80c491f54d19374703c0753c4d47',
     'InstanceMappingList': '1.2-ee638619aa3d8a82a59c0c83bfa64d78',
     'InstanceNUMACell': '1.4-7c1eb9a198dee076b4de0840e45f4f55',


@@ -363,12 +363,6 @@ class ProjectCommandsTestCase(test.TestCase):
         self.assertIsNone(self.commands.quota_usage_refresh(
             'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'))

-    def test_quota_usage_refresh_with_keys(self):
-        self.assertIsNone(self.commands.quota_usage_refresh(
-            'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
-            'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaab',
-            'ram'))
-
     def test_quota_usage_refresh_invalid_user_key(self):
         self.assertEqual(2, self.commands.quota_usage_refresh(
             'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',

File diff suppressed because it is too large