Merge "Use instance mappings to count server group members"
This commit is contained in:
@@ -432,3 +432,28 @@ class InstanceMappingList(base.ObjectListBase, base.NovaObject):
|
||||
'user': {'instances': <count across user>}}
|
||||
"""
|
||||
return cls._get_counts_in_db(context, project_id, user_id=user_id)
|
||||
|
||||
@staticmethod
@db_api.api_context_manager.reader
def _get_count_by_uuids_and_user_in_db(context, uuids, user_id):
    """Count not-queued-for-delete mappings for the UUIDs owned by a user.

    :param context: The request context for API database access
    :param uuids: List of instance UUIDs to match against
    :param user_id: The user_id to match against
    :returns: An integer count of matching InstanceMapping records
    """
    # Build the aggregate query one filter at a time; SQLAlchemy queries
    # are immutable, so each call returns a new, further-restricted query.
    count_query = context.session.query(
        func.count(api_models.InstanceMapping.id))
    count_query = count_query.filter(
        api_models.InstanceMapping.instance_uuid.in_(uuids))
    # Mappings queued for delete represent deleted/SOFT_DELETED instances
    # and must not be counted.
    count_query = count_query.filter_by(queued_for_delete=False)
    count_query = count_query.filter_by(user_id=user_id)
    return count_query.scalar()
@classmethod
def get_count_by_uuids_and_user(cls, context, uuids, user_id):
    """Get the count of InstanceMapping objects by UUIDs and user_id.

    The count is used to represent the count of server group members
    belonging to a particular user, for the purpose of counting quota
    usage. Instances that are queued_for_delete=True are not included in
    the count (deleted and SOFT_DELETED instances).

    :param context: The request context for database access
    :param uuids: List of instance UUIDs on which to filter
    :param user_id: The user_id on which to filter
    :returns: An integer for the count
    """
    # Delegate the actual counting to the DB-context-wrapped helper.
    return cls._get_count_by_uuids_and_user_in_db(
        context, uuids, user_id)
@@ -43,6 +43,10 @@ PLACEMENT_CLIENT = None
|
||||
# If user_id and queued_for_delete are populated for a project, cache the
|
||||
# result to avoid doing unnecessary EXISTS database queries.
|
||||
UID_QFD_POPULATED_CACHE_BY_PROJECT = set()
|
||||
# For the server group members check, we do not scope to a project, so if all
|
||||
# user_id and queued_for_delete are populated for all projects, cache the
|
||||
# result to avoid doing unnecessary EXISTS database queries.
|
||||
UID_QFD_POPULATED_CACHE_ALL = False
|
||||
|
||||
|
||||
class DbQuotaDriver(object):
|
||||
@@ -1126,15 +1130,13 @@ def _security_group_count(context, project_id, user_id=None):
|
||||
user_id=user_id)
|
||||
|
||||
|
||||
def _server_group_count_members_by_user(context, group, user_id):
|
||||
def _server_group_count_members_by_user_legacy(context, group, user_id):
|
||||
# NOTE(melwitt): This is mostly duplicated from
|
||||
# InstanceGroup.count_members_by_user() to query across multiple cells.
|
||||
# We need to be able to pass the correct cell context to
|
||||
# InstanceList.get_by_filters().
|
||||
# TODO(melwitt): Counting across cells for instances means we will miss
|
||||
# counting resources if a cell is down. In the future, we should query
|
||||
# placement for cores/ram and InstanceMappings for instances (once we are
|
||||
# deleting InstanceMappings when we delete instances).
|
||||
# NOTE(melwitt): Counting across cells for instances means we will miss
|
||||
# counting resources if a cell is down.
|
||||
cell_mappings = objects.CellMappingList.get_all(context)
|
||||
greenthreads = []
|
||||
filters = {'deleted': False, 'user_id': user_id, 'uuid': group.members}
|
||||
@@ -1163,6 +1165,41 @@ def _server_group_count_members_by_user(context, group, user_id):
|
||||
return {'user': {'server_group_members': count}}
|
||||
|
||||
|
||||
def _server_group_count_members_by_user(context, group, user_id):
    """Get the count of server group members for a group by user.

    :param context: The request context for database access
    :param group: The InstanceGroup object with members to count
    :param user_id: The user_id to count across
    :returns: A dict containing the user-scoped count. For example:

        {'user': {'server_group_members': <count across user>}}
    """
    # Because server group members quota counting is not scoped to a project,
    # but scoped to a particular InstanceGroup and user, we cannot filter our
    # user_id/queued_for_delete populated check on project_id or user_id.
    # So, we check whether user_id/queued_for_delete is populated for all
    # records and cache the result to prevent unnecessary checking once the
    # data migration has been completed.
    global UID_QFD_POPULATED_CACHE_ALL
    if not UID_QFD_POPULATED_CACHE_ALL:
        LOG.debug('Checking whether user_id and queued_for_delete are '
                  'populated for all projects')
        if _user_id_queued_for_delete_populated(context):
            # Migration complete everywhere; remember it so later calls
            # skip the EXISTS query entirely.
            UID_QFD_POPULATED_CACHE_ALL = True

    if UID_QFD_POPULATED_CACHE_ALL:
        # Fast path: count directly from the API DB instance mappings.
        count = objects.InstanceMappingList.get_count_by_uuids_and_user(
            context, group.members, user_id)
        return {'user': {'server_group_members': count}}

    # Slow path: unmigrated records remain, so fall back to counting
    # across all cell databases.
    LOG.warning('Falling back to legacy quota counting method for server '
                'group members')
    return _server_group_count_members_by_user_legacy(context, group,
                                                      user_id)
|
||||
|
||||
def _fixed_ip_count(context, project_id):
|
||||
# NOTE(melwitt): This assumes a single cell.
|
||||
count = objects.FixedIPList.get_count_by_project(context, project_id)
|
||||
@@ -1261,7 +1298,8 @@ def _instances_cores_ram_count(context, project_id, user_id=None):
|
||||
if CONF.quota.count_usage_from_placement:
|
||||
# If a project has all user_id and queued_for_delete data populated,
|
||||
# cache the result to avoid needless database checking in the future.
|
||||
if project_id not in UID_QFD_POPULATED_CACHE_BY_PROJECT:
|
||||
if (not UID_QFD_POPULATED_CACHE_ALL and
|
||||
project_id not in UID_QFD_POPULATED_CACHE_BY_PROJECT):
|
||||
LOG.debug('Checking whether user_id and queued_for_delete are '
|
||||
'populated for project_id %s', project_id)
|
||||
uid_qfd_populated = _user_id_queued_for_delete_populated(
|
||||
|
||||
@@ -319,6 +319,7 @@ class TestCase(testtools.TestCase):
|
||||
|
||||
# NOTE(melwitt): Reset the cached set of projects
|
||||
quota.UID_QFD_POPULATED_CACHE_BY_PROJECT = set()
|
||||
quota.UID_QFD_POPULATED_CACHE_ALL = False
|
||||
|
||||
def _setup_cells(self):
|
||||
"""Setup a normal cellsv2 environment.
|
||||
|
||||
@@ -10,6 +10,8 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import ddt
|
||||
import mock
|
||||
from oslo_utils import uuidutils
|
||||
|
||||
from nova import context
|
||||
@@ -20,6 +22,7 @@ from nova.tests import fixtures as nova_fixtures
|
||||
from nova.tests.functional.db import test_instance_mapping
|
||||
|
||||
|
||||
@ddt.ddt
|
||||
class QuotaTestCase(test.NoDBTestCase):
|
||||
USES_DB_SELF = True
|
||||
|
||||
@@ -32,7 +35,13 @@ class QuotaTestCase(test.NoDBTestCase):
|
||||
fix.add_cell_database('cell2')
|
||||
self.useFixture(fix)
|
||||
|
||||
def test_server_group_members_count_by_user(self):
|
||||
@ddt.data(True, False)
|
||||
@mock.patch('nova.quota.LOG.warning')
|
||||
@mock.patch('nova.quota._user_id_queued_for_delete_populated')
|
||||
def test_server_group_members_count_by_user(self, uid_qfd_populated,
|
||||
mock_uid_qfd_populated,
|
||||
mock_warn_log):
|
||||
mock_uid_qfd_populated.return_value = uid_qfd_populated
|
||||
ctxt = context.RequestContext('fake-user', 'fake-project')
|
||||
mapping1 = objects.CellMapping(context=ctxt,
|
||||
uuid=uuidutils.generate_uuid(),
|
||||
@@ -59,6 +68,12 @@ class QuotaTestCase(test.NoDBTestCase):
|
||||
user_id='fake-user')
|
||||
instance.create()
|
||||
instance_uuids.append(instance.uuid)
|
||||
im = objects.InstanceMapping(context=ctxt,
|
||||
instance_uuid=instance.uuid,
|
||||
project_id='fake-project',
|
||||
user_id='fake-user',
|
||||
cell_id=mapping1.id)
|
||||
im.create()
|
||||
|
||||
# Create an instance in cell2
|
||||
with context.target_cell(ctxt, mapping2) as cctxt:
|
||||
@@ -67,18 +82,50 @@ class QuotaTestCase(test.NoDBTestCase):
|
||||
user_id='fake-user')
|
||||
instance.create()
|
||||
instance_uuids.append(instance.uuid)
|
||||
im = objects.InstanceMapping(context=ctxt,
|
||||
instance_uuid=instance.uuid,
|
||||
project_id='fake-project',
|
||||
user_id='fake-user',
|
||||
cell_id=mapping2.id)
|
||||
im.create()
|
||||
|
||||
# Create an instance that is queued for delete in cell2. It should not
|
||||
# be counted
|
||||
with context.target_cell(ctxt, mapping2) as cctxt:
|
||||
instance = objects.Instance(context=cctxt,
|
||||
project_id='fake-project',
|
||||
user_id='fake-user')
|
||||
instance.create()
|
||||
instance.destroy()
|
||||
instance_uuids.append(instance.uuid)
|
||||
im = objects.InstanceMapping(context=ctxt,
|
||||
instance_uuid=instance.uuid,
|
||||
project_id='fake-project',
|
||||
user_id='fake-user',
|
||||
cell_id=mapping2.id,
|
||||
queued_for_delete=True)
|
||||
im.create()
|
||||
|
||||
# Add the uuids to the group
|
||||
objects.InstanceGroup.add_members(ctxt, group.uuid, instance_uuids)
|
||||
# add_members() doesn't add the members to the object field
|
||||
group.members.extend(instance_uuids)
|
||||
|
||||
# Count server group members across cells
|
||||
# Count server group members from instance mappings or cell databases,
|
||||
# depending on whether the user_id/queued_for_delete data migration has
|
||||
# been completed.
|
||||
count = quota._server_group_count_members_by_user(ctxt, group,
|
||||
'fake-user')
|
||||
|
||||
self.assertEqual(2, count['user']['server_group_members'])
|
||||
|
||||
if uid_qfd_populated:
|
||||
# Did not log a warning about falling back to legacy count.
|
||||
mock_warn_log.assert_not_called()
|
||||
else:
|
||||
# Logged a warning about falling back to legacy count.
|
||||
mock_warn_log.assert_called_once()
|
||||
|
||||
def test_instances_cores_ram_count(self):
|
||||
ctxt = context.RequestContext('fake-user', 'fake-project')
|
||||
mapping1 = objects.CellMapping(context=ctxt,
|
||||
|
||||
@@ -2107,3 +2107,37 @@ class QuotaCountTestCase(test.NoDBTestCase):
|
||||
mock.sentinel.project_id,
|
||||
user_id=mock.sentinel.user_id)
|
||||
mock_uid_qfd_populated.assert_not_called()
|
||||
|
||||
# NOTE: mock.patch decorators apply bottom-up, so the mock arguments below
# are in reverse order of the decorator list.
@mock.patch('nova.quota._user_id_queued_for_delete_populated')
@mock.patch('nova.quota._server_group_count_members_by_user_legacy')
@mock.patch('nova.objects.InstanceMappingList.get_count_by_uuids_and_user')
@mock.patch('nova.quota._instances_cores_ram_count_legacy')
@mock.patch('nova.quota._instances_cores_ram_count_api_db_placement')
def test_user_id_queued_for_delete_populated_cache_all(
        self, mock_api_db_placement_count, mock_legacy_icr_count,
        mock_api_db_sgm_count, mock_legacy_sgm_count,
        mock_uid_qfd_populated):
    """Verify the all-projects migration-complete result is cached once.

    After one successful populated check triggered by a server group
    members count, subsequent quota counts (server group members and
    instances/cores/ram) must not re-check the database.
    """
    # Check the case where the data migration was found to be complete by a
    # server group members count not scoped to a project.
    mock_uid_qfd_populated.return_value = True
    # Server group members call will check whether there are any unmigrated
    # records.
    fake_group = mock.Mock()
    quota._server_group_count_members_by_user(mock.sentinel.context,
                                              fake_group,
                                              mock.sentinel.user_id)
    mock_uid_qfd_populated.assert_called_once()
    # Second server group members call should skip the check for user_id
    # and queued_for_delete migrated because the result was cached.
    mock_uid_qfd_populated.reset_mock()
    quota._server_group_count_members_by_user(mock.sentinel.context,
                                              fake_group,
                                              mock.sentinel.user_id)
    mock_uid_qfd_populated.assert_not_called()
    # A call to count instances, cores, and ram should skip the check for
    # user_id and queued_for_delete migrated because the result was cached
    # during the call to count server group members.
    mock_uid_qfd_populated.reset_mock()
    quota._instances_cores_ram_count(mock.sentinel.context,
                                     mock.sentinel.project_id)
    mock_uid_qfd_populated.assert_not_called()
|
||||
|
||||
Reference in New Issue
Block a user