Periodic Quota Sync
Added method for: Periodic Quota Sync for all projects. On Demand Quota Sync for a project. Calculate nova resource usage for a project. Get Total resource usage for a project. Divide list of projects into batches and sync quota batch by batch. Use filtered regions if endpoint filter is applied for a project. To share usage details, Queue is used. Test Cases for all the above. The implementation is based on multi-threading i.e. each project will be synced in a separate thread. Get Total Usage from each region is also multi-threaded. Note: The current implementation is based on Nova resources only. Once this is fixed, will add neutron & cinder resources. Change-Id: Iaff1c7df2efc57d47882f23beb8e0082e5b427f6 Implements: https://blueprints.launchpad.net/kingbird/+spec/quota-management
This commit is contained in:
parent
c441f5c7f5
commit
9301c9a828
|
@ -6,6 +6,7 @@ namespace = kingbird.common.manager
|
|||
namespace = kingbird.common.baserpc
|
||||
namespace = kingbird.db.base
|
||||
namespace = kingbird.engine.engine_config
|
||||
namespace = kingbird.engine.quota_manager
|
||||
namespace = kingbird.engine.service
|
||||
namespace = kingbird.engine.listener
|
||||
namespace = kingbird.api.api_config
|
||||
|
@ -18,4 +19,4 @@ namespace = oslo.log
|
|||
namespace = oslo.policy
|
||||
namespace = oslo.service.service
|
||||
namespace = oslo.service.periodic_task
|
||||
namespace = oslo.service.sslutils
|
||||
namespace = oslo.service.sslutils
|
||||
|
|
|
@ -49,6 +49,7 @@ cinder_quotas = [
|
|||
cfg.IntOpt('quota_backup_gigabytes', default=1000)
|
||||
]
|
||||
|
||||
|
||||
# OpenStack credentials used for Endpoint Cache
|
||||
cache_opts = [
|
||||
cfg.StrOpt('auth_url',
|
||||
|
|
|
@ -13,6 +13,14 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from itertools import izip_longest
|
||||
|
||||
|
||||
def get_import_path(cls):
    """Return the fully-qualified dotted import path of *cls*."""
    return "%s.%s" % (cls.__module__, cls.__name__)
|
||||
|
||||
|
||||
# Returns an iterator of tuples containing batch_size number of objects each
def get_batch_projects(batch_size, project_list, fillvalue=None):
    """Group *project_list* into an iterator of batch_size-sized tuples.

    The final tuple is padded with ``fillvalue`` when the number of
    projects is not an exact multiple of ``batch_size``.
    """
    batch_iters = [iter(project_list)] * batch_size
    return izip_longest(*batch_iters, fillvalue=fillvalue)
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
# under the License.
|
||||
#
|
||||
|
||||
from keystoneclient.v3.contrib import endpoint_filter
|
||||
from oslo_utils import importutils
|
||||
|
||||
from kingbird.common.endpoint_cache import EndpointCache
|
||||
|
@ -51,3 +52,17 @@ class KeystoneClient(base.DriverBase):
|
|||
return False
|
||||
except exceptions.InternalError:
|
||||
raise
|
||||
|
||||
# Returns list of regions if endpoint filter is applied for the project
def get_filtered_region(self, project_id):
    """Return regions whose endpoints are filtered to *project_id*.

    An empty list means no endpoint filter is applied for the project.
    """
    try:
        endpoint_manager = endpoint_filter.EndpointFilterManager(
            self.keystone_client)
        filtered_endpoints = endpoint_manager.list_endpoints_for_project(
            project_id)
        return [current.region for current in filtered_endpoints]
    except exceptions.InternalError:
        raise
|
||||
|
|
|
@ -1,3 +1,5 @@
|
|||
# Copyright 2016 Ericsson AB
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
|
@ -9,6 +11,7 @@
|
|||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
import collections
|
||||
|
||||
from oslo_log import log
|
||||
|
||||
|
@ -31,17 +34,65 @@ class NovaClient(base.DriverBase):
|
|||
region_name=region)
|
||||
self.enabled_quotas = list(set(consts.NOVA_QUOTA_FIELDS) -
|
||||
set(disabled_quotas))
|
||||
self.no_neutron = True if 'floatingips' in self.enabled_quotas \
|
||||
or 'fixedips' in self.enabled_quotas else False
|
||||
except exceptions.ServiceUnavailable:
|
||||
raise
|
||||
|
||||
def get_resource_usages(self, project_id):
|
||||
'''Calcualte resources usage and return the dict'''
|
||||
return {}
|
||||
"""Collects resource usages for a given project
|
||||
|
||||
def update_quota_limits(self, project_id, new_quota):
|
||||
'''Update the limits'''
|
||||
pass
|
||||
:params: project_id
|
||||
:return: dictionary of corresponding resources with its usage
|
||||
"""
|
||||
try:
|
||||
# The API call does not give usage for keypair, fixed ips &
|
||||
# metadata items. Have raised a bug for that.
|
||||
limits = self.nova_client.limits.get().to_dict()
|
||||
resource_usage = collections.defaultdict(dict)
|
||||
resource_usage['ram'] = limits['absolute']['totalRAMUsed']
|
||||
resource_usage['cores'] = limits['absolute']['totalCoresUsed']
|
||||
resource_usage['instances'] = \
|
||||
limits['absolute']['totalInstancesUsed']
|
||||
# If neutron is not enabled, calculate below resources from nova
|
||||
if self.no_neutron:
|
||||
resource_usage['security_groups'] = \
|
||||
limits['absolute']['totalSecurityGroupsUsed']
|
||||
resource_usage['floating_ips'] = \
|
||||
limits['absolute']['totalFloatingIpsUsed']
|
||||
# For time being, keypair is calculated in below manner.
|
||||
resource_usage['key_pairs'] = \
|
||||
len(self.nova_client.keypairs.list())
|
||||
return resource_usage
|
||||
except exceptions.InternalError:
|
||||
raise
|
||||
|
||||
def update_quota_limits(self, project_id, **new_quota):
|
||||
"""Updates quota limits for a given project
|
||||
|
||||
:params: project_id, dictionary with the quota limits to update
|
||||
:return: Nothing
|
||||
"""
|
||||
try:
|
||||
if not self.no_neutron:
|
||||
if 'floating_ips' in new_quota:
|
||||
del new_quota['floating_ips']
|
||||
if 'fixed_ips' in new_quota:
|
||||
del new_quota['fixed_ips']
|
||||
if 'security_groups' in new_quota:
|
||||
del new_quota['security_groups']
|
||||
return self.nova_client.quotas.update(project_id,
|
||||
**new_quota)
|
||||
except exceptions.InternalError:
|
||||
raise
|
||||
|
||||
def delete_quota_limits(self, project_id):
|
||||
'''Delete/Reset the limits'''
|
||||
pass
|
||||
"""Delete/Reset quota limits for a given project
|
||||
|
||||
:params: project_id
|
||||
:return: Nothing
|
||||
"""
|
||||
try:
|
||||
return self.nova_client.quotas.delete(project_id)
|
||||
except exceptions.InternalError:
|
||||
raise
|
||||
|
|
|
@ -1,3 +1,5 @@
|
|||
# Copyright 2016 Ericsson AB
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
|
@ -18,6 +20,7 @@ import collections
|
|||
from oslo_log import log
|
||||
|
||||
from kingbird.common import consts
|
||||
from kingbird.common import endpoint_cache
|
||||
from kingbird.common import exceptions
|
||||
from kingbird.common.i18n import _
|
||||
from kingbird.common.i18n import _LE
|
||||
|
@ -36,7 +39,7 @@ class OpenStackDriver(object):
|
|||
|
||||
os_clients_dict = collections.defaultdict(dict)
|
||||
|
||||
def __init__(self, region_name):
|
||||
def __init__(self, region_name=None):
|
||||
# Check if objects are cached and try to use those
|
||||
self.region_name = region_name
|
||||
if 'keystone' in OpenStackDriver.os_clients_dict:
|
||||
|
@ -54,7 +57,7 @@ class OpenStackDriver(object):
|
|||
region_name]['neutron']
|
||||
else:
|
||||
# Create new objects and cache them
|
||||
LOG.debug(_("Creating fresh OS Clients objects"))
|
||||
LOG.info(_("Creating fresh OS Clients objects"))
|
||||
self.cinder_client = CinderClient(region_name,
|
||||
self.keystone_client.session)
|
||||
self.neutron_client = NeutronClient(region_name,
|
||||
|
@ -94,11 +97,12 @@ class OpenStackDriver(object):
|
|||
def write_quota_limits(self, project_id, limits_to_write):
|
||||
try:
|
||||
self.nova_client.update_quota_limits(project_id,
|
||||
limits_to_write['nova'])
|
||||
self.cinder_client.update_quota_limits(project_id,
|
||||
limits_to_write['cinder'])
|
||||
self.neutron_client.update_quota_limits(project_id,
|
||||
limits_to_write['neutron'])
|
||||
**limits_to_write['nova'])
|
||||
# TODO(Ashish): Include other clients after nova is fixed
|
||||
# self.cinder_client.update_quota_limits(project_id,
|
||||
# **limits_to_write['cinder'])
|
||||
# self.neutron_client.update_quota_limits(project_id,
|
||||
# **limits_to_write['neutron'])
|
||||
except (exceptions.ConnectionRefused, exceptions.NotAuthorized,
|
||||
exceptions.TimeOut):
|
||||
# Delete the cached objects for that region
|
||||
|
@ -138,6 +142,22 @@ class OpenStackDriver(object):
|
|||
if not self.neutron_client.is_extension_supported('quotas'):
|
||||
disabled_quotas.extend(consts.NEUTRON_QUOTA_FIELDS)
|
||||
except Exception:
|
||||
LOG.exception("There was an error checking if the Neutron "
|
||||
"quotas extension is enabled.")
|
||||
LOG.error("There was an error checking if the Neutron "
|
||||
"quotas extension is enabled.")
|
||||
return disabled_quotas
|
||||
|
||||
def get_all_regions_for_project(self, project_id):
|
||||
try:
|
||||
# Retrieve regions based on endpoint filter for the project.
|
||||
region_lists = self._get_filtered_regions(project_id)
|
||||
if not region_lists:
|
||||
# If endpoint filter is not used for the project, then
|
||||
# return all regions
|
||||
region_lists = endpoint_cache.EndpointCache().get_all_regions()
|
||||
return region_lists
|
||||
except Exception as exception:
|
||||
LOG.error(_LE('Error Occurred: %s'), exception.message)
|
||||
raise
|
||||
|
||||
def _get_filtered_regions(self, project_id):
|
||||
return self.keystone_client.get_filtered_region(project_id)
|
||||
|
|
|
@ -28,3 +28,23 @@ listener.py
|
|||
|
||||
engine_cfg.py:
|
||||
configuration and initialization for Engine service
|
||||
|
||||
quota_manager.py
|
||||
Manages all the quota related activities such as Periodic Quota Sync,
|
||||
On Demand Quota Sync, Get Total Usage for a Project, Read Kingbird
|
||||
global limit from DB/Conf file etc..
|
||||
|
||||
Quota sync happens based on below formula:
|
||||
Global_remaining_limit = Kingbird_global_limit(from DB/Conf) -
|
||||
Sum(usages from all regions)
|
||||
Region_new_limit = Global_remaining_limit + resource_usage_in_that_region.
|
||||
|
||||
Reference link: https://etherpad.opnfv.org/p/centralized_quota_management
|
||||
|
||||
On Demand Quota Sync: Creates threads for each region and syncs
|
||||
the limits for each quota concurrently.
|
||||
|
||||
Periodic Quota Sync: Creates threads for each Project and calls
|
||||
quota sync for project(On Demand Quota sync) for syncing project.
|
||||
|
||||
Caches OpenStack region-specific clients to reduce traffic.
|
||||
|
|
|
@ -18,7 +18,6 @@ from oslo_log import log as logging
|
|||
import oslo_messaging as messaging
|
||||
import time
|
||||
|
||||
from kingbird.common import context
|
||||
from kingbird.common.i18n import _
|
||||
from kingbird.common.i18n import _LI
|
||||
from kingbird.common import manager
|
||||
|
@ -34,7 +33,7 @@ scheduler_opts = [
|
|||
default=True,
|
||||
help='boolean value for enable/disenable periodic tasks'),
|
||||
cfg.IntOpt('periodic_interval',
|
||||
default=100,
|
||||
default=900,
|
||||
help='periodic time interval for automatic quota sync job')
|
||||
]
|
||||
|
||||
|
@ -83,22 +82,23 @@ class EngineManager(manager.Manager):
|
|||
pass
|
||||
|
||||
def periodic_balance_all(self):
|
||||
# Automated Quota Sync for all the keystone projects
|
||||
LOG.info(_LI("Periodic quota sync job started at: %s"),
|
||||
time.strftime("%c"))
|
||||
ctxt = context.get_admin_context()
|
||||
self.qm.periodic_balance_all(ctxt)
|
||||
self.qm.periodic_balance_all()
|
||||
|
||||
def quota_sync_for_project(self, ctx, project_id):
|
||||
LOG.info(_LI("Engine quota sync called for project: %s"), project_id)
|
||||
|
||||
pass
|
||||
# On Demand Quota Sync for a project, will be triggered by KB-API
|
||||
LOG.info(_LI("On Demand Quota Sync Called for: %s"),
|
||||
project_id)
|
||||
self.qm.quota_sync_for_project(project_id)
|
||||
|
||||
def get_total_usage_for_tenant(self, ctx, project_id):
|
||||
# Returns a dictionary containing nova, neutron &
|
||||
# cinder usages for the project
|
||||
LOG.info(_LI("Get total tenant usage called for: %s"), project_id)
|
||||
|
||||
pass
|
||||
LOG.info(_LI("Get total tenant usage called for: %s"),
|
||||
project_id)
|
||||
return self.qm.get_total_usage_for_tenant(project_id)
|
||||
|
||||
|
||||
def list_opts():
|
||||
|
|
|
@ -13,28 +13,218 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from oslo_config import cfg
|
||||
import collections
|
||||
from Queue import Queue
|
||||
import re
|
||||
import threading
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
|
||||
from kingbird.common import consts
|
||||
from kingbird.common import context
|
||||
from kingbird.common import endpoint_cache
|
||||
from kingbird.common import exceptions
|
||||
from kingbird.common.i18n import _
|
||||
from kingbird.common.i18n import _LE
|
||||
from kingbird.common.i18n import _LI
|
||||
from kingbird.common import manager
|
||||
from kingbird.common import utils
|
||||
from kingbird.db import api as db_api
|
||||
from kingbird.drivers.openstack import sdk
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
# Projects are synced batch by batch. Below configuration defines
|
||||
# number of projects in each batch
|
||||
batch_opts = [
|
||||
cfg.IntOpt('batch_size',
|
||||
default=3,
|
||||
help='Batch size number of projects will be synced at a time')
|
||||
]
|
||||
|
||||
batch_opt_group = cfg.OptGroup('batch')
|
||||
cfg.CONF.register_group(batch_opt_group)
|
||||
cfg.CONF.register_opts(batch_opts, group=batch_opt_group)
|
||||
|
||||
|
||||
class QuotaManager(manager.Manager):
|
||||
"""Manages tasks related to quota management."""
|
||||
"""Manages tasks related to quota management"""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
LOG.debug(_('QuotaManager initialization...'))
|
||||
|
||||
super(QuotaManager, self).__init__(service_name="quota_manager",
|
||||
*args, **kwargs)
|
||||
self.context = context.get_admin_context()
|
||||
self.endpoints = endpoint_cache.EndpointCache()
|
||||
|
||||
def periodic_balance_all(self):
    """Periodic quota sync for all enabled keystone projects.

    Projects are processed in batches of CONF.batch.batch_size; each
    project in a batch is synced in its own thread, and the batch's
    threads are joined before the next batch starts.
    """
    LOG.info(_LI("periodically balance quota for all keystone tenants"))
    # Iterate through project list and call sync project for each project
    # using threads
    project_list = sdk.OpenStackDriver().get_enabled_projects()
    # Divide list of projects into batches and perform quota sync
    # for one batch at a time.
    for current_batch_projects in utils.get_batch_projects(
            cfg.CONF.batch.batch_size, project_list):
        LOG.info(_LI("Syncing quota for current batch with projects: %s"),
                 current_batch_projects)
        projects_thread_list = []
        for current_project in current_batch_projects:
            # The last batch is padded with None fill values; skip them.
            if current_project:
                thread = threading.Thread(
                    target=self.quota_sync_for_project,
                    args=(current_project,))
                projects_thread_list.append(thread)
                thread.start()
        # Wait for all the threads to complete
        # the job(sync all projects quota)
        for current_thread in projects_thread_list:
            current_thread.join()
||||
|
||||
def read_quota_usage(self, project_id, region, usage_queue):
    """Read one region's resource usages and put them on *usage_queue*.

    Intended to run in its own thread; see
    get_tenant_quota_usage_per_region for the fan-out/join logic.
    """
    # Merges the per-service usages into one flat dict keyed by resource
    # name and writes it to the Queue as {region: merged_usages}.
    LOG.info(_LI("Reading quota usage for project: %s"), project_id)
    os_client = sdk.OpenStackDriver(region)
    region_usage = os_client.get_resource_usages(project_id)
    total_region_usage = collections.defaultdict(dict)
    # region_usage[0], region_usage[1], region_usage[2] are
    # nova, neutron & cinder usages respectively
    total_region_usage.update(region_usage[0])
    total_region_usage.update(region_usage[1])
    total_region_usage.update(region_usage[2])
    usage_queue.put({region: total_region_usage})
|
||||
|
||||
def get_summation(self, regions_dict):
    """Sum resource usages across all regions.

    :param regions_dict: mapping of region name -> usage dict
    :return: collections.Counter with per-resource totals
    """
    # Counter addition merges the per-region usage dicts resource by
    # resource; ``+=`` keeps only positive counts.  The intermediate
    # per-region dict the original kept was never read, so it is gone.
    resultant_dict = collections.Counter()
    for current_region in regions_dict:
        resultant_dict += collections.Counter(regions_dict[current_region])
    return resultant_dict
|
||||
|
||||
def _get_kingbird_project_limit(self, project_id):
    """Return the kingbird quota limits for *project_id*.

    Per-project limits stored in the DB override the defaults from the
    ``kingbird_global_limit`` config section.

    :param project_id: project whose limits are looked up
    :return: dict of resource name -> limit
    """
    kingbird_limits_for_project = collections.defaultdict(dict)
    try:
        # checks if there are any quota limit in DB for a project
        limits_from_db = db_api.quota_get_all_by_project(self.context,
                                                         project_id)
    except exceptions.ProjectQuotaNotFound:
        # No project-specific overrides; fall back to conf defaults only.
        limits_from_db = {}
    for current_resource in CONF.kingbird_global_limit.iteritems():
        # Config option names are 'quota_<resource>'; strip the prefix to
        # get the plain resource name.
        resource = re.sub('quota_', '', current_resource[0])
        # If resource limit in DB, then use it or else use limit
        # from conf file
        if resource in limits_from_db:
            kingbird_limits_for_project[resource] = limits_from_db[
                resource]
        else:
            kingbird_limits_for_project[resource] = current_resource[1]
    return kingbird_limits_for_project
|
||||
|
||||
def _arrange_quotas_by_service_name(self, region_new_limit):
    """Split a flat limits dict into per-service nova/cinder/neutron dicts."""
    resource_with_service = collections.defaultdict(dict)
    resource_with_service['nova'] = collections.defaultdict(dict)
    resource_with_service['cinder'] = collections.defaultdict(dict)
    resource_with_service['neutron'] = collections.defaultdict(dict)
    # Table-driven dispatch; order mirrors the original if/elif chain so
    # a resource listed under several services lands in the same bucket.
    service_fields = [('nova', consts.NOVA_QUOTA_FIELDS),
                      ('cinder', consts.CINDER_QUOTA_FIELDS),
                      ('neutron', consts.NEUTRON_QUOTA_FIELDS)]
    for limit in region_new_limit:
        for service, fields in service_fields:
            if limit in fields:
                resource_with_service[service][limit] = \
                    region_new_limit[limit]
                break
    return resource_with_service
|
||||
|
||||
def update_quota_limits(self, project_id, region_new_limit,
                        current_region):
    """Write newly calculated quota limits to one region.

    :param project_id: project whose quotas are updated
    :param region_new_limit: limits dict arranged by service name
    :param current_region: region to write the limits to
    """
    # Updates quota limit for a project with new calculated limit
    os_client = sdk.OpenStackDriver(current_region)
    os_client.write_quota_limits(project_id, region_new_limit)
|
||||
|
||||
def quota_sync_for_project(self, project_id):
    """Sync quota limits for one project across all of its regions.

    Limits are recomputed per region and written concurrently, one
    thread per region.
    """
    # Sync quota limits for the project according to below formula
    # Global remaining limit = Kingbird global limit - Summation of usages
    # in all the regions
    # New quota limit = Global remaining limit + usage in that region
    LOG.info(_LI("Quota sync Called for Project: %s"),
             project_id)
    regions_thread_list = []
    # Retrieve regions for the project
    region_lists = sdk.OpenStackDriver().get_all_regions_for_project(
        project_id)
    regions_usage_dict = self.get_tenant_quota_usage_per_region(project_id)
    if not regions_usage_dict:
        # Skip syncing for the project if not able to read regions usage
        LOG.error(_LE("Error reading regions usage for the Project: '%s'. "
                      "Aborting, continue with next project."), project_id)
        return
    total_project_usages = dict(self.get_summation(regions_usage_dict))
    kingbird_global_limit = self._get_kingbird_project_limit(project_id)
    # Counter subtraction drops non-positive entries, so resources whose
    # usage already meets or exceeds the global limit get no headroom.
    global_remaining_limit = collections.Counter(
        kingbird_global_limit) - collections.Counter(total_project_usages)

    for current_region in region_lists:
        # New limit for this region = remaining global headroom plus what
        # the region is already using.
        region_new_limit = dict(
            global_remaining_limit + collections.Counter(
                regions_usage_dict[current_region]))
        region_new_limit = self._arrange_quotas_by_service_name(
            region_new_limit)
        thread = threading.Thread(target=self.update_quota_limits,
                                  args=(project_id, region_new_limit,
                                        current_region,))
        regions_thread_list.append(thread)
        thread.start()

    # Wait for all the threads to update quota
    for current_thread in regions_thread_list:
        current_thread.join()
|
||||
|
||||
def get_tenant_quota_usage_per_region(self, project_id):
    """Collect per-region quota usages for *project_id* concurrently.

    :return: dict keyed by region name with usage dicts as values;
        empty when any region's usage could not be read (the caller
        treats that as "abort the sync").
    """
    # Return quota usage dict with keys as region name & values as usages.
    # Calculates the usage from each region concurrently using threads.

    # Retrieve regions for the project
    region_lists = sdk.OpenStackDriver().get_all_regions_for_project(
        project_id)
    usage_queue = Queue()
    regions_usage_dict = collections.defaultdict(dict)
    regions_thread_list = []
    for current_region in region_lists:
        thread = threading.Thread(target=self.read_quota_usage,
                                  args=(project_id, current_region,
                                        usage_queue))
        regions_thread_list.append(thread)
        thread.start()
    # Wait for all the threads to finish reading usages
    for current_thread in regions_thread_list:
        current_thread.join()
    # Check If all the regions usages are read; a short queue means some
    # reader thread failed, so return the (empty) dict unchanged.
    if len(region_lists) == usage_queue.qsize():
        for i in range(usage_queue.qsize()):
            # Read Queue
            current_region_data = usage_queue.get()
            regions_usage_dict.update(current_region_data)
    return regions_usage_dict
|
||||
|
||||
def get_total_usage_for_tenant(self, project_id):
    """Return the project's aggregated resource usage across all regions."""
    LOG.info(_LI("Get total usage called for project: %s"),
             project_id)
    per_region_usage = self.get_tenant_quota_usage_per_region(project_id)
    return dict(self.get_summation(per_region_usage))
|
||||
|
||||
|
||||
def list_opts():
    """Yield (group name, opts) pairs for oslo.config option discovery."""
    yield batch_opt_group.name, batch_opts
|
||||
|
|
|
@ -23,9 +23,16 @@ FAKE_SERVICE = [
|
|||
|
||||
|
||||
class Project(object):
    """Minimal keystone-project stand-in used by the tests.

    ``enabled`` defaults to True so existing two-argument callers keep
    working while disabled projects can now be modelled too.
    """

    def __init__(self, proj_name, id, enabled=True):
        self.proj_name = proj_name
        self.id = id
        self.enabled = enabled
|
||||
|
||||
|
||||
class FakeEndpoint(object):
    """Test double standing in for a keystone endpoint record."""

    def __init__(self, endpoint_name, region):
        self.region = region
        self.endpoint_name = endpoint_name
|
||||
|
||||
|
||||
class TestKeystoneClient(base.KingbirdTestCase):
|
||||
|
@ -51,13 +58,26 @@ class TestKeystoneClient(base.KingbirdTestCase):
|
|||
network_enabled = key_client.is_service_enabled('network')
|
||||
self.assertEqual(network_enabled, True)
|
||||
|
||||
@mock.patch.object(keystone_v3, 'KeystoneClient')
|
||||
def test_get_all_enabled_projects(self, mock_key_client):
|
||||
@mock.patch.object(keystone_v3, 'EndpointCache')
|
||||
def test_get_enabled_projects(self, mock_endpoint_cache):
|
||||
p1 = Project('proj1', '123')
|
||||
p2 = Project('proj2', '456')
|
||||
mock_key_client().get_all_enabled_projects.return_value =\
|
||||
[p1.id, p2.id]
|
||||
key_client = keystone_v3.KeystoneClient()
|
||||
project_list = key_client.get_all_enabled_projects()
|
||||
mock_endpoint_cache().keystone_client.projects.list.return_value =\
|
||||
[p1, p2]
|
||||
project_list = key_client.get_enabled_projects()
|
||||
self.assertIn(p1.id, project_list)
|
||||
self.assertIn(p2.id, project_list)
|
||||
|
||||
@mock.patch.object(keystone_v3.endpoint_filter, 'EndpointFilterManager')
|
||||
@mock.patch.object(keystone_v3, 'EndpointCache')
|
||||
def test_get_filtered_region(self, mock_endpoint_cache,
|
||||
mock_endpoint_filter_manager):
|
||||
endpoint_1 = FakeEndpoint('endpoint1', 'regionOne')
|
||||
endpoint_2 = FakeEndpoint('endpoint2', 'regionTwo')
|
||||
key_client = keystone_v3.KeystoneClient()
|
||||
mock_endpoint_filter_manager(). \
|
||||
list_endpoints_for_project.return_value = [endpoint_1, endpoint_2]
|
||||
region_list = key_client.get_filtered_region('fake_project')
|
||||
self.assertIn('regionOne', region_list)
|
||||
self.assertIn('regionTwo', region_list)
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import mock
|
||||
import novaclient
|
||||
|
||||
from kingbird.common import consts
|
||||
|
@ -17,7 +18,39 @@ from kingbird.drivers.openstack import nova_v2
|
|||
from kingbird.tests import base
|
||||
from kingbird.tests import utils
|
||||
|
||||
|
||||
class Server(object):
    """Fake nova server exposing only metadata and a flavor id."""

    def __init__(self, id, metadata_items):
        self.metadata = metadata_items
        self.flavor = {'id': id}
|
||||
|
||||
|
||||
class Flavor(object):
    """Fake nova flavor carrying the sizes used in usage calculations."""

    def __init__(self, id, ram, cores, disks):
        self.id, self.ram = id, ram
        self.vcpus, self.disk = cores, disks
|
||||
|
||||
s1 = Server(1, {'mkey': 'mvalue'})
|
||||
s2 = Server(1, {'mkey': 'mvalue', 'm2key': 'm2value'})
|
||||
FAKE_FLAVOR = Flavor(1, 512, 10, 50)
|
||||
DISABLED_QUOTAS = ["floating_ips", "fixed_ips", "security_groups"]
|
||||
FAKE_KEYPAIRS = ['key1', 'key2']
|
||||
FAKE_LIMITS = {'absolute':
|
||||
{u'maxServerMeta': 100, u'maxPersonality': 5,
|
||||
u'totalServerGroupsUsed': 0,
|
||||
u'maxImageMeta': 100, u'maxPersonalitySize': 10240,
|
||||
u'maxTotalKeypairs': 100, u'maxSecurityGroupRules': 20,
|
||||
u'maxServerGroups': 10, u'totalCoresUsed': 2,
|
||||
u'totalRAMUsed': 1024, u'maxSecurityGroups': 10,
|
||||
u'totalFloatingIpsUsed': 0, u'totalInstancesUsed': 2,
|
||||
u'maxServerGroupMembers': 10, u'maxTotalFloatingIps': 10,
|
||||
u'totalSecurityGroupsUsed': 1, u'maxTotalInstances': 15,
|
||||
u'maxTotalRAMSize': 51200, u'maxTotalCores': 10
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
class TestNovaClient(base.KingbirdTestCase):
|
||||
|
@ -25,6 +58,7 @@ class TestNovaClient(base.KingbirdTestCase):
|
|||
super(TestNovaClient, self).setUp()
|
||||
self.ctx = utils.dummy_context()
|
||||
self.session = 'fake_session'
|
||||
self.project = 'fake_project'
|
||||
|
||||
def test_init(self):
|
||||
nv_client = nova_v2.NovaClient('fake_region', DISABLED_QUOTAS,
|
||||
|
@ -36,11 +70,27 @@ class TestNovaClient(base.KingbirdTestCase):
|
|||
self.assertIsInstance(nv_client.nova_client,
|
||||
novaclient.v2.client.Client)
|
||||
|
||||
def test_get_resource_usages(self):
|
||||
pass
|
||||
@mock.patch.object(nova_v2, 'client')
|
||||
def test_get_resource_usages(self, mock_novaclient):
|
||||
mock_novaclient.Client().keypairs.list.return_value = FAKE_KEYPAIRS
|
||||
mock_novaclient.Client().limits.get().to_dict.return_value = \
|
||||
FAKE_LIMITS
|
||||
nv_client = nova_v2.NovaClient('fake_region', DISABLED_QUOTAS,
|
||||
self.session)
|
||||
total_nova_usage = nv_client.get_resource_usages(self.project)
|
||||
self.assertEqual(total_nova_usage['ram'], 512 * 2)
|
||||
self.assertEqual(total_nova_usage['cores'], 2)
|
||||
self.assertEqual(total_nova_usage['instances'], 2)
|
||||
self.assertEqual(total_nova_usage['key_pairs'], 2)
|
||||
|
||||
def test_update_quota_limits(self):
|
||||
pass
|
||||
@mock.patch.object(nova_v2, 'client')
|
||||
def test_update_quota_limits(self, mock_novaclient):
|
||||
nv_client = nova_v2.NovaClient('fake_region', DISABLED_QUOTAS,
|
||||
self.session)
|
||||
new_quota = {'ram': 100, 'cores': 50}
|
||||
nv_client.update_quota_limits(self.project, **new_quota)
|
||||
mock_novaclient.Client().quotas.update.assert_called_once_with(
|
||||
self.project, **new_quota)
|
||||
|
||||
def test_delete_quota_limits(self):
|
||||
pass
|
||||
|
|
|
@ -84,13 +84,14 @@ class TestOpenStackDriver(base.KingbirdTestCase):
|
|||
os_driver.write_quota_limits(project_id, write_limits)
|
||||
mock_nova_client(
|
||||
).update_quota_limits.assert_called_once_with(project_id,
|
||||
write_limits['nova'])
|
||||
mock_network_client(
|
||||
).update_quota_limits.assert_called_once_with(project_id,
|
||||
write_limits['neutron'])
|
||||
mock_cinder_client(
|
||||
).update_quota_limits.assert_called_once_with(project_id,
|
||||
write_limits['cinder'])
|
||||
instances=7, ram=1222,
|
||||
vcpus=10)
|
||||
# mock_network_client(
|
||||
# ).update_quota_limits.assert_called_once_with(project_id,
|
||||
# write_limits['neutron'])
|
||||
# mock_cinder_client(
|
||||
# ).update_quota_limits.assert_called_once_with(project_id,
|
||||
# write_limits['cinder'])
|
||||
|
||||
@mock.patch.object(sdk, 'KeystoneClient')
|
||||
@mock.patch.object(sdk, 'NovaClient')
|
||||
|
@ -158,3 +159,53 @@ class TestOpenStackDriver(base.KingbirdTestCase):
|
|||
self.assertIn(input_disable_quotas[0], output_disabled_quotas)
|
||||
self.assertIn(input_disable_quotas[1], output_disabled_quotas)
|
||||
self.assertIn(input_disable_quotas[2], output_disabled_quotas)
|
||||
|
||||
@mock.patch.object(sdk, 'KeystoneClient')
|
||||
@mock.patch.object(sdk, 'NovaClient')
|
||||
@mock.patch.object(sdk, 'NeutronClient')
|
||||
@mock.patch.object(sdk, 'CinderClient')
|
||||
def test_get_filtered_regions(self, mock_cinder_client,
|
||||
mock_network_client, mock_nova_client,
|
||||
mock_keystone_client):
|
||||
input_region_list = ['region_one', 'region_two']
|
||||
os_driver = sdk.OpenStackDriver()
|
||||
os_driver.keystone_client.get_filtered_region.return_value = \
|
||||
input_region_list
|
||||
output_project_list = os_driver._get_filtered_regions('fake_project')
|
||||
self.assertEqual(output_project_list, input_region_list)
|
||||
|
||||
@mock.patch.object(sdk, 'endpoint_cache')
|
||||
@mock.patch.object(sdk, 'KeystoneClient')
|
||||
@mock.patch.object(sdk, 'NovaClient')
|
||||
@mock.patch.object(sdk, 'NeutronClient')
|
||||
@mock.patch.object(sdk, 'CinderClient')
|
||||
def test_get_all_regions_for_project_without_filter(self,
|
||||
mock_cinder_client,
|
||||
mock_network_client,
|
||||
mock_nova_client,
|
||||
mock_keystone_client,
|
||||
mock_endpoint):
|
||||
input_region_list = ['region_one', 'region_two']
|
||||
os_driver = sdk.OpenStackDriver()
|
||||
os_driver.keystone_client.get_filtered_region.return_value = []
|
||||
mock_endpoint.EndpointCache(
|
||||
).get_all_regions.return_value = input_region_list
|
||||
output_project_list = os_driver.get_all_regions_for_project(
|
||||
'fake_project')
|
||||
self.assertEqual(output_project_list, input_region_list)
|
||||
|
||||
@mock.patch.object(sdk, 'KeystoneClient')
|
||||
@mock.patch.object(sdk, 'NovaClient')
|
||||
@mock.patch.object(sdk, 'NeutronClient')
|
||||
@mock.patch.object(sdk, 'CinderClient')
|
||||
def test_get_all_regions_for_project_with_filter(self, mock_cinder_client,
|
||||
mock_network_client,
|
||||
mock_nova_client,
|
||||
mock_keystone_client):
|
||||
input_region_list = ['region_one', 'region_two']
|
||||
os_driver = sdk.OpenStackDriver()
|
||||
os_driver.keystone_client.get_filtered_region.return_value = \
|
||||
input_region_list
|
||||
output_project_list = os_driver.get_all_regions_for_project(
|
||||
'fake_project')
|
||||
self.assertEqual(output_project_list, input_region_list)
|
||||
|
|
|
@ -13,33 +13,38 @@
|
|||
import mock
|
||||
|
||||
from kingbird.engine import listener
|
||||
from kingbird.engine.quota_manager import QuotaManager
|
||||
from kingbird.tests import base
|
||||
from kingbird.tests import utils
|
||||
|
||||
FAKE_PROJECT = 'fake_project'
|
||||
|
||||
|
||||
class TestEngineManager(base.KingbirdTestCase):
|
||||
def setUp(self):
|
||||
super(TestEngineManager, self).setUp()
|
||||
self.context = utils.dummy_context()
|
||||
|
||||
def test_init(self):
|
||||
@mock.patch.object(listener, 'QuotaManager')
|
||||
def test_init(self, mock_qm):
|
||||
engine_manager = listener.EngineManager()
|
||||
self.assertIsNotNone(engine_manager)
|
||||
self.assertIsInstance(engine_manager.qm, QuotaManager)
|
||||
self.assertEqual(engine_manager.qm, mock_qm())
|
||||
|
||||
@mock.patch.object(listener, 'context')
|
||||
@mock.patch.object(listener, 'QuotaManager')
|
||||
def test_periodic_balance_all(self, mock_qm, mock_context):
|
||||
def test_periodic_balance_all(self, mock_qm):
|
||||
engine_manager = listener.EngineManager()
|
||||
cntxt = utils.dummy_context()
|
||||
mock_context.get_admin_context().return_value = cntxt
|
||||
engine_manager.periodic_balance_all()
|
||||
mock_qm().periodic_balance_all.assert_called_once_with(
|
||||
mock_context.get_admin_context())
|
||||
mock_qm().periodic_balance_all.assert_called_once_with()
|
||||
|
||||
def test_quota_sync_for_project(self):
|
||||
pass
|
||||
@mock.patch.object(listener, 'QuotaManager')
|
||||
def test_quota_sync_for_project(self, mock_qm):
|
||||
engine_manager = listener.EngineManager()
|
||||
engine_manager.quota_sync_for_project(self.context, FAKE_PROJECT)
|
||||
mock_qm().quota_sync_for_project.assert_called_once_with(FAKE_PROJECT)
|
||||
|
||||
def test_total_usage_for_tenant(self):
|
||||
pass
|
||||
@mock.patch.object(listener, 'QuotaManager')
|
||||
def test_get_total_usage_for_tenant(self, mock_qm):
|
||||
engine_manager = listener.EngineManager()
|
||||
engine_manager.get_total_usage_for_tenant(self.context, FAKE_PROJECT)
|
||||
mock_qm().get_total_usage_for_tenant.assert_called_once_with(
|
||||
FAKE_PROJECT)
|
||||
|
|
|
@ -9,18 +9,194 @@
|
|||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
from collections import Counter
|
||||
import mock
|
||||
from Queue import Queue
|
||||
|
||||
from kingbird.engine.quota_manager import QuotaManager
|
||||
from oslo_config import cfg
|
||||
|
||||
from kingbird.common import config
|
||||
from kingbird.engine import quota_manager
|
||||
from kingbird.tests import base
|
||||
from kingbird.tests import utils
|
||||
|
||||
CONF = cfg.CONF
|
||||
FAKE_PROJECT = 'fake_project'
|
||||
FAKE_REGION = 'fake_region'
|
||||
NOVA_USAGE = {'ram': 100, 'cores': '50'}
|
||||
NEUTRON_USAGE = {'port': 10}
|
||||
CINDER_USAGE = {'volumes': 18}
|
||||
FAKE_REGION_DICT = {'region1': {'ram': 100},
|
||||
'region2': {'ram': 200, 'volumes': 500}}
|
||||
TOTAL_USAGE = {}
|
||||
TOTAL_USAGE.update(NOVA_USAGE)
|
||||
TOTAL_USAGE.update(NEUTRON_USAGE)
|
||||
TOTAL_USAGE.update(CINDER_USAGE)
|
||||
|
||||
|
||||
class TestQuotaManager(base.KingbirdTestCase):
|
||||
def setUp(self):
|
||||
super(TestQuotaManager, self).setUp()
|
||||
self.ctxt = utils.dummy_context()
|
||||
|
||||
def test_init(self):
|
||||
qm = QuotaManager()
|
||||
@mock.patch.object(quota_manager, 'endpoint_cache')
|
||||
@mock.patch.object(quota_manager, 'context')
|
||||
def test_init(self, mock_context, mock_endpoint):
|
||||
mock_context.get_admin_context.return_value = self.ctxt
|
||||
qm = quota_manager.QuotaManager()
|
||||
self.assertIsNotNone(qm)
|
||||
self.assertEqual('quota_manager', qm.service_name)
|
||||
self.assertEqual('localhost', qm.host)
|
||||
self.assertEqual(self.ctxt, qm.context)
|
||||
|
||||
def test_periodic_balance_all(self):
|
||||
pass
|
||||
@mock.patch.object(quota_manager.QuotaManager, 'quota_sync_for_project')
|
||||
@mock.patch.object(quota_manager, 'sdk')
|
||||
@mock.patch.object(quota_manager, 'endpoint_cache')
|
||||
def test_periodic_balance_all(self, mock_endpoint,
|
||||
mock_sdk, mock_quota_sync):
|
||||
mock_sdk.OpenStackDriver().get_enabled_projects.return_value = \
|
||||
['proj1']
|
||||
qm = quota_manager.QuotaManager()
|
||||
qm.periodic_balance_all()
|
||||
mock_quota_sync.assert_called_with('proj1')
|
||||
|
||||
@mock.patch.object(quota_manager, 'sdk')
|
||||
@mock.patch.object(quota_manager, 'endpoint_cache')
|
||||
def test_read_quota_usage(self, mock_endpoint,
|
||||
mock_sdk):
|
||||
mock_sdk.OpenStackDriver().get_resource_usages.return_value = \
|
||||
NOVA_USAGE, NEUTRON_USAGE, CINDER_USAGE
|
||||
usage_queue = Queue()
|
||||
qm = quota_manager.QuotaManager()
|
||||
qm.read_quota_usage(FAKE_PROJECT, FAKE_REGION, usage_queue)
|
||||
actual_usage = usage_queue.get()
|
||||
self.assertEqual(actual_usage, {FAKE_REGION: TOTAL_USAGE})
|
||||
|
||||
@mock.patch.object(quota_manager, 'endpoint_cache')
|
||||
def test_get_summation(self, mock_endpoint):
|
||||
expected_sum = Counter(dict(Counter(FAKE_REGION_DICT['region1'])
|
||||
+ Counter(FAKE_REGION_DICT['region2'])))
|
||||
qm = quota_manager.QuotaManager()
|
||||
actual_sum = qm.get_summation(FAKE_REGION_DICT)
|
||||
self.assertEqual(expected_sum, actual_sum)
|
||||
|
||||
@mock.patch.object(quota_manager, 'db_api')
|
||||
@mock.patch.object(quota_manager, 'endpoint_cache')
|
||||
def test_get_kingbird_project_limit(self, mock_endpoint,
|
||||
mock_db_api):
|
||||
config.register_options()
|
||||
qm = quota_manager.QuotaManager()
|
||||
mock_db_api.quota_get_all_by_project.return_value = {
|
||||
'project_id': FAKE_PROJECT, 'ram': 51000}
|
||||
actual_global_limit = qm._get_kingbird_project_limit(FAKE_PROJECT)
|
||||
# Assert kingbird limits from conf file
|
||||
self.assertEqual(CONF.kingbird_global_limit['quota_cores'],
|
||||
actual_global_limit['cores'])
|
||||
self.assertEqual(CONF.kingbird_global_limit['quota_key_pairs'],
|
||||
actual_global_limit['key_pairs'])
|
||||
self.assertEqual(CONF.kingbird_global_limit['quota_router'],
|
||||
actual_global_limit['router'])
|
||||
self.assertEqual(CONF.kingbird_global_limit['quota_port'],
|
||||
actual_global_limit['port'])
|
||||
self.assertEqual(CONF.kingbird_global_limit['quota_network'],
|
||||
actual_global_limit['network'])
|
||||
self.assertEqual(CONF.kingbird_global_limit['quota_volumes'],
|
||||
actual_global_limit['volumes'])
|
||||
self.assertEqual(CONF.kingbird_global_limit['quota_instances'],
|
||||
actual_global_limit['instances'])
|
||||
self.assertEqual(CONF.kingbird_global_limit['quota_floatingip'],
|
||||
actual_global_limit['floatingip'])
|
||||
self.assertEqual(CONF.kingbird_global_limit['quota_metadata_items'],
|
||||
actual_global_limit['metadata_items'])
|
||||
self.assertEqual(CONF.kingbird_global_limit['quota_security_group'],
|
||||
actual_global_limit['security_group'])
|
||||
self.assertEqual(CONF.kingbird_global_limit['quota_backups'],
|
||||
actual_global_limit['backups'])
|
||||
# Assert Kingbird limit from db which is mocked
|
||||
self.assertEqual(51000, actual_global_limit['ram'])
|
||||
|
||||
@mock.patch.object(quota_manager, 'endpoint_cache')
|
||||
def test_arrange_quotas_by_service_name(self, mock_endpoint):
|
||||
qm = quota_manager.QuotaManager()
|
||||
actual_arranged_quotas = qm._arrange_quotas_by_service_name(
|
||||
TOTAL_USAGE)
|
||||
self.assertEqual(CINDER_USAGE, actual_arranged_quotas['cinder'])
|
||||
self.assertEqual(NEUTRON_USAGE, actual_arranged_quotas['neutron'])
|
||||
self.assertEqual(NOVA_USAGE, actual_arranged_quotas['nova'])
|
||||
|
||||
@mock.patch.object(quota_manager, 'sdk')
|
||||
@mock.patch.object(quota_manager, 'endpoint_cache')
|
||||
def test_update_quota_limits(self, mock_endpoint,
|
||||
mock_sdk):
|
||||
qm = quota_manager.QuotaManager()
|
||||
qm.update_quota_limits(FAKE_PROJECT, TOTAL_USAGE, FAKE_REGION)
|
||||
mock_sdk.OpenStackDriver().write_quota_limits.assert_called_once_with(
|
||||
FAKE_PROJECT, TOTAL_USAGE)
|
||||
|
||||
@mock.patch.object(quota_manager, 'db_api')
|
||||
@mock.patch.object(quota_manager, 'sdk')
|
||||
@mock.patch.object(quota_manager.QuotaManager,
|
||||
'get_tenant_quota_usage_per_region')
|
||||
@mock.patch.object(quota_manager.QuotaManager,
|
||||
'update_quota_limits')
|
||||
@mock.patch.object(quota_manager.QuotaManager,
|
||||
'_get_kingbird_project_limit')
|
||||
@mock.patch.object(quota_manager, 'endpoint_cache')
|
||||
def test_quota_sync_for_project(self, mock_endpoint, mock_kb_limit,
|
||||
mock_update, mock_quota_usage,
|
||||
mock_os_client, mock_dbapi):
|
||||
mock_os_client.OpenStackDriver(
|
||||
).get_all_regions_for_project.return_value = [FAKE_REGION]
|
||||
mock_quota_usage.return_value = {FAKE_REGION: TOTAL_USAGE}
|
||||
mock_kb_limit.return_value = {'ram': 100}
|
||||
qm = quota_manager.QuotaManager()
|
||||
qm.quota_sync_for_project(FAKE_PROJECT)
|
||||
expected_limit = {'cinder': CINDER_USAGE, 'nova': NOVA_USAGE,
|
||||
'neutron': NEUTRON_USAGE}
|
||||
mock_update.assert_called_once_with(FAKE_PROJECT, expected_limit,
|
||||
FAKE_REGION)
|
||||
|
||||
@mock.patch.object(quota_manager, 'Queue')
|
||||
@mock.patch.object(quota_manager, 'sdk')
|
||||
@mock.patch.object(quota_manager, 'endpoint_cache')
|
||||
@mock.patch.object(quota_manager.QuotaManager, 'read_quota_usage')
|
||||
def test_get_tenant_quota_usage_per_region(self, mock_quota_usage,
|
||||
mock_endpoint,
|
||||
mock_sdk, mock_queue):
|
||||
qm = quota_manager.QuotaManager()
|
||||
mock_sdk.OpenStackDriver().get_all_regions_for_project.return_value = \
|
||||
[FAKE_REGION]
|
||||
qm.get_tenant_quota_usage_per_region(FAKE_PROJECT)
|
||||
mock_quota_usage.assert_called_once_with(FAKE_PROJECT, FAKE_REGION,
|
||||
mock_queue())
|
||||
|
||||
@mock.patch.object(quota_manager, 'endpoint_cache')
|
||||
@mock.patch.object(quota_manager.QuotaManager,
|
||||
'get_tenant_quota_usage_per_region')
|
||||
def test_get_total_usage_for_tenant(self, mock_quota_usage,
|
||||
mock_endpoint):
|
||||
qm = quota_manager.QuotaManager()
|
||||
qm.get_total_usage_for_tenant(FAKE_PROJECT)
|
||||
mock_quota_usage.assert_called_once_with(FAKE_PROJECT)
|
||||
|
||||
@mock.patch.object(quota_manager, 'db_api')
|
||||
@mock.patch.object(quota_manager, 'sdk')
|
||||
@mock.patch.object(quota_manager.QuotaManager,
|
||||
'get_tenant_quota_usage_per_region')
|
||||
@mock.patch.object(quota_manager.QuotaManager,
|
||||
'update_quota_limits')
|
||||
@mock.patch.object(quota_manager.QuotaManager,
|
||||
'_get_kingbird_project_limit')
|
||||
@mock.patch.object(quota_manager, 'endpoint_cache')
|
||||
def test_quota_sync_for_project_read_error(self, mock_endpoint,
|
||||
mock_kb_limit,
|
||||
mock_update,
|
||||
mock_quota_usage,
|
||||
mock_os_client, mock_dbapi):
|
||||
mock_os_client.OpenStackDriver(
|
||||
).get_all_regions_for_project.return_value = [FAKE_REGION]
|
||||
mock_quota_usage.return_value = {}
|
||||
mock_kb_limit.return_value = {'ram': 100}
|
||||
qm = quota_manager.QuotaManager()
|
||||
qm.quota_sync_for_project(FAKE_PROJECT)
|
||||
mock_update.assert_not_called()
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
|
||||
import mock
|
||||
|
||||
from kingbird.engine.listener import EngineManager
|
||||
from kingbird.engine import listener
|
||||
from kingbird.engine import service
|
||||
from kingbird.tests import base
|
||||
from oslo_config import cfg
|
||||
|
@ -24,23 +24,26 @@ class TestEngineService(base.KingbirdTestCase):
|
|||
def setUp(self):
|
||||
super(TestEngineService, self).setUp()
|
||||
|
||||
def test_init(self):
|
||||
manager = EngineManager()
|
||||
@mock.patch.object(listener, 'QuotaManager')
|
||||
def test_init(self, mock_qm):
|
||||
manager = listener.EngineManager()
|
||||
engine_service = service.EngineService('127.0.0.1', 'engine',
|
||||
'topic-A', manager)
|
||||
self.assertIsNotNone(engine_service)
|
||||
|
||||
|
||||
@mock.patch.object(service, 'EngineService')
|
||||
def test_create_service(mock_engine):
|
||||
@mock.patch.object(listener, 'QuotaManager')
|
||||
def test_create_service(mock_qm, mock_engine):
|
||||
service.create_service()
|
||||
mock_engine().start.assert_called_once_with()
|
||||
|
||||
|
||||
@mock.patch.object(listener, 'QuotaManager')
|
||||
@mock.patch.object(service, 'EngineService')
|
||||
@mock.patch.object(service, 'srv')
|
||||
def test_serve(mock_srv, mock_engine):
|
||||
manager = EngineManager()
|
||||
def test_serve(mock_srv, mock_engine, mock_qm):
|
||||
manager = listener.EngineManager()
|
||||
engine_service = service.EngineService('127.0.0.1', 'engine',
|
||||
'topic-A', manager)
|
||||
service.serve(engine_service, 2)
|
||||
|
|
|
@ -0,0 +1,33 @@
|
|||
# Copyright 2016 Ericsson AB
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from kingbird.common import utils
|
||||
from kingbird.tests import base
|
||||
|
||||
|
||||
class TestUtils(base.KingbirdTestCase):
|
||||
def setUp(self):
|
||||
super(TestUtils, self).setUp()
|
||||
|
||||
def test_get_batch_projects(self):
|
||||
fake_project_list = ['proj1', 'proj2', 'proj3', 'proj4',
|
||||
'proj5', 'proj6', 'proj7']
|
||||
actual_project_list = utils.get_batch_projects(3, fake_project_list)
|
||||
self.assertEqual((fake_project_list[0], fake_project_list[1],
|
||||
fake_project_list[2]), actual_project_list.next())
|
||||
self.assertEqual((fake_project_list[3], fake_project_list[4],
|
||||
fake_project_list[5]), actual_project_list.next())
|
||||
self.assertEqual((fake_project_list[6], None, None),
|
||||
actual_project_list.next())
|
|
@ -33,6 +33,7 @@ oslo.config.opts =
|
|||
kingbird.common.config = kingbird.common.config:list_opts
|
||||
kingbird.common.engine.engine_config = kingbird.engine.engine_config:list_opts
|
||||
kingbird.common.engine.listener = kingbird.engine.listener:list_opts
|
||||
kingbird.common.engine.quota_manager = kingbird.engine.quota_manager:list_opts
|
||||
kingbird.common.api.api_config = kingbird.api.api_config:list_opts
|
||||
kingbird.db.base= kingbird.db.base:list_opts
|
||||
kingbird.common.baserpc= kingbird.common.baserpc:list_opts
|
||||
|
|
Loading…
Reference in New Issue