Merge "feat: connect sharding manager to control drivers"
commit 7bd2912f8a

New files:
  marconi/common/storage/__init__.py   (0 lines)
  marconi/common/storage/select.py     (58 lines)
marconi/common/storage/select.py (new file)
@@ -0,0 +1,58 @@
# Copyright (c) 2013 Rackspace Hosting, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""select: a collection of algorithms for choosing an entry from a
collection."""

import random


def weighted(objs, key='weight', generator=random.randint):
    """Perform a weighted select given a list of objects.

    :param objs: a list of objects containing at least the field `key`
    :type objs: [dict]
    :param key: the field in each obj that corresponds to weight
    :type key: six.text_type
    :param generator: a number generator taking two ints
    :type generator: function(int, int) -> int
    :return: an object
    :rtype: dict
    """
    acc = 0
    lookup = []

    # construct weighted spectrum
    for o in objs:
        # NOTE(cpp-cabrera): skip objs with 0 weight
        if o[key] <= 0:
            continue
        acc += o[key]
        lookup.append((o, acc))

    # no objects were found
    if not lookup:
        return None

    # NOTE(cpp-cabrera): select an object from the lookup table. If
    # the selector lands in the interval [lower, upper), then choose
    # it.
    gen = generator
    selector = gen(0, acc - 1)
    lower = 0
    for obj, upper in lookup:
        if lower <= selector < upper:
            return obj
        lower = upper
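Editor's note: a minimal, hypothetical usage sketch of the new weighted selector. The shard dicts and weights below are invented; the fixed generators mirror the style used in the unit tests further down.

    from marconi.common.storage import select

    shards = [
        {'name': 'alpha', 'weight': 100, 'uri': 'mongodb://alpha'},
        {'name': 'beta', 'weight': 0, 'uri': 'mongodb://beta'},   # zero weight: never chosen
        {'name': 'gamma', 'weight': 50, 'uri': 'mongodb://gamma'},
    ]

    # Random pick, proportional to weight: 'alpha' is twice as likely as 'gamma'.
    chosen = select.weighted(shards)

    # A fixed generator makes the choice deterministic (handy in tests).
    first = select.weighted(shards, generator=lambda lo, hi: 0)    # -> 'alpha'
    last = select.weighted(shards, generator=lambda lo, hi: hi)    # -> 'gamma'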
@@ -15,6 +15,7 @@

"""utils: general-purpose utilities."""

from oslo.config import cfg
import six


@@ -36,3 +37,29 @@ def fields(d, names, pred=lambda x: True,
    return dict((key_transform(k), value_transform(v))
                for k, v in six.iteritems(d)
                if k in names and pred(v))


_pytype_to_cfgtype = {
    six.text_type: cfg.StrOpt,
    int: cfg.IntOpt,
    bool: cfg.BoolOpt,
    float: cfg.FloatOpt,
    list: cfg.ListOpt,
    dict: cfg.DictOpt
}


def dict_to_conf(options):
    """Converts a python dictionary to a list of oslo.config.cfg.Opt

    :param options: The python dictionary to convert
    :type options: dict
    :returns: a list of options compatible with oslo.config
    :rtype: [oslo.config.cfg.Opt]
    """
    opts = []
    for k, v in six.iteritems(options):
        opt_type = _pytype_to_cfgtype[type(v)]
        opts.append(opt_type(name=k, default=v))

    return opts
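Editor's note: a short sketch of how dict_to_conf is intended to be used. The option names are illustrative only, and dict ordering is not guaranteed on Python 2, so the resulting list order may vary.

    from oslo.config import cfg

    from marconi.common import utils as common_utils

    # Each value's Python type selects the matching oslo.config Opt class.
    opts = common_utils.dict_to_conf({'storage': u'mongodb',
                                      'weight': 100,
                                      'dynamic': True})
    # Roughly equivalent to:
    #   [cfg.StrOpt('storage', default=u'mongodb'),
    #    cfg.IntOpt('weight', default=100),
    #    cfg.BoolOpt('dynamic', default=True)]
    conf = cfg.ConfigOpts()
    conf.register_opts(opts)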
@@ -29,7 +29,10 @@ LOG = log.getLogger(__name__)

_GENERAL_OPTIONS = [
    cfg.BoolOpt('sharding', default=False,
                help='Enable sharding across multiple storage backends'),
                help=('Enable sharding across multiple storage backends. ',
                      'If sharding is enabled, the storage driver ',
                      'configuration is used to determine where the ',
                      'catalogue/control plane data is kept.')),
    cfg.BoolOpt('admin_mode', default=False,
                help='Activate endpoints to manage shard registry.'),
]

@@ -67,7 +70,7 @@ class Bootstrap(object):

        if self.conf.sharding:
            LOG.debug(_(u'Storage sharding enabled'))
            storage_driver = sharding.DataDriver(self.conf)
            storage_driver = sharding.DataDriver(self.conf, self.control)
        else:
            storage_driver = storage_utils.load_storage_driver(self.conf)

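Editor's note: to make the wiring above concrete, here is a minimal sketch of bootstrapping with sharding enabled. The config file path is hypothetical and the bootstrap import path is assumed from this commit's module layout; attribute names used (transport.app, control) appear in the test changes below.

    from oslo.config import cfg

    from marconi.queues import bootstrap   # assumed import path

    conf = cfg.ConfigOpts()
    conf(args=[], default_config_files=['etc/marconi.conf'])   # hypothetical path

    # With sharding = True in the config, Bootstrap builds the sharding data
    # driver and hands it the control driver (shard registry + catalogue);
    # otherwise a single backend is loaded via load_storage_driver().
    boot = bootstrap.Bootstrap(conf)
    app = boot.transport.app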
@@ -121,7 +121,7 @@ class QueueBase(ControllerBase):

    @abc.abstractmethod
    def list(self, project=None, marker=None, limit=10,
             detailed=False, include_claimed=True):
             detailed=False):
        """Base method for listing queues.

        :param project: Project id
@@ -129,7 +129,6 @@ class QueueBase(ControllerBase):
        :param limit: (Default 10, configurable) Max number
            queues to return.
        :param detailed: Whether metadata is included
        :param include_claimed: Whether to list claimed messages

        :returns: An iterator giving a sequence of queues
            and the marker of the next page.
@@ -208,7 +207,8 @@ class MessageBase(ControllerBase):

    @abc.abstractmethod
    def list(self, queue, project=None, marker=None,
             limit=10, echo=False, client_uuid=None):
             limit=None, echo=False, client_uuid=None,
             include_claimed=False):
        """Base method for listing messages.

        :param queue: Name of the queue to get the
@@ -217,9 +217,12 @@ class MessageBase(ControllerBase):
        :param marker: Tail identifier
        :param limit: (Default 10, configurable) Max number
            messages to return.
        :type limit: Maybe int
        :param echo: (Default False) Boolean expressing whether
            or not this client should receive its own messages.
        :param client_uuid: A UUID object. Required when echo=False.
        :param include_claimed: omit claimed messages from listing?
        :type include_claimed: bool

        :returns: An iterator giving a sequence of messages and
            the marker of the next page.

@@ -122,3 +122,10 @@ class ShardDoesNotExist(DoesNotExist):
    def __init__(self, shard):
        msg = u'Shard {0} does not exists'.format(shard)
        super(ShardDoesNotExist, self).__init__(msg)


class NoShardFound(Exception):

    def __init__(self):
        msg = u'No shards registered'
        super(NoShardFound, self).__init__(msg)
@@ -42,9 +42,18 @@ class DataDriver(storage.DataDriverBase):
    def __init__(self, conf):
        super(DataDriver, self).__init__(conf)

        self.conf.register_opts(options.MONGODB_OPTIONS,
                                group=options.MONGODB_GROUP)
        opts = options.MONGODB_OPTIONS

        # NOTE(cpp-cabrera): if this data driver is being loaded
        # dynamically, as would be the case for a sharded context,
        # filter out the options that were given by the shard
        # catalogue to avoid DuplicateOptErrors.
        if 'dynamic' in conf:
            names = conf[options.MONGODB_GROUP].keys()
            opts = filter(lambda x: x.name not in names, opts)

        self.conf.register_opts(opts,
                                group=options.MONGODB_GROUP)
        self.mongodb_conf = self.conf[options.MONGODB_GROUP]

    @decorators.lazy_property(write=False)
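Editor's note: the 'dynamic' guard above exists because, in a sharded deployment, the catalogue has already pushed shard-specific options (such as the URI) into the mongodb group before this driver is constructed; registering them a second time would raise oslo.config's DuplicateOptError. A standalone restatement of the same idea, with illustrative names, assuming it runs in the driver's __init__ context:

    # Options the shard catalogue already registered for this group,
    # e.g. {'uri'}; skip them to avoid cfg.DuplicateOptError.
    already_set = set(conf[options.MONGODB_GROUP].keys())
    opts = [o for o in options.MONGODB_OPTIONS if o.name not in already_set]
    conf.register_opts(opts, group=options.MONGODB_GROUP)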
@@ -15,9 +15,13 @@
# limitations under the License.

from oslo.config import cfg
import six

from marconi.common import decorators
from marconi.common.storage import select
from marconi.common import utils as common_utils
from marconi.queues import storage
from marconi.queues.storage import errors
from marconi.queues.storage import utils

_CATALOG_OPTIONS = [
@@ -35,9 +39,9 @@ class DataDriver(storage.DataDriverBase):
    :param catalog_conf: Options pertaining to the shard catalog
    """

    def __init__(self, conf):
    def __init__(self, conf, control):
        super(DataDriver, self).__init__(conf)
        self._shard_catalog = Catalog(conf)
        self._shard_catalog = Catalog(conf, control)

    @decorators.lazy_property(write=False)
    def queue_controller(self):
@@ -91,64 +95,228 @@ class RoutingController(storage.base.ControllerBase):


class QueueController(RoutingController):
    """Controller to facilitate special processing for queue operations."""
    """Controller to facilitate special processing for queue operations.
    """

    _resource_name = 'queue'

    def __init__(self, shard_catalog):
        super(QueueController, self).__init__(shard_catalog)
        self._lookup = self._shard_catalog.lookup

    def list(self, project=None, marker=None,
             limit=None, detailed=False):
        # TODO(kgriffs): SHARDING - Query all shards and merge
        # the results, then return the resulting list.
             limit=10, detailed=False):
        # TODO(cpp-cabrera): fill in sharded list queues
        # implementation.

        # TODO(kgriffs): Remove this placeholder code - it is
        # only here to make tests pass in the short term!
        target = self._shard_catalog.lookup(None, project).queue_controller
        return target.list(project=project, marker=marker,
                           limit=limit, detailed=detailed)
        return []

    def create(self, name, project=None):
        self._shard_catalog.register(name, project)

        target = self._shard_catalog.lookup(name, project).queue_controller
        return target.create(name, project)
        # NOTE(cpp-cabrera): This should always succeed since we just
        # registered the project/queue. There is a race condition,
        # however. If between the time we register a queue and go to
        # look it up, the queue is deleted, then this assertion will
        # fail.
        target = self._lookup(name, project)
        if not target:
            raise RuntimeError('Failed to register queue')

        return target.queue_controller.create(name, project)

    def delete(self, name, project=None):
        self._shard_catalog.deregister(name, project)
        # NOTE(cpp-cabrera): If we fail to find a project/queue in the
        # catalogue for a delete, just ignore it.
        target = self._lookup(name, project)
        if target:

        target = self._shard_catalog.lookup(name, project).queue_controller
        return target.delete(name, project)
            # NOTE(cpp-cabrera): Now we found the controller. First,
            # attempt to delete it from storage. IFF the deletion is
            # successful, then remove it from the catalogue.
            control = target.queue_controller
            ret = control.delete(name, project)
            self._shard_catalog.deregister(name, project)
            return ret

        return None

    def exists(self, name, project=None, **kwargs):
        target = self._lookup(name, project)
        if target:
            control = target.queue_controller
            return control.exists(name, project=project)
        return False

    def get_metadata(self, name, project=None):
        target = self._lookup(name, project)
        if target:
            control = target.queue_controller
            return control.get_metadata(name, project=project)
        raise errors.QueueDoesNotExist(name, project)

    def set_metadata(self, name, metadata, project=None):
        target = self._lookup(name, project)
        if target:
            control = target.queue_controller
            return control.set_metadata(name, metadata=metadata,
                                        project=project)
        raise errors.QueueDoesNotExist(name, project)

    def stats(self, name, project=None):
        target = self._lookup(name, project)
        if target:
            control = target.queue_controller
            return control.stats(name, project=project)
        raise errors.QueueDoesNotExist(name, project)


class MessageController(RoutingController):
    _resource_name = 'message'

    def __init__(self, shard_catalog):
        super(MessageController, self).__init__(shard_catalog)
        self._lookup = self._shard_catalog.lookup

    def post(self, queue, project, messages, client_uuid):
        target = self._lookup(queue, project)
        if target:
            control = target.message_controller
            return control.post(queue, project=project,
                                messages=messages,
                                client_uuid=client_uuid)
        raise errors.QueueDoesNotExist(project, queue)

    def delete(self, queue, project, message_id, claim):
        target = self._lookup(queue, project)
        if target:
            control = target.message_controller
            return control.delete(queue, project=project,
                                  message_id=message_id, claim=claim)
        return None

    def bulk_delete(self, queue, project, message_ids):
        target = self._lookup(queue, project)
        if target:
            control = target.message_controller
            return control.bulk_delete(queue, project=project,
                                       message_ids=message_ids)
        return None

    def bulk_get(self, queue, project, message_ids):
        target = self._lookup(queue, project)
        if target:
            control = target.message_controller
            return control.bulk_get(queue, project=project,
                                    message_ids=message_ids)
        return []

    def list(self, queue, project, marker=None, limit=10, echo=False,
             client_uuid=None, include_claimed=False):
        target = self._lookup(queue, project)
        if target:
            control = target.message_controller
            return control.list(queue, project=project,
                                marker=marker, limit=limit,
                                echo=echo, client_uuid=client_uuid,
                                include_claimed=include_claimed)
        return iter([[]])

    def get(self, queue, message_id, project):
        target = self._lookup(queue, project)
        if target:
            control = target.message_controller
            return control.get(queue, message_id=message_id,
                               project=project)
        raise errors.QueueDoesNotExist(project, queue)


class ClaimController(RoutingController):
    _resource_name = 'claim'

    def __init__(self, shard_catalog):
        super(ClaimController, self).__init__(shard_catalog)
        self._lookup = self._shard_catalog.lookup

    def create(self, queue, metadata, project=None, limit=None):
        target = self._lookup(queue, project)
        if target:
            control = target.claim_controller
            return control.create(queue, metadata=metadata,
                                  project=project, limit=limit)
        return [None, []]

    def get(self, queue, claim_id, project):
        target = self._lookup(queue, project)
        if target:
            control = target.claim_controller
            return control.get(queue, claim_id=claim_id,
                               project=project)
        raise errors.ClaimDoesNotExist(claim_id, queue, project)

    def update(self, queue, claim_id, metadata, project):
        target = self._lookup(queue, project)
        if target:
            control = target.claim_controller
            return control.update(queue, claim_id=claim_id,
                                  project=project, metadata=metadata)
        raise errors.ClaimDoesNotExist(claim_id, queue, project)

    def delete(self, queue, claim_id, project):
        target = self._lookup(queue, project)
        if target:
            control = target.claim_controller
            return control.delete(queue, claim_id=claim_id,
                                  project=project)
        return None


class Catalog(object):
    """Represents the mapping between queues and shard drivers."""

    def __init__(self, conf):
        self._shards = {}
    def __init__(self, conf, control):
        self._drivers = {}
        self._conf = conf

        self._conf.register_opts(_CATALOG_OPTIONS, group=_CATALOG_GROUP)
        self._catalog_conf = self._conf[_CATALOG_GROUP]
        self._shards_ctrl = control.shards_controller
        self._catalogue_ctrl = control.catalogue_controller

    def _init_shard(self, shard_id):
        # TODO(kgriffs): SHARDING - Read options from catalog backend
    # FIXME(cpp-cabrera): https://bugs.launchpad.net/marconi/+bug/1252791
    def _init_driver(self, shard_id):
        """Given a shard name, returns a storage driver.

        :param shard_id: The name of a shard.
        :type shard_id: six.text_type
        :returns: a storage driver
        :rtype: marconi.queues.storage.base.DataDriver
        """
        shard = self._shards_ctrl.get(shard_id, detailed=True)

        # NOTE(cpp-cabrera): make it *very* clear to data storage
        # drivers that we are operating in sharding mode.
        general_dict_opts = {'dynamic': True}
        general_opts = common_utils.dict_to_conf(general_dict_opts)

        # NOTE(cpp-cabrera): parse general opts: 'queues:drivers'
        uri = shard['uri']
        storage_type = six.moves.urllib_parse.urlparse(uri).scheme
        driver_dict_opts = {'storage': storage_type}
        driver_opts = common_utils.dict_to_conf(driver_dict_opts)

        # NOTE(cpp-cabrera): parse storage-specific opts:
        # 'queues:drivers:storage:{type}'
        storage_dict_opts = shard['options']
        storage_dict_opts['uri'] = shard['uri']
        storage_opts = common_utils.dict_to_conf(storage_dict_opts)
        storage_group = u'queues:drivers:storage:%s' % storage_type

        # NOTE(cpp-cabrera): register those options!
        conf = cfg.ConfigOpts()

        options = [
            cfg.StrOpt('storage', default='sqlite'),
        ]

        conf.register_opts(options, group='queues:drivers')
        conf.register_opts(general_opts)
        conf.register_opts(driver_opts, group=u'queues:drivers')
        conf.register_opts(storage_opts, group=storage_group)
        return utils.load_storage_driver(conf)

    def register(self, queue, project=None):
@@ -164,22 +332,34 @@ class Catalog(object):
        queue's assigned backend shard.

        :param queue: Name of the new queue to assign to a shard
        :type queue: six.text_type
        :param project: Project to which the queue belongs, or
            None for the "global" or "generic" project.
        :type project: six.text_type
        :raises: NoShardFound
        """

        # TODO(kgriffs): SHARDING - Implement this!
        pass
        if not self._catalogue_ctrl.exists(project, queue):
            # NOTE(cpp-cabrera): limit=0 implies unlimited - select from
            # all shards
            shard = select.weighted(self._shards_ctrl.list(limit=0))
            if not shard:
                raise errors.NoShardFound()
            self._catalogue_ctrl.insert(project, queue, shard['name'])

    def deregister(self, queue, project=None):
        """Removes a queue from the shard catalog.

        Call this method after successfully deleting it from a
        backend shard.
        """

        # TODO(kgriffs): SHARDING - Implement this!
        pass
        :param queue: Name of the new queue to assign to a shard
        :type queue: six.text_type
        :param project: Project to which the queue belongs, or
            None for the "global" or "generic" project.
        :type project: six.text_type
        """
        # TODO(cpp-cabrera): invalidate cache here
        self._catalogue_ctrl.delete(project, queue)

    def lookup(self, queue, project=None):
        """Lookup a shard driver for the given queue and project.
@@ -190,16 +370,19 @@ class Catalog(object):

        :returns: A storage driver instance for the appropriate shard. If
            the driver does not exist yet, it is created and cached.
        :rtype: Maybe DataDriver
        """

        # TODO(kgriffs): SHARDING - Raise an exception if the queue
        # does not have a mapping (it does not exist).

        # TODO(kgriffs): SHARDING - Get ID from the catalog backend
        shard_id = '[insert_id]'
        # TODO(cpp-cabrera): add caching lookup here
        try:
            shard = self._shards[shard_id]
        except KeyError:
            self._shards[shard_id] = shard = self._init_shard(shard_id)
            shard_id = self._catalogue_ctrl.get(project, queue)['shard']
        except errors.QueueNotMapped:
            return None

        return shard
        # NOTE(cpp-cabrera): cache storage driver connection
        try:
            driver = self._drivers[shard_id]
        except KeyError:
            self._drivers[shard_id] = driver = self._init_driver(shard_id)

        return driver
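Editor's note: taken together, the new Catalog ties queue registration to the control-plane shard registry. A rough usage sketch follows, assuming at least one shard has already been created through the shards controller; the queue and project values are borrowed from the tests below and are otherwise arbitrary.

    from marconi.queues.storage import sharding
    from marconi.queues.storage import utils

    control = utils.load_storage_driver(conf, control_mode=True)
    catalog = sharding.Catalog(conf, control)

    # register() picks a shard via select.weighted() over the registered
    # shards and stores the queue -> shard mapping in the catalogue.
    catalog.register('fizbit', project='7e55e1a7e')

    # lookup() resolves the mapping, then lazily builds and caches a data
    # driver for that shard; it returns None for unmapped queues.
    driver = catalog.lookup('fizbit', project='7e55e1a7e')
    if driver is not None:
        driver.queue_controller.create('fizbit', project='7e55e1a7e')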
New test files:
  tests/unit/common/storage/__init__.py      (0 lines)
  tests/unit/common/storage/test_select.py   (66 lines)
tests/unit/common/storage/test_select.py (new file)
@@ -0,0 +1,66 @@
# Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import testtools

from marconi.common.storage import select


class TestSelect(testtools.TestCase):

    def test_weighted_returns_none_if_no_objs(self):
        self.assertIsNone(select.weighted([]))

    def test_weighted_returns_none_if_objs_have_zero_weight(self):
        objs = [{'weight': 0, 'name': str(i)} for i in range(2)]
        self.assertIsNone(select.weighted(objs))

    def test_weighted_ignores_zero_weight_objs(self):
        objs = [{'weight': 0, 'name': str(i)} for i in range(2)]
        expect = {'weight': 1, 'name': 'theone'}
        objs.append(expect)
        self.assertEqual(select.weighted(objs), expect)

    def test_weighted_returns_an_object_it_was_given(self):
        objs = [{'weight': 10, 'name': str(i)} for i in range(10)]
        ret = select.weighted(objs)
        self.assertIn(ret, objs)

    def test_weighted_returns_none_if_selector_oob(self):
        objs = [{'weight': 10, 'name': str(i)} for i in range(10)]
        sum_weights = sum([o['weight'] for o in objs])
        capped_gen = lambda x, y: sum_weights
        self.assertIsNone(select.weighted(objs,
                                          generator=capped_gen))

    def test_weighted_returns_first_if_selector_is_zero(self):
        objs = [{'weight': 10, 'name': str(i)} for i in range(10)]
        zero_gen = lambda x, y: 0
        self.assertEqual(select.weighted(objs, generator=zero_gen),
                         objs[0])

    def test_weighted_returns_last_if_selector_is_sum_minus_one(self):
        objs = [{'weight': 10, 'name': str(i)} for i in range(10)]
        sum_weights = sum([o['weight'] for o in objs])
        capped_gen = lambda x, y: sum_weights - 1
        self.assertEqual(select.weighted(objs, generator=capped_gen),
                         objs[-1])

    def test_weighted_boundaries(self):
        objs = [{'weight': 1, 'name': str(i)} for i in range(3)]
        for i in range(len(objs)):
            fixed_gen = lambda x, y: i
            self.assertEqual(select.weighted(objs, generator=fixed_gen),
                             objs[i])
@@ -14,30 +14,57 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import uuid

from oslo.config import cfg

from marconi.queues.storage import sharding
from marconi.queues.storage import sqlite
from marconi.tests import base
from marconi.queues.storage import utils
from marconi import tests as testing


class TestShardCatalog(base.TestBase):
# TODO(cpp-cabrera): it would be wonderful to refactor this unit test
# so that it could use multiple control storage backends once those
# have shards/catalogue implementations.
class TestShardCatalog(testing.TestBase):

    def test_lookup(self):
        # TODO(kgriffs): SHARDING - configure sharding to use an in-memory
        # backend store, and register the queues we are going to look up.
        conf_file = 'etc/wsgi_sqlite_sharded.conf'
    @testing.requires_mongodb
    def setUp(self):
        super(TestShardCatalog, self).setUp()
        conf = self.load_conf('wsgi_mongodb_sharded.conf')

        conf = cfg.ConfigOpts()
        conf(args=[], default_config_files=[conf_file])
        conf.register_opts([cfg.StrOpt('storage')],
                           group='queues:drivers')
        control = utils.load_storage_driver(conf, control_mode=True)
        self.catalogue_ctrl = control.catalogue_controller
        self.shards_ctrl = control.shards_controller

        lookup = sharding.Catalog(conf).lookup
        # NOTE(cpp-cabrera): populate catalogue
        self.shard = str(uuid.uuid1())
        self.queue = str(uuid.uuid1())
        self.project = str(uuid.uuid1())
        self.shards_ctrl.create(self.shard, 100, 'sqlite://memory')
        self.catalogue_ctrl.insert(self.project, self.queue, self.shard)
        self.catalog = sharding.Catalog(conf, control)

        storage = lookup('q1', '123456')
    def tearDown(self):
        self.catalogue_ctrl.drop_all()
        self.shards_ctrl.drop_all()
        super(TestShardCatalog, self).tearDown()

    def test_lookup_loads_correct_driver(self):
        storage = self.catalog.lookup(self.queue, self.project)
        self.assertIsInstance(storage, sqlite.DataDriver)

        storage = lookup('q2', '123456')
        self.assertIsInstance(storage, sqlite.DataDriver)
    def test_lookup_returns_none_if_queue_not_mapped(self):
        self.assertIsNone(self.catalog.lookup('not', 'mapped'))

        storage = lookup('g1', None)
    def test_lookup_returns_none_if_entry_deregistered(self):
        self.catalog.deregister(self.queue, self.project)
        self.assertIsNone(self.catalog.lookup(self.queue, self.project))

    def test_register_leads_to_successful_lookup(self):
        self.catalog.register('not_yet', 'mapped')
        storage = self.catalog.lookup('not_yet', 'mapped')
        self.assertIsInstance(storage, sqlite.DataDriver)

@@ -31,17 +31,24 @@ class TestBase(testing.TestBase):

        super(TestBase, self).setUp()

        conf = self.load_conf(self.conf_path(self.config_filename))
        conf.register_opts(driver._WSGI_OPTIONS,
                           group=driver._WSGI_GROUP)
        self.wsgi_cfg = conf[driver._WSGI_GROUP]
        self.conf = self.load_conf(self.conf_path(self.config_filename))
        self.conf.register_opts(driver._WSGI_OPTIONS,
                                group=driver._WSGI_GROUP)

        conf.admin_mode = True
        self.boot = bootstrap.Bootstrap(conf)
        self.wsgi_cfg = self.conf[driver._WSGI_GROUP]

        self.conf.admin_mode = True
        self.boot = bootstrap.Bootstrap(self.conf)
        self.app = self.boot.transport.app

        self.srmock = ftest.StartResponseMock()

    def tearDown(self):
        if self.conf.sharding:
            self.boot.control.shards_controller.drop_all()
            self.boot.control.catalogue_controller.drop_all()
        super(TestBase, self).tearDown()

    def simulate_request(self, path, project_id=None, **kwargs):
        """Simulate a request.

@@ -33,6 +33,14 @@ class MessagesBaseTest(base.TestBase):
    def setUp(self):
        super(MessagesBaseTest, self).setUp()

        if self.conf.sharding:
            for i in range(4):
                uri = self.conf['queues:drivers:storage:mongodb'].uri
                doc = {'weight': 100, 'uri': uri}
                self.simulate_put('/v1/shards/' + str(i),
                                  body=json.dumps(doc))
                self.assertEqual(self.srmock.status, falcon.HTTP_201)

        self.project_id = '7e55e1a7e'
        self.queue_path = '/v1/queues/fizbit'
        self.messages_path = self.queue_path + '/messages'
@@ -46,6 +54,10 @@ class MessagesBaseTest(base.TestBase):

    def tearDown(self):
        self.simulate_delete(self.queue_path, self.project_id)
        if self.conf.sharding:
            for i in range(4):
                self.simulate_delete('/v1/shards/' + str(i))

        super(MessagesBaseTest, self).tearDown()

    def _test_post(self, sample_messages):
@@ -425,9 +437,8 @@ class MessagesSQLiteTests(MessagesBaseTest):
    config_filename = 'wsgi_sqlite.conf'


class MessagesSQLiteShardedTests(MessagesBaseTest):

    config_filename = 'wsgi_sqlite_sharded.conf'
# TODO(cpp-cabrera): restore sqlite sharded test suite once shards and
# catalogue get an sqlite implementation.


@testing.requires_mongodb
@@ -438,6 +449,9 @@ class MessagesMongoDBTests(MessagesBaseTest):
    def setUp(self):
        super(MessagesMongoDBTests, self).setUp()

    def tearDown(self):
        super(MessagesMongoDBTests, self).tearDown()


@testing.requires_mongodb
class MessagesMongoDBShardedTests(MessagesBaseTest):
@@ -447,6 +461,14 @@ class MessagesMongoDBShardedTests(MessagesBaseTest):
    def setUp(self):
        super(MessagesMongoDBShardedTests, self).setUp()

    def tearDown(self):
        super(MessagesMongoDBShardedTests, self).tearDown()

    # TODO(cpp-cabrera): remove this skipTest once sharded queue
    # listing is implemented
    def test_list(self):
        self.skipTest("Need to implement sharded queue listing.")


class MessagesFaultyDriverTests(base.TestBaseFaulty):
