Added some de-facto style guidelines to HACKING and fixed violations

This patch adds several guidelines:

* Global constants should be ALL_CAPS (cfg => CFG)
* Prefer single-quotes over double-quotes ("foo" => 'foo')
* Place a space before TODO in comments ("#TODO" => "# TODO")

Change-Id: Ib5b5c5916744856eca2ecaa37e949a3cdc4b3bd7
This commit is contained in:
kgriffs 2013-06-14 14:38:26 -04:00
parent 38370ee30d
commit baf3d2e372
39 changed files with 561 additions and 539 deletions

View File

@ -12,9 +12,27 @@ General
- Put two newlines between top-level code (funcs, classes, etc.)
- Put one newline between methods in classes and anywhere else.
- Use blank lines to group related logic.
- Do not write "except:", use "except Exception:" at the very least.
- Include your name with TODOs as in "#TODO(termie)".
- All classes must inherit from "object" (explicitly).
- Never write ``except:`` (use ``except Exception:`` instead, at
the very least).
- All classes must inherit from ``object`` (explicitly).
- Use single-quotes for strings unless the string contains a
single-quote.
- Use the double-quote character for blockquotes (``"""``, not ``'''``)
- USE_ALL_CAPS_FOR_GLOBAL_CONSTANTS
Comments
--------
- In general use comments as "memory pegs" for those coming after you up
the trail.
- Guide the reader through long functions with comments introducing
different sections of the code.
- Choose clean, descriptive names for functions and variables to make
them self-documenting.
- Include your name with TODOs as in ``# TODO(termie): blah blah...``.
- Add ``# NOTE(termie): blah blah...`` comments to clarify your intent, or
to explain a tricky algorithm, when it isn't obvious from just reading
the code.
Identifiers
-----------

View File

@ -15,13 +15,13 @@
# Import guard. No module level import during the setup procedure.
try:
if __MARCONI_SETUP__:
if __MARCONI_SETUP__: # NOQA
import sys as _sys
_sys.stderr.write('Running from marconi source directory.\n')
del _sys
except NameError:
import gettext
gettext.install("marconi", unicode=1)
gettext.install('marconi', unicode=1)
import marconi.bootstrap
Bootstrap = marconi.bootstrap.Bootstrap

View File

@ -19,11 +19,11 @@ from marconi.common import config
from marconi.common import decorators
from marconi.common import exceptions
from marconi.openstack.common import log
from marconi import transport # NOQA.
from marconi import transport # NOQA
cfg_handle = config.project('marconi')
cfg = config.namespace('drivers').from_options(
PROJECT_CFG = config.project('marconi')
CFG = config.namespace('drivers').from_options(
transport='wsgi',
storage='sqlite')
@ -38,15 +38,15 @@ class Bootstrap(object):
"""
def __init__(self, config_file=None, cli_args=None):
cfg_handle.load(filename=config_file, args=cli_args)
log.setup("marconi")
PROJECT_CFG.load(filename=config_file, args=cli_args)
log.setup('marconi')
@decorators.lazy_property(write=False)
def storage(self):
LOG.debug(_("Loading Storage Driver"))
LOG.debug(_('Loading Storage Driver'))
try:
mgr = driver.DriverManager('marconi.storage',
cfg.storage,
CFG.storage,
invoke_on_load=True)
return mgr.driver
except RuntimeError as exc:
@ -54,10 +54,10 @@ class Bootstrap(object):
@decorators.lazy_property(write=False)
def transport(self):
LOG.debug(_("Loading Transport Driver"))
LOG.debug(_('Loading Transport Driver'))
try:
mgr = driver.DriverManager('marconi.transport',
cfg.transport,
CFG.transport,
invoke_on_load=True,
invoke_args=[self.storage])
return mgr.driver

View File

@ -19,7 +19,7 @@ from marconi import bootstrap
def fail(returncode, e):
sys.stderr.write("ERROR: %s\n" % e)
sys.stderr.write('ERROR: %s\n' % e)
sys.exit(returncode)

View File

@ -22,7 +22,7 @@ A config variable `foo` is a read-only property accessible through
, where `CFG` is either a global configuration accessible through
CFG = config.project('marconi').from_options(
foo=("bar", "usage"),
foo=('bar', 'usage'),
...)
, or a local configuration associated with a namespace
@ -44,7 +44,7 @@ sections named by their associated namespaces.
To load the configurations from a file:
PROJECT_CFG = config.project('marconi')
PROJECT_CFG.load(filename="/path/to/example.conf")
PROJECT_CFG.load(filename='/path/to/example.conf')
A call to `.load` without a filename looks up for the default ones:
@ -54,7 +54,7 @@ A call to `.load` without a filename looks up for the default ones:
Global config variables, if any, can also be read from the command line
arguments:
PROJECT_CFG.load(filename="example.conf", args=sys.argv[1:])
PROJECT_CFG.load(filename='example.conf', args=sys.argv[1:])
"""
from oslo.config import cfg
@ -197,4 +197,4 @@ def _make_opt(name, default):
try:
return deduction[type(default)](name, help=help, default=default)
except KeyError:
raise cfg.Error("unrecognized option type")
raise cfg.Error('unrecognized option type')

View File

@ -120,7 +120,7 @@ class QueueBase(ControllerBase):
:returns: True if a queue was created and False
if it was updated.
"""
msg = _("Metadata should be an instance of dict")
msg = _('Metadata should be an instance of dict')
assert isinstance(metadata, dict), msg
@abc.abstractmethod

View File

@ -47,9 +47,9 @@ class MessageConflict(Conflict):
posted. Note that these must be in the same order as the
list of messages originally submitted to be enqueued.
"""
msg = (_("Message could not be enqueued due to a conflict "
"with another message that is already in "
"queue %(queue)s for project %(project)s") %
msg = (_('Message could not be enqueued due to a conflict '
'with another message that is already in '
'queue %(queue)s for project %(project)s') %
dict(queue=queue, project=project))
super(MessageConflict, self).__init__(msg)
@ -64,7 +64,7 @@ class MessageConflict(Conflict):
class QueueDoesNotExist(DoesNotExist):
def __init__(self, name, project):
msg = (_("Queue %(name)s does not exist for project %(project)s") %
msg = (_('Queue %(name)s does not exist for project %(project)s') %
dict(name=name, project=project))
super(QueueDoesNotExist, self).__init__(msg)
@ -72,8 +72,8 @@ class QueueDoesNotExist(DoesNotExist):
class MessageDoesNotExist(DoesNotExist):
def __init__(self, mid, queue, project):
msg = (_("Message %(mid)s does not exist in "
"queue %(queue)s for project %(project)s") %
msg = (_('Message %(mid)s does not exist in '
'queue %(queue)s for project %(project)s') %
dict(mid=mid, queue=queue, project=project))
super(MessageDoesNotExist, self).__init__(msg)
@ -81,8 +81,8 @@ class MessageDoesNotExist(DoesNotExist):
class ClaimDoesNotExist(DoesNotExist):
def __init__(self, cid, queue, project):
msg = (_("Claim %(cid)s does not exist in "
"queue %(queue)s for project %(project)s") %
msg = (_('Claim %(cid)s does not exist in '
'queue %(queue)s for project %(project)s') %
dict(cid=cid, queue=queue, project=project))
super(ClaimDoesNotExist, self).__init__(msg)
@ -90,6 +90,6 @@ class ClaimDoesNotExist(DoesNotExist):
class ClaimNotPermitted(NotPermitted):
def __init__(self, mid, cid):
msg = (_("Message %(mid)s is not claimed by %(cid)s") %
msg = (_('Message %(mid)s is not claimed by %(cid)s') %
dict(cid=cid, mid=mid))
super(ClaimNotPermitted, self).__init__(msg)

View File

@ -56,20 +56,20 @@ class QueueController(storage.QueueBase):
def __init__(self, *args, **kwargs):
super(QueueController, self).__init__(*args, **kwargs)
self._col = self.driver.db["queues"]
self._col = self.driver.db['queues']
# NOTE(flaper87): This creates a unique compound index for
# project and name. Using project as the first field of the
# index allows for querying by project and project+name.
# This is also useful for retrieving the queues list for
# as specific project, for example. Order Matters!
self._col.ensure_index([("p", 1), ("n", 1)], unique=True)
self._col.ensure_index([('p', 1), ('n', 1)], unique=True)
#-----------------------------------------------------------------------
# Helpers
#-----------------------------------------------------------------------
def _get(self, name, project=None, fields={"m": 1, "_id": 0}):
queue = self._col.find_one({"p": project, "n": name}, fields=fields)
def _get(self, name, project=None, fields={'m': 1, '_id': 0}):
queue = self._col.find_one({'p': project, 'n': name}, fields=fields)
if queue is None:
raise exceptions.QueueDoesNotExist(name, project)
@ -80,13 +80,13 @@ class QueueController(storage.QueueBase):
:returns: Queue's `ObjectId`
"""
queue = self._get(name, project, fields=["_id"])
return queue.get("_id")
queue = self._get(name, project, fields=['_id'])
return queue.get('_id')
def _get_ids(self):
"""Returns a generator producing a list of all queue IDs."""
cursor = self._col.find({}, fields={"_id": 1})
return (doc["_id"] for doc in cursor)
cursor = self._col.find({}, fields={'_id': 1})
return (doc['_id'] for doc in cursor)
#-----------------------------------------------------------------------
# Interface
@ -94,47 +94,47 @@ class QueueController(storage.QueueBase):
def list(self, project=None, marker=None,
limit=10, detailed=False):
query = {"p": project}
query = {'p': project}
if marker:
query["n"] = {"$gt": marker}
query['n'] = {'$gt': marker}
fields = {"n": 1, "_id": 0}
fields = {'n': 1, '_id': 0}
if detailed:
fields["m"] = 1
fields['m'] = 1
cursor = self._col.find(query, fields=fields)
cursor = cursor.limit(limit).sort("n")
cursor = cursor.limit(limit).sort('n')
marker_name = {}
def normalizer(records):
for rec in records:
queue = {"name": rec["n"]}
marker_name["next"] = queue["name"]
queue = {'name': rec['n']}
marker_name['next'] = queue['name']
if detailed:
queue["metadata"] = rec["m"]
queue['metadata'] = rec['m']
yield queue
yield normalizer(cursor)
yield marker_name["next"]
yield marker_name['next']
def get(self, name, project=None):
queue = self._get(name, project)
return queue.get("m", {})
return queue.get('m', {})
def upsert(self, name, metadata, project=None):
super(QueueController, self).upsert(name, metadata, project)
rst = self._col.update({"p": project, "n": name},
{"$set": {"m": metadata, "c": 1}},
rst = self._col.update({'p': project, 'n': name},
{'$set': {'m': metadata, 'c': 1}},
multi=False,
upsert=True,
manipulate=False)
return not rst["updatedExisting"]
return not rst['updatedExisting']
def delete(self, name, project=None):
self.driver.message_controller._purge_queue(name, project)
self._col.remove({"p": project, "n": name})
self._col.remove({'p': project, 'n': name})
def stats(self, name, project=None):
queue_id = self._get_id(name, project)
@ -143,10 +143,10 @@ class QueueController(storage.QueueBase):
claimed = controller.claimed(queue_id)
return {
"actions": 0,
"messages": {
"claimed": claimed.count(),
"free": active.count(),
'actions': 0,
'messages': {
'claimed': claimed.count(),
'free': active.count(),
}
}
@ -179,7 +179,7 @@ class MessageController(storage.MessageBase):
# Make sure indexes exist before,
# doing anything.
self._col = self._db["messages"]
self._col = self._db['messages']
# NOTE(flaper87): This index is used mostly in the
# active method but some parts of it are used in
@ -189,27 +189,27 @@ class MessageController(storage.MessageBase):
# * e: Together with q is used for getting a
# specific message. (see `get`)
active_fields = [
("q", 1),
("e", 1),
("c.e", 1),
("k", 1),
("_id", -1),
('q', 1),
('e', 1),
('c.e', 1),
('k', 1),
('_id', -1),
]
self._col.ensure_index(active_fields,
name="active",
name='active',
background=True)
# Index used for claims
claimed_fields = [
("q", 1),
("c.id", 1),
("c.e", 1),
("_id", -1),
('q', 1),
('c.id', 1),
('c.e', 1),
('_id', -1),
]
self._col.ensure_index(claimed_fields,
name="claimed",
name='claimed',
background=True)
# Index used for _next_marker() and also to ensure
@ -222,8 +222,8 @@ class MessageController(storage.MessageBase):
# to miss a message when there is more than one
# producer posting messages to the same queue, in
# parallel.
self._col.ensure_index([("q", 1), ("k", -1)],
name="queue_marker",
self._col.ensure_index([('q', 1), ('k', -1)],
name='queue_marker',
unique=True,
background=True)
@ -260,12 +260,12 @@ class MessageController(storage.MessageBase):
:returns: next message marker as an integer
"""
document = self._col.find_one({"q": queue_id},
sort=[("k", -1)],
fields={"k": 1, "_id": 0})
document = self._col.find_one({'q': queue_id},
sort=[('k', -1)],
fields={'k': 1, '_id': 0})
# NOTE(kgriffs): this approach is faster than using "or"
return 1 if document is None else (document["k"] + 1)
# NOTE(kgriffs): this approach is faster than using 'or'
return 1 if document is None else (document['k'] + 1)
def _backoff_sleep(self, attempt):
"""Sleep between retries using a jitter algorithm.
@ -289,8 +289,8 @@ class MessageController(storage.MessageBase):
"""
query = {
"q": queue_id,
"e": {"$lte": timeutils.utcnow()},
'q': queue_id,
'e': {'$lte': timeutils.utcnow()},
}
return self._col.find(query).count()
@ -313,19 +313,19 @@ class MessageController(storage.MessageBase):
if options.CFG.gc_threshold <= self._count_expired(queue_id):
# Get the message with the highest marker, and leave
# it in the queue
head = self._col.find_one({"q": queue_id},
sort=[("k", -1)],
fields={"_id": 1})
head = self._col.find_one({'q': queue_id},
sort=[('k', -1)],
fields={'_id': 1})
if head is None:
# Assume queue was just deleted via a parallel request
LOG.warning(_("Queue %s is empty or missing.") % queue_id)
LOG.warning(_('Queue %s is empty or missing.') % queue_id)
return
query = {
"q": queue_id,
"e": {"$lte": timeutils.utcnow()},
"_id": {"$ne": head["_id"]}
'q': queue_id,
'e': {'$lte': timeutils.utcnow()},
'_id': {'$ne': head['_id']}
}
self._col.remove(query)
@ -344,7 +344,7 @@ class MessageController(storage.MessageBase):
"""
try:
qid = self._get_queue_id(queue, project)
self._col.remove({"q": qid}, w=0)
self._col.remove({'q': qid}, w=0)
except exceptions.QueueDoesNotExist:
pass
@ -362,36 +362,36 @@ class MessageController(storage.MessageBase):
query = {
# Messages must belong to this queue
"q": utils.to_oid(queue_id),
'q': utils.to_oid(queue_id),
# The messages can not be expired
"e": {"$gt": now},
'e': {'$gt': now},
# Include messages that are part of expired claims
"c.e": {"$lte": now},
'c.e': {'$lte': now},
}
if fields and not isinstance(fields, (dict, list)):
raise TypeError(_("Fields must be an instance of list / dict"))
raise TypeError(_('Fields must be an instance of list / dict'))
if not echo and client_uuid:
query["u"] = {"$ne": client_uuid}
query['u'] = {'$ne': client_uuid}
if marker:
query["k"] = {"$gt": marker}
query['k'] = {'$gt': marker}
return self._col.find(query, fields=fields)
def claimed(self, queue_id, claim_id=None, expires=None, limit=None):
query = {
"c.id": claim_id,
"c.e": {"$gt": expires or timeutils.utcnow()},
"q": utils.to_oid(queue_id),
'c.id': claim_id,
'c.e': {'$gt': expires or timeutils.utcnow()},
'q': utils.to_oid(queue_id),
}
if not claim_id:
# lookup over c.id to use the index
query["c.id"] = {"$ne": None}
query['c.id'] = {'$ne': None}
msgs = self._col.find(query, sort=[("_id", 1)])
msgs = self._col.find(query, sort=[('_id', 1)])
if limit:
msgs = msgs.limit(limit)
@ -399,15 +399,15 @@ class MessageController(storage.MessageBase):
now = timeutils.utcnow()
def denormalizer(msg):
oid = msg["_id"]
oid = msg['_id']
age = now - utils.oid_utc(oid)
return {
"id": str(oid),
"age": age.seconds,
"ttl": msg["t"],
"body": msg["b"],
"claim": msg["c"]
'id': str(oid),
'age': age.seconds,
'ttl': msg['t'],
'body': msg['b'],
'claim': msg['c']
}
return utils.HookedCursor(msgs, denormalizer)
@ -418,8 +418,8 @@ class MessageController(storage.MessageBase):
except ValueError:
return
self._col.update({"c.id": cid},
{"$set": {"c": {"id": None, "e": 0}}},
self._col.update({'c.id': cid},
{'$set': {'c': {'id': None, 'e': 0}}},
upsert=False, multi=True)
def remove_expired(self, project=None):
@ -443,7 +443,7 @@ class MessageController(storage.MessageBase):
"""
# TODO(kgriffs): Optimize first by batching the .removes, second
# by setting a "last inserted ID" in the queue collection for
# by setting a 'last inserted ID' in the queue collection for
# each message inserted (TBD, may cause problematic side-effect),
# and third, by changing the marker algorithm such that it no
# longer depends on retaining the last message in the queue!
@ -462,21 +462,21 @@ class MessageController(storage.MessageBase):
qid = self._get_queue_id(queue, project)
messages = self.active(qid, marker, echo, client_uuid)
messages = messages.limit(limit).sort("_id")
messages = messages.limit(limit).sort('_id')
marker_id = {}
now = timeutils.utcnow()
def denormalizer(msg):
oid = msg["_id"]
oid = msg['_id']
age = now - utils.oid_utc(oid)
marker_id['next'] = msg["k"]
marker_id['next'] = msg['k']
return {
"id": str(oid),
"age": age.seconds,
"ttl": msg["t"],
"body": msg["b"],
'id': str(oid),
'age': age.seconds,
'ttl': msg['t'],
'body': msg['b'],
}
yield utils.HookedCursor(messages, denormalizer)
@ -488,9 +488,9 @@ class MessageController(storage.MessageBase):
# Base query, always check expire time
query = {
"q": self._get_queue_id(queue, project),
"e": {"$gt": now},
"_id": mid
'q': self._get_queue_id(queue, project),
'e': {'$gt': now},
'_id': mid
}
message = self._col.find_one(query)
@ -498,14 +498,14 @@ class MessageController(storage.MessageBase):
if message is None:
raise exceptions.MessageDoesNotExist(message_id, queue, project)
oid = message["_id"]
oid = message['_id']
age = now - utils.oid_utc(oid)
return {
"id": str(oid),
"age": age.seconds,
"ttl": message["t"],
"body": message["b"],
'id': str(oid),
'age': age.seconds,
'ttl': message['t'],
'body': message['b'],
}
def post(self, queue, messages, client_uuid, project=None):
@ -527,13 +527,13 @@ class MessageController(storage.MessageBase):
# cached in case we need to retry any of them.
message_gen = (
{
"t": message["ttl"],
"q": queue_id,
"e": now + datetime.timedelta(seconds=message["ttl"]),
"u": client_uuid,
"c": {"id": None, "e": now},
"b": message["body"] if "body" in message else {},
"k": next_marker + index,
't': message['ttl'],
'q': queue_id,
'e': now + datetime.timedelta(seconds=message['ttl']),
'u': client_uuid,
'c': {'id': None, 'e': now},
'b': message['body'] if 'body' in message else {},
'k': next_marker + index,
}
for index, message in enumerate(messages)
@ -556,9 +556,9 @@ class MessageController(storage.MessageBase):
# Log a message if we retried, for debugging perf issues
if attempt != 0:
message = _("%(attempts)d attempt(s) required to post "
"%(num_messages)d messages to queue "
"%(queue_id)s")
message = _('%(attempts)d attempt(s) required to post '
'%(num_messages)d messages to queue '
'%(queue_id)s')
message %= dict(queue_id=queue_id, attempts=attempt + 1,
num_messages=len(ids))
@ -576,8 +576,8 @@ class MessageController(storage.MessageBase):
#
# TODO(kgriffs): Add transaction ID to help match up loglines
if attempt == 0:
message = _("First attempt failed while adding messages "
"to queue %s for current request") % queue_id
message = _('First attempt failed while adding messages '
'to queue %s for current request') % queue_id
LOG.debug(message)
@ -600,7 +600,7 @@ class MessageController(storage.MessageBase):
# Put the successful one's IDs into aggregated_results.
succeeded_messages = cached_messages[:failed_index]
succeeded_ids = [msg["_id"] for msg in succeeded_messages]
succeeded_ids = [msg['_id'] for msg in succeeded_messages]
# Results are aggregated across all attempts
if aggregated_results is None:
@ -613,7 +613,7 @@ class MessageController(storage.MessageBase):
prepared_messages = cached_messages[failed_index:]
next_marker = self._next_marker(queue_id)
for index, message in enumerate(prepared_messages):
message["k"] = next_marker + index
message['k'] = next_marker + index
# Chill out to avoid thrashing/thundering
self._backoff_sleep(attempt)
@ -627,8 +627,8 @@ class MessageController(storage.MessageBase):
LOG.exception(ex)
raise
message = _("Hit maximum number of attempts (%(max)s) for queue "
"%(id)s in project %(project)s")
message = _('Hit maximum number of attempts (%(max)s) for queue '
'%(id)s in project %(project)s')
message %= dict(max=options.CFG.max_attempts, id=queue_id,
project=project)
@ -642,13 +642,13 @@ class MessageController(storage.MessageBase):
mid = utils.to_oid(message_id)
query = {
"q": self._get_queue_id(queue, project),
"_id": mid
'q': self._get_queue_id(queue, project),
'_id': mid
}
if claim:
now = timeutils.utcnow()
query["e"] = {"$gt": now}
query['e'] = {'$gt': now}
message = self._col.find_one(query)
if message is None:
@ -656,12 +656,12 @@ class MessageController(storage.MessageBase):
cid = utils.to_oid(claim)
if not ("c" in message and
message["c"]["id"] == cid and
message["c"]["e"] > now):
if not ('c' in message and
message['c']['id'] == cid and
message['c']['e'] > now):
raise exceptions.ClaimNotPermitted(message_id, claim)
self._col.remove(query["_id"], w=0)
self._col.remove(query['_id'], w=0)
else:
self._col.remove(query, w=0)
except exceptions.QueueDoesNotExist:
@ -711,12 +711,12 @@ class ClaimController(storage.ClaimBase):
def messages(msg_iter):
msg = msg_iter.next()
yield msg.pop("claim")
yield msg.pop('claim')
yield msg
# Smoke it!
for msg in msg_iter:
del msg["claim"]
del msg['claim']
yield msg
try:
@ -726,9 +726,9 @@ class ClaimController(storage.ClaimBase):
messages = messages(msg_ctrl.claimed(qid, cid, now))
claim = messages.next()
claim = {
"age": age.seconds,
"ttl": claim.pop("t"),
"id": str(claim["id"]),
'age': age.seconds,
'ttl': claim.pop('t'),
'id': str(claim['id']),
}
except StopIteration:
raise exceptions.ClaimDoesNotExist(cid, queue, project)
@ -759,7 +759,7 @@ class ClaimController(storage.ClaimBase):
# we need to verify it exists.
qid = self._get_queue_id(queue, project)
ttl = int(metadata.get("ttl", 60))
ttl = int(metadata.get('ttl', 60))
oid = objectid.ObjectId()
now = timeutils.utcnow()
@ -767,15 +767,15 @@ class ClaimController(storage.ClaimBase):
expires = now + ttl_delta
meta = {
"id": oid,
"t": ttl,
"e": expires,
'id': oid,
't': ttl,
'e': expires,
}
# Get a list of active, not claimed nor expired
# messages that could be claimed.
msgs = msg_ctrl.active(qid, fields={"_id": 1})
msgs = msgs.limit(limit).sort("_id")
msgs = msg_ctrl.active(qid, fields={'_id': 1})
msgs = msgs.limit(limit).sort('_id')
messages = iter([])
@ -784,29 +784,29 @@ class ClaimController(storage.ClaimBase):
if msgs.count(True) == 0:
return (str(oid), messages)
ids = [msg["_id"] for msg in msgs]
ids = [msg['_id'] for msg in msgs]
now = timeutils.utcnow()
# Set claim field for messages in ids
updated = msg_ctrl._col.update({"_id": {"$in": ids},
"$or": [
{"c.id": None},
updated = msg_ctrl._col.update({'_id': {'$in': ids},
'$or': [
{'c.id': None},
{
"c.id": {"$ne": None},
"c.e": {"$lte": now}
'c.id': {'$ne': None},
'c.e': {'$lte': now}
}
]},
{"$set": {"c": meta}}, upsert=False,
multi=True)["n"]
{'$set': {'c': meta}}, upsert=False,
multi=True)['n']
# NOTE(flaper87): Dirty hack!
# This sets the expiration time to
# `expires` on messages that would
# expire before claim.
msg_ctrl._col.update({"q": queue,
"e": {"$lt": expires},
"c.id": oid},
{"$set": {"e": expires, "t": ttl}},
msg_ctrl._col.update({'q': queue,
'e': {'$lt': expires},
'c.id': oid},
{'$set': {'e': expires, 't': ttl}},
upsert=False, multi=True)
if updated != 0:
@ -820,13 +820,13 @@ class ClaimController(storage.ClaimBase):
raise exceptions.ClaimDoesNotExist(claim_id, queue, project)
now = timeutils.utcnow()
ttl = int(metadata.get("ttl", 60))
ttl = int(metadata.get('ttl', 60))
ttl_delta = datetime.timedelta(seconds=ttl)
expires = now + ttl_delta
if now > expires:
msg = _("New ttl will make the claim expires")
msg = _('New ttl will make the claim expires')
raise ValueError(msg)
qid = self._get_queue_id(queue, project)
@ -839,23 +839,23 @@ class ClaimController(storage.ClaimBase):
raise exceptions.ClaimDoesNotExist(claim_id, queue, project)
meta = {
"id": cid,
"t": ttl,
"e": expires,
'id': cid,
't': ttl,
'e': expires,
}
msg_ctrl._col.update({"q": qid, "c.id": cid},
{"$set": {"c": meta}},
msg_ctrl._col.update({'q': qid, 'c.id': cid},
{'$set': {'c': meta}},
upsert=False, multi=True)
# NOTE(flaper87): Dirty hack!
# This sets the expiration time to
# `expires` on messages that would
# expire before claim.
msg_ctrl._col.update({"q": qid,
"e": {"$lt": expires},
"c.id": cid},
{"$set": {"e": expires, "t": ttl}},
msg_ctrl._col.update({'q': qid,
'e': {'$lt': expires},
'c.id': cid},
{'$set': {'e': expires, 't': ttl}},
upsert=False, multi=True)
def delete(self, queue, claim_id, project=None):

View File

@ -46,7 +46,7 @@ class Driver(storage.DriverBase):
return self._database
def gc(self):
LOG.info("Performing garbage collection.")
LOG.info('Performing garbage collection.')
try:
self.message_controller.remove_expired()

View File

@ -20,28 +20,28 @@ from marconi.common import config
OPTIONS = {
# Connection string
"uri": None,
'uri': None,
# Database name
#TODO(kgriffs): Consider local sharding across DBs to mitigate
# TODO(kgriffs): Consider local sharding across DBs to mitigate
# per-DB locking latency.
"database": "marconi",
'database': 'marconi',
# Maximum number of times to retry a failed operation. Currently
# only used for retrying a message post.
"max_attempts": 1000,
'max_attempts': 1000,
# Maximum sleep interval between retries (actual sleep time
# increases linearly according to number of attempts performed).
"max_retry_sleep": 0.1,
'max_retry_sleep': 0.1,
# Maximum jitter interval, to be added to the sleep interval, in
# order to decrease probability that parallel requests will retry
# at the same instant.
"max_retry_jitter": 0.005,
'max_retry_jitter': 0.005,
# Frequency of message garbage collections, in seconds
"gc_interval": 5 * 60,
'gc_interval': 5 * 60,
# Threshold of number of expired messages to reach in a given
# queue, before performing the GC. Useful for reducing frequent
@ -51,7 +51,7 @@ OPTIONS = {
#
# Note: The higher this number, the larger the memory-mapped DB
# files will be.
"gc_threshold": 1000,
'gc_threshold': 1000,
}
CFG = config.namespace('drivers:storage:mongodb').from_options(**OPTIONS)

View File

@ -25,7 +25,7 @@ from marconi.openstack.common import timeutils
from marconi.storage import exceptions as storage_exceptions
DUP_MARKER_REGEX = re.compile(r"\$queue_marker\s+dup key: { : [^:]+: (\d)+")
DUP_MARKER_REGEX = re.compile(r'\$queue_marker\s+dup key: { : [^:]+: (\d)+')
def dup_marker_from_error(error_message):
@ -39,7 +39,7 @@ def dup_marker_from_error(error_message):
"""
match = DUP_MARKER_REGEX.search(error_message)
if match is None:
description = (_("Error message could not be parsed: %s") %
description = (_('Error message could not be parsed: %s') %
error_message)
raise exceptions.PatternNotFound(description)
@ -91,16 +91,16 @@ def calculate_backoff(attempt, max_attempts, max_sleep, max_jitter=0):
the ratio attempt / max_attempts, with optional jitter.
"""
if max_attempts < 0:
raise ValueError("max_attempts must be >= 0")
raise ValueError('max_attempts must be >= 0')
if max_sleep < 0:
raise ValueError("max_sleep must be >= 0")
raise ValueError('max_sleep must be >= 0')
if max_jitter < 0:
raise ValueError("max_jitter must be >= 0")
raise ValueError('max_jitter must be >= 0')
if not (0 <= attempt < max_attempts):
raise ValueError("attempt value is out of range")
raise ValueError('attempt value is out of range')
ratio = float(attempt) / float(max_attempts)
backoff_sec = ratio * max_sleep
@ -123,7 +123,7 @@ def to_oid(obj):
try:
return objectid.ObjectId(obj)
except (TypeError, berrors.InvalidId):
msg = _("Wrong id %s") % obj
msg = _('Wrong id %s') % obj
raise storage_exceptions.MalformedID(msg)
@ -132,7 +132,7 @@ def oid_utc(oid):
try:
return timeutils.normalize_time(oid.generation_time)
except AttributeError:
raise TypeError(_("Expected ObjectId and got %s") % type(oid))
raise TypeError(_('Expected ObjectId and got %s') % type(oid))
class HookedCursor(object):

View File

@ -40,17 +40,17 @@ class TestLazyProperty(testing.TestBase):
def test_write_delete(self):
self.assertTrue(self.cls_instance.read_write_delete)
self.assertTrue(hasattr(self.cls_instance, "_lazy_read_write_delete"))
self.assertTrue(hasattr(self.cls_instance, '_lazy_read_write_delete'))
self.cls_instance.read_write_delete = False
self.assertFalse(self.cls_instance.read_write_delete)
del self.cls_instance.read_write_delete
self.assertFalse(hasattr(self.cls_instance, "_lazy_read_write_delete"))
self.assertFalse(hasattr(self.cls_instance, '_lazy_read_write_delete'))
def test_write(self):
self.assertTrue(self.cls_instance.read_write)
self.assertTrue(hasattr(self.cls_instance, "_lazy_read_write"))
self.assertTrue(hasattr(self.cls_instance, '_lazy_read_write'))
self.cls_instance.read_write = False
self.assertFalse(self.cls_instance.read_write)
@ -60,11 +60,11 @@ class TestLazyProperty(testing.TestBase):
self.fail()
except TypeError:
# Bool object is not callable
self.assertTrue(hasattr(self.cls_instance, "_lazy_read_write"))
self.assertTrue(hasattr(self.cls_instance, '_lazy_read_write'))
def test_delete(self):
self.assertTrue(self.cls_instance.read_delete)
self.assertTrue(hasattr(self.cls_instance, "_lazy_read_delete"))
self.assertTrue(hasattr(self.cls_instance, '_lazy_read_delete'))
try:
self.cls_instance.read_delete = False
@ -74,4 +74,4 @@ class TestLazyProperty(testing.TestBase):
pass
del self.cls_instance.read_delete
self.assertFalse(hasattr(self.cls_instance, "_lazy_read_delete"))
self.assertFalse(hasattr(self.cls_instance, '_lazy_read_delete'))

View File

@ -10,6 +10,6 @@ storage = mongodb
port = 8888
[drivers:storage:mongodb]
uri = "mongodb://127.0.0.1:27017"
database = "marconi_test"
uri = mongodb://127.0.0.1:27017
database = marconi_test
gc_threshold = 100

View File

@ -20,7 +20,7 @@ from marconi.tests import util as testing
class ControllerBaseTest(testing.TestBase):
project = "project"
project = 'project'
driver_class = None
controller_class = None
controller_base_class = None
@ -29,10 +29,10 @@ class ControllerBaseTest(testing.TestBase):
super(ControllerBaseTest, self).setUp()
if not self.driver_class:
self.skipTest("No driver class specified")
self.skipTest('No driver class specified')
if not issubclass(self.controller_class, self.controller_base_class):
self.skipTest("%s is not an instance of %s. Tests not supported" %
self.skipTest('%s is not an instance of %s. Tests not supported' %
(self.controller_class, self.controller_base_class))
self.driver = self.driver_class()
@ -73,36 +73,36 @@ class QueueControllerTest(ControllerBaseTest):
def test_queue_lifecycle(self):
# Test Queue Creation
created = self.controller.upsert("test", project=self.project,
metadata=dict(topic="test_queue"))
created = self.controller.upsert('test', project=self.project,
metadata=dict(topic='test_queue'))
self.assertTrue(created)
# Test Queue retrieval
queue = self.controller.get("test", project=self.project)
queue = self.controller.get('test', project=self.project)
self.assertIsNotNone(queue)
# Test Queue Update
created = self.controller.upsert("test", project=self.project,
metadata=dict(meta="test_meta"))
created = self.controller.upsert('test', project=self.project,
metadata=dict(meta='test_meta'))
self.assertFalse(created)
queue = self.controller.get("test", project=self.project)
self.assertEqual(queue["meta"], "test_meta")
queue = self.controller.get('test', project=self.project)
self.assertEqual(queue['meta'], 'test_meta')
# Test Queue Statistic
_insert_fixtures(self.message_controller, "test",
project=self.project, client_uuid="my_uuid", num=12)
_insert_fixtures(self.message_controller, 'test',
project=self.project, client_uuid='my_uuid', num=12)
countof = self.controller.stats("test", project=self.project)
countof = self.controller.stats('test', project=self.project)
self.assertEqual(countof['messages']['free'], 12)
# Test Queue Deletion
self.controller.delete("test", project=self.project)
self.controller.delete('test', project=self.project)
# Test DoesNotExist Exception
self.assertRaises(storage.exceptions.DoesNotExist,
self.controller.get, "test",
self.controller.get, 'test',
project=self.project)
@ -113,7 +113,7 @@ class MessageControllerTest(ControllerBaseTest):
override the tearDown method in order
to clean up storage's state.
"""
queue_name = "test_queue"
queue_name = 'test_queue'
controller_base_class = storage.MessageBase
def setUp(self):
@ -134,10 +134,10 @@ class MessageControllerTest(ControllerBaseTest):
messages = [
{
"ttl": 60,
"body": {
"event": "BackupStarted",
"backupId": "c378813c-3f0b-11e2-ad92-7823d2b0f3ce"
'ttl': 60,
'body': {
'event': 'BackupStarted',
'backupId': 'c378813c-3f0b-11e2-ad92-7823d2b0f3ce'
}
},
]
@ -145,7 +145,7 @@ class MessageControllerTest(ControllerBaseTest):
# Test Message Creation
created = list(self.controller.post(queue_name, messages,
project=self.project,
client_uuid="unused"))
client_uuid='unused'))
self.assertEqual(len(created), 1)
# Test Message Get
@ -162,7 +162,7 @@ class MessageControllerTest(ControllerBaseTest):
def test_get_multi(self):
_insert_fixtures(self.controller, self.queue_name,
project=self.project, client_uuid="my_uuid", num=15)
project=self.project, client_uuid='my_uuid', num=15)
def load_messages(expected, *args, **kwargs):
interaction = self.controller.list(*args, **kwargs)
@ -172,7 +172,7 @@ class MessageControllerTest(ControllerBaseTest):
# Test all messages, echo False and uuid
load_messages(0, self.queue_name, project=self.project,
client_uuid="my_uuid")
client_uuid='my_uuid')
# Test all messages and limit
load_messages(15, self.queue_name, project=self.project, limit=20,
@ -181,17 +181,17 @@ class MessageControllerTest(ControllerBaseTest):
# Test all messages, echo True, and uuid
interaction = load_messages(10, self.queue_name, echo=True,
project=self.project,
client_uuid="my_uuid")
client_uuid='my_uuid')
# Test all messages, echo True, uuid and marker
load_messages(5, self.queue_name, echo=True, project=self.project,
marker=interaction.next(), client_uuid="my_uuid")
marker=interaction.next(), client_uuid='my_uuid')
def test_claim_effects(self):
_insert_fixtures(self.controller, self.queue_name,
project=self.project, client_uuid="my_uuid", num=12)
project=self.project, client_uuid='my_uuid', num=12)
meta = {"ttl": 70}
meta = {'ttl': 70}
another_cid, _ = self.claim_controller.create(self.queue_name, meta,
project=self.project)
@ -201,21 +201,21 @@ class MessageControllerTest(ControllerBaseTest):
# A wrong claim does not ensure the message deletion
with testing.expect(storage.exceptions.NotPermitted):
self.controller.delete(self.queue_name, msg1["id"],
self.controller.delete(self.queue_name, msg1['id'],
project=self.project,
claim=another_cid)
# Make sure a message can be deleted with a claim
self.controller.delete(self.queue_name, msg1["id"],
self.controller.delete(self.queue_name, msg1['id'],
project=self.project,
claim=cid)
with testing.expect(storage.exceptions.DoesNotExist):
self.controller.get(self.queue_name, msg1["id"],
self.controller.get(self.queue_name, msg1['id'],
project=self.project)
# Make sure such a deletion is idempotent
self.controller.delete(self.queue_name, msg1["id"],
self.controller.delete(self.queue_name, msg1['id'],
project=self.project,
claim=cid)
@ -224,7 +224,7 @@ class MessageControllerTest(ControllerBaseTest):
project=self.project)
with testing.expect(storage.exceptions.NotPermitted):
self.controller.delete(self.queue_name, msg2["id"],
self.controller.delete(self.queue_name, msg2['id'],
project=self.project,
claim=cid)
@ -294,7 +294,7 @@ class ClaimControllerTest(ControllerBaseTest):
override the tearDown method in order
to clean up storage's state.
"""
queue_name = "test_queue"
queue_name = 'test_queue'
controller_base_class = storage.ClaimBase
def setUp(self):
@ -312,9 +312,9 @@ class ClaimControllerTest(ControllerBaseTest):
def test_claim_lifecycle(self):
_insert_fixtures(self.message_controller, self.queue_name,
project=self.project, client_uuid="my_uuid", num=20)
project=self.project, client_uuid='my_uuid', num=20)
meta = {"ttl": 70}
meta = {'ttl': 70}
# Make sure create works
claim_id, messages = self.controller.create(self.queue_name, meta,
@ -337,10 +337,10 @@ class ClaimControllerTest(ControllerBaseTest):
messages2 = list(messages2)
self.assertEquals(len(messages2), 15)
self.assertEquals(messages, messages2)
self.assertEquals(claim["ttl"], 70)
self.assertEquals(claim["id"], claim_id)
self.assertEquals(claim['ttl'], 70)
self.assertEquals(claim['id'], claim_id)
new_meta = {"ttl": 100}
new_meta = {'ttl': 100}
self.controller.update(self.queue_name, claim_id,
new_meta, project=self.project)
@ -350,11 +350,14 @@ class ClaimControllerTest(ControllerBaseTest):
messages2 = list(messages2)
self.assertEquals(len(messages2), 15)
#TODO(zyuan): Add some tests to ensure the ttl is extended/not-extended
# TODO(zyuan): Add some tests to ensure the ttl is
# extended/not-extended.
for msg1, msg2 in zip(messages, messages2):
self.assertEquals(msg1['body'], msg2['body'])
self.assertEquals(claim["ttl"], 100)
self.assertEquals(claim["id"], claim_id)
self.assertEquals(claim['ttl'], 100)
self.assertEquals(claim['id'], claim_id)
# Make sure delete works
self.controller.delete(self.queue_name, claim_id,
@ -365,7 +368,7 @@ class ClaimControllerTest(ControllerBaseTest):
claim_id, project=self.project)
def test_expired_claim(self):
meta = {"ttl": 0}
meta = {'ttl': 0}
claim_id, messages = self.controller.create(self.queue_name, meta,
project=self.project)
@ -395,9 +398,9 @@ def _insert_fixtures(controller, queue_name, project=None,
def messages():
for n in xrange(num):
yield {
"ttl": 120,
"body": {
"event": "Event number %s" % n
'ttl': 120,
'body': {
'event': 'Event number %s' % n
}}
controller.post(queue_name, messages(),
project=project, client_uuid=client_uuid)

View File

@ -65,6 +65,6 @@ class TestQueueBase(testing.TestBase):
def test_upsert(self):
self.assertRaises(AssertionError, self.controller.upsert,
"test", metadata=[])
'test', metadata=[])
self.assertIsNone(self.controller.upsert("test", metadata={}))
self.assertIsNone(self.controller.upsert('test', metadata={}))

View File

@ -32,23 +32,23 @@ from marconi.tests import util as testing
class MongodbUtilsTest(testing.TestBase):
def test_dup_marker_from_error(self):
error_message = ("E11000 duplicate key error index: "
"marconi.messages.$queue_marker dup key: "
"{ : ObjectId('51adff46b100eb85d8a93a2d'), : 3 }")
error_message = ('E11000 duplicate key error index: '
'marconi.messages.$queue_marker dup key: '
'{ : ObjectId("51adff46b100eb85d8a93a2d"), : 3 }')
marker = utils.dup_marker_from_error(error_message)
self.assertEquals(marker, 3)
error_message = ("E11000 duplicate key error index: "
"marconi.messages.$x_y dup key: "
"{ : ObjectId('51adff46b100eb85d8a93a2d'), : 3 }")
error_message = ('E11000 duplicate key error index: '
'marconi.messages.$x_y dup key: '
'{ : ObjectId("51adff46b100eb85d8a93a2d"), : 3 }')
self.assertRaises(exceptions.PatternNotFound,
utils.dup_marker_from_error, error_message)
error_message = ("E11000 duplicate key error index: "
"marconi.messages.$queue_marker dup key: "
"{ : ObjectId('51adff46b100eb85d8a93a2d') }")
error_message = ('E11000 duplicate key error index: '
'marconi.messages.$queue_marker dup key: '
'{ : ObjectId("51adff46b100eb85d8a93a2d") }')
self.assertRaises(exceptions.PatternNotFound,
utils.dup_marker_from_error, error_message)
@ -83,11 +83,11 @@ class MongodbUtilsTest(testing.TestBase):
class MongodbDriverTest(testing.TestBase):
def setUp(self):
if not os.environ.get("MONGODB_TEST_LIVE"):
self.skipTest("No MongoDB instance running")
if not os.environ.get('MONGODB_TEST_LIVE'):
self.skipTest('No MongoDB instance running')
super(MongodbDriverTest, self).setUp()
self.load_conf("wsgi_mongodb.conf")
self.load_conf('wsgi_mongodb.conf')
def test_db_instance(self):
driver = mongodb.Driver()
@ -101,11 +101,11 @@ class MongodbQueueTests(base.QueueControllerTest):
controller_class = controllers.QueueController
def setUp(self):
if not os.environ.get("MONGODB_TEST_LIVE"):
self.skipTest("No MongoDB instance running")
if not os.environ.get('MONGODB_TEST_LIVE'):
self.skipTest('No MongoDB instance running')
super(MongodbQueueTests, self).setUp()
self.load_conf("wsgi_mongodb.conf")
self.load_conf('wsgi_mongodb.conf')
def tearDown(self):
self.controller._col.drop()
@ -114,18 +114,18 @@ class MongodbQueueTests(base.QueueControllerTest):
def test_indexes(self):
col = self.controller._col
indexes = col.index_information()
self.assertIn("p_1_n_1", indexes)
self.assertIn('p_1_n_1', indexes)
def test_messages_purged(self):
queue_name = "test"
queue_name = 'test'
self.controller.upsert(queue_name, {})
qid = self.controller._get_id(queue_name)
self.message_controller.post(queue_name,
[{"ttl": 60}],
[{'ttl': 60}],
1234)
self.controller.delete(queue_name)
col = self.message_controller._col
self.assertEqual(col.find({"q": qid}).count(), 0)
self.assertEqual(col.find({'q': qid}).count(), 0)
class MongodbMessageTests(base.MessageControllerTest):
@ -134,11 +134,11 @@ class MongodbMessageTests(base.MessageControllerTest):
controller_class = controllers.MessageController
def setUp(self):
if not os.environ.get("MONGODB_TEST_LIVE"):
self.skipTest("No MongoDB instance running")
if not os.environ.get('MONGODB_TEST_LIVE'):
self.skipTest('No MongoDB instance running')
super(MongodbMessageTests, self).setUp()
self.load_conf("wsgi_mongodb.conf")
self.load_conf('wsgi_mongodb.conf')
def tearDown(self):
self.controller._col.drop()
@ -151,22 +151,22 @@ class MongodbMessageTests(base.MessageControllerTest):
def test_indexes(self):
col = self.controller._col
indexes = col.index_information()
self.assertIn("active", indexes)
self.assertIn("claimed", indexes)
self.assertIn("queue_marker", indexes)
self.assertIn('active', indexes)
self.assertIn('claimed', indexes)
self.assertIn('queue_marker', indexes)
def test_next_marker(self):
queue_name = "marker_test"
queue_name = 'marker_test'
iterations = 10
self.queue_controller.upsert(queue_name, {})
queue_id = self.queue_controller._get_id(queue_name)
seed_marker1 = self.controller._next_marker(queue_name)
self.assertEqual(seed_marker1, 1, "First marker is 1")
self.assertEqual(seed_marker1, 1, 'First marker is 1')
for i in range(iterations):
self.controller.post(queue_name, [{"ttl": 60}], "uuid")
self.controller.post(queue_name, [{'ttl': 60}], 'uuid')
marker1 = self.controller._next_marker(queue_id)
marker2 = self.controller._next_marker(queue_id)
marker3 = self.controller._next_marker(queue_id)
@ -183,10 +183,10 @@ class MongodbMessageTests(base.MessageControllerTest):
messages_per_queue = gc_threshold
nogc_messages_per_queue = gc_threshold - 1
projects = ["gc-test-project-%s" % i for i in range(num_projects)]
queue_names = ["gc-test-%s" % i for i in range(num_queues)]
client_uuid = "b623c53c-cf75-11e2-84e1-a1187188419e"
messages = [{"ttl": 0, "body": str(i)}
projects = ['gc-test-project-%s' % i for i in range(num_projects)]
queue_names = ['gc-test-%s' % i for i in range(num_queues)]
client_uuid = 'b623c53c-cf75-11e2-84e1-a1187188419e'
messages = [{'ttl': 0, 'body': str(i)}
for i in range(messages_per_queue)]
for project in projects:
@ -195,11 +195,11 @@ class MongodbMessageTests(base.MessageControllerTest):
self.controller.post(queue, messages, client_uuid, project)
# Add one that should not be gc'd due to being under threshold
self.queue_controller.upsert("nogc-test", {}, "nogc-test-project")
nogc_messages = [{"ttl": 0, "body": str(i)}
self.queue_controller.upsert('nogc-test', {}, 'nogc-test-project')
nogc_messages = [{'ttl': 0, 'body': str(i)}
for i in range(nogc_messages_per_queue)]
self.controller.post("nogc-test", nogc_messages,
client_uuid, "nogc-test-project")
self.controller.post('nogc-test', nogc_messages,
client_uuid, 'nogc-test-project')
total_expired = sum(
self._count_expired(queue, project)
@ -212,7 +212,7 @@ class MongodbMessageTests(base.MessageControllerTest):
# Make sure the messages in this queue were not gc'd since
# the count was under the threshold.
self.assertEquals(
self._count_expired("nogc-test", "nogc-test-project"),
self._count_expired('nogc-test', 'nogc-test-project'),
len(nogc_messages))
total_expired = sum(
@ -228,8 +228,8 @@ class MongodbMessageTests(base.MessageControllerTest):
# one remaining in the queue.
queue = random.choice(queue_names)
queue_id = self.queue_controller._get_id(queue, project)
message = self.driver.db.messages.find_one({"q": queue_id})
self.assertEquals(message["k"], messages_per_queue)
message = self.driver.db.messages.find_one({'q': queue_id})
self.assertEquals(message['k'], messages_per_queue)
class MongodbClaimTests(base.ClaimControllerTest):
@ -237,11 +237,11 @@ class MongodbClaimTests(base.ClaimControllerTest):
controller_class = controllers.ClaimController
def setUp(self):
if not os.environ.get("MONGODB_TEST_LIVE"):
self.skipTest("No MongoDB instance running")
if not os.environ.get('MONGODB_TEST_LIVE'):
self.skipTest('No MongoDB instance running')
super(MongodbClaimTests, self).setUp()
self.load_conf("wsgi_mongodb.conf")
self.load_conf('wsgi_mongodb.conf')
def test_claim_doesnt_exist(self):
"""Verifies that operations fail on expired/missing claims.
@ -255,7 +255,7 @@ class MongodbClaimTests(base.ClaimControllerTest):
epoch, project=self.project)
claim_id, messages = self.controller.create(self.queue_name,
{"ttl": 1},
{'ttl': 1},
project=self.project)
# Lets let it expire

View File

@ -37,7 +37,7 @@ def verify_claim_msg(count, *claim_response):
if msg_length_flag:
query_claim(headers, body)
else:
assert msg_length_flag, "More msgs returned than specified in limit"
assert msg_length_flag, 'More msgs returned than specified in limit'
def verify_claim_msglength(count, *body):
@ -65,30 +65,30 @@ def query_claim(headers, *body):
msg_list = body[0]
msg_list = json.loads(msg_list)
location = headers["Location"]
location = headers['Location']
url = functionlib.create_url_from_appender(location)
header = functionlib.create_marconi_headers()
get_msg = http.get(url, header)
if get_msg.status_code == 200:
query_body = json.loads(get_msg.text)
query_msgs = query_body["messages"]
query_msgs = query_body['messages']
test_result_flag = verify_query_msgs(query_msgs, msg_list)
if test_result_flag:
return test_result_flag
else:
print "URL"
print 'URL'
print url
print "HEADER"
print 'HEADER'
print header
print "Messages returned by Query Claim"
print 'Messages returned by Query Claim'
print query_msgs
print "# of Messages returned by Query Claim", len(query_msgs)
print '# of Messages returned by Query Claim', len(query_msgs)
print 'Messages returned by Claim Messages'
print msg_list
print "# of Messages returned by Claim messages", len(msg_list)
assert test_result_flag, "Query Claim Failed"
print '# of Messages returned by Claim messages', len(msg_list)
assert test_result_flag, 'Query Claim Failed'
def verify_query_msgs(querymsgs, msg_list):
@ -103,9 +103,9 @@ def verify_query_msgs(querymsgs, msg_list):
idx = 0
for msg in querymsgs:
if ((msg["body"] != msg_list[idx]["body"]) or
(msg["href"] != msg_list[idx]["href"]) or
(msg["ttl"] != msg_list[idx]["ttl"])):
if ((msg['body'] != msg_list[idx]['body']) or
(msg['href'] != msg_list[idx]['href']) or
(msg['ttl'] != msg_list[idx]['ttl'])):
test_result_flag = False
idx = idx + 1
@ -122,25 +122,25 @@ def patch_claim(*claim_response):
test_result_flag = False
headers = claim_response[0]
location = headers["Location"]
location = headers['Location']
url = functionlib.create_url_from_appender(location)
header = functionlib.create_marconi_headers()
ttl_value = 300
payload = '{ "ttl": ttlvalue }'
payload = payload.replace("ttlvalue", str(ttl_value))
payload = '{"ttl": ttlvalue }'
payload = payload.replace('ttlvalue', str(ttl_value))
patch_response = http.patch(url, header, body=payload)
if patch_response.status_code == 204:
test_result_flag = verify_patch_claim(url, header, ttl_value)
else:
print "Patch HTTP Response code: {}".format(patch_response.status_code)
print 'Patch HTTP Response code: {}'.format(patch_response.status_code)
print patch_response.headers
print patch_response.text
assert test_result_flag, "Patch Claim Failed"
assert test_result_flag, 'Patch Claim Failed'
if not test_result_flag:
assert test_result_flag, "Query claim after the patch failed"
assert test_result_flag, 'Query claim after the patch failed'
def verify_patch_claim(url, header, ttl_extended):
@ -157,7 +157,7 @@ def verify_patch_claim(url, header, ttl_extended):
get_claim = http.get(url, header)
response_body = json.loads(get_claim.text)
ttl = response_body["ttl"]
ttl = response_body['ttl']
if ttl < ttl_extended:
print get_claim.status_code
print get_claim.headers
@ -173,7 +173,7 @@ def create_urllist_fromhref(*response):
:param *response : http response containing the list of messages.
"""
rspbody = json.loads(response[1])
urllist = [functionlib.create_url_from_appender(item["href"])
urllist = [functionlib.create_url_from_appender(item['href'])
for item in rspbody]
return urllist
@ -194,14 +194,14 @@ def delete_claimed_msgs(*claim_response):
if delete_response.status_code == 204:
test_result_flag = functionlib.verify_delete(url, header)
else:
print "DELETE message with claim ID: {}".format(url)
print 'DELETE message with claim ID: {}'.format(url)
print delete_response.status_code
print delete_response.headers
print delete_response.text
assert test_result_flag, "Delete Claimed Message Failed"
assert test_result_flag, 'Delete Claimed Message Failed'
if not test_result_flag:
assert test_result_flag, "Get message after DELETE did not return 404"
assert test_result_flag, 'Get message after DELETE did not return 404'
def get_claimed_msgs(*claim_response):
@ -219,11 +219,11 @@ def get_claimed_msgs(*claim_response):
if get_response.status_code != 200:
print url
print header
print "Get Response Code: {}".format(get_response.status_code)
print 'Get Response Code: {}'.format(get_response.status_code)
test_result_flag = False
if not test_result_flag:
assert test_result_flag, "Get Claimed message Failed"
assert test_result_flag, 'Get Claimed message Failed'
def release_claim(*claim_response):
@ -236,7 +236,7 @@ def release_claim(*claim_response):
test_result_flag = False
headers = claim_response[0]
location = headers["Location"]
location = headers['Location']
url = functionlib.create_url_from_appender(location)
header = functionlib.create_marconi_headers()
@ -244,10 +244,10 @@ def release_claim(*claim_response):
if release_response.status_code == 204:
test_result_flag = functionlib.verify_delete(url, header)
else:
print "Release Claim HTTP code:{}".format(release_response.status_code)
print 'Release Claim HTTP code:{}'.format(release_response.status_code)
print release_response.headers
print release_response.text
assert test_result_flag, "Release Claim Failed"
assert test_result_flag, 'Release Claim Failed'
if not test_result_flag:
assert test_result_flag, "Get claim after the release failed"
assert test_result_flag, 'Get claim after the release failed'

View File

@ -19,7 +19,7 @@ from marconi.tests.system.common import config
from marconi.tests.system.common import functionlib
cfg = config.Config()
CFG = config.Config()
def get_data():
@ -32,7 +32,7 @@ def get_data():
for row in data:
row['header'] = functionlib.get_headers(row['header'])
row['url'] = row['url'].replace("<BASE_URL>", cfg.base_url)
row['url'] = row['url'].replace('<BASE_URL>', CFG.base_url)
return data

View File

@ -21,38 +21,38 @@ from marconi.tests.system.common import config
from marconi.tests.system.common import http
cfg = config.Config()
CFG = config.Config()
def get_keystone_token():
"""Gets Keystone Auth token."""
req_json = {
"auth": {
"passwordCredentials": {
"username": cfg.username,
"password": cfg.password
'auth': {
'passwordCredentials': {
'username': CFG.username,
'password': CFG.password
}
},
}
header = '{"Host": "identity.api.rackspacecloud.com",'
header += '"Content-Type": "application/json","Accept":"application/json"}'
url = cfg.auth_url
url = CFG.auth_url
response = http.post(url=url, header=header, body=req_json)
response_body = json.loads(response.text)
auth_token = response_body["access"]["token"]["id"]
auth_token = response_body['access']['token']['id']
return auth_token
def get_auth_token():
"""Returns a valid auth token if auth is turned on."""
if cfg.auth_enabled == "true":
if CFG.auth_enabled == 'true':
auth_token = get_keystone_token()
else:
auth_token = "notrealtoken"
auth_token = 'notrealtoken'
return auth_token
@ -64,10 +64,10 @@ def create_marconi_headers():
headers = '{"Host": "<HOST>","User-Agent": "<USER-AGENT>","Date":"<DATE>",'
headers += '"Accept": "application/json","Accept-Encoding": "gzip",'
headers += '"X-Auth-Token": "<auth_token>","Client-ID": "<UUID>"}'
headers = headers.replace("<auth_token>", auth_token)
headers = headers.replace("<HOST>", cfg.host)
headers = headers.replace("<USER-AGENT>", cfg.user_agent)
headers = headers.replace("<UUID>", cfg.uuid)
headers = headers.replace('<auth_token>', auth_token)
headers = headers.replace('<HOST>', CFG.host)
headers = headers.replace('<USER-AGENT>', CFG.user_agent)
headers = headers.replace('<UUID>', CFG.uuid)
return headers
@ -78,10 +78,10 @@ def invalid_auth_token_header():
headers = '{"Host":"<HOST>","User-Agent":"<USER-AGENT>","Date":"<DATE>",'
headers += '"Accept": "application/json","Accept-Encoding": "gzip",'
headers += '"X-Auth-Token": "<auth_token>"}'
headers = headers.replace("<auth_token>", auth_token)
headers = headers.replace("<HOST>", cfg.host)
headers = headers.replace("<USER-AGENT>", cfg.user_agent)
headers += 'X-Auth-Token: <auth_token>}'
headers = headers.replace('<auth_token>', auth_token)
headers = headers.replace('<HOST>', CFG.host)
headers = headers.replace('<USER-AGENT>', CFG.user_agent)
return headers
@ -93,8 +93,8 @@ def missing_header_fields():
headers = '{"Host": "<HOST>","Date": "<DATE>",'
headers += '"Accept": "application/json","Accept-Encoding": "gzip",'
headers += '"X-Auth-Token": "<auth_token>"}'
headers = headers.replace("<auth_token>", auth_token)
headers = headers.replace("<HOST>", cfg.host)
headers = headers.replace('<auth_token>', auth_token)
headers = headers.replace('<HOST>', CFG.host)
return headers
@ -106,9 +106,9 @@ def plain_text_in_header():
headers = '{"Host":"<HOST>","User-Agent":"<USER-AGENT>","Date":"<DATE>",'
headers += '"Accept": "text/plain","Accept-Encoding": "gzip",'
headers += '"X-Auth-Token": "<auth_token>"}'
headers = headers.replace("<auth_token>", auth_token)
headers = headers.replace("<HOST>", cfg.host)
headers = headers.replace("<USER-AGENT>", cfg.user_agent)
headers = headers.replace('<auth_token>', auth_token)
headers = headers.replace('<HOST>', CFG.host)
headers = headers.replace('<USER-AGENT>', CFG.user_agent)
return headers
@ -120,9 +120,9 @@ def asterisk_in_header():
headers = '{"Host":"<HOST>","User-Agent":"<USER-AGENT>","Date":"<DATE>",'
headers += '"Accept": "*/*","Accept-Encoding": "gzip",'
headers += '"X-Auth-Token": "<auth_token>"}'
headers = headers.replace("<auth_token>", auth_token)
headers = headers.replace("<HOST>", cfg.host)
headers = headers.replace("<USER-AGENT>", cfg.user_agent)
headers = headers.replace('<auth_token>', auth_token)
headers = headers.replace('<HOST>', CFG.host)
headers = headers.replace('<USER-AGENT>', CFG.user_agent)
return headers
@ -146,23 +146,23 @@ def get_headers(input_header):
def get_custom_body(kwargs):
"""Returns a custom request body."""
req_body = {"data": "<DATA>"}
if "metadatasize" in kwargs.keys():
random_data = binascii.b2a_hex(os.urandom(kwargs["metadatasize"]))
req_body["data"] = random_data
req_body = {'data': '<DATA>'}
if 'metadatasize' in kwargs.keys():
random_data = binascii.b2a_hex(os.urandom(kwargs['metadatasize']))
req_body['data'] = random_data
return json.dumps(req_body)
def create_url_from_appender(appender):
"""Returns complete url using the appender (with a a preceding '/')."""
next_url = str(cfg.base_server + appender)
next_url = str(CFG.base_server + appender)
return(next_url)
def get_url_from_location(header):
"""returns : the complete url referring to the location."""
location = header["location"]
location = header['location']
url = create_url_from_appender(location)
return url
@ -177,10 +177,10 @@ def verify_metadata(get_data, posted_body):
print(posted_body, type(posted_body))
if get_data in posted_body:
print("AYYY")
print('AYYY')
else:
test_result_flag = False
print("NAYYY")
print('NAYYY')
return test_result_flag
@ -193,13 +193,13 @@ def verify_delete(url, header):
if getmsg.status_code == 404:
test_result_flag = True
else:
print("GET after DELETE failed")
print("URL")
print('GET after DELETE failed')
print('URL')
print url
print("headers")
print('headers')
print header
print("Response Body")
print('Response Body')
print getmsg.text
assert test_result_flag, "GET Code {}".format(getmsg.status_code)
assert test_result_flag, 'GET Code {}'.format(getmsg.status_code)
return test_result_flag

View File

@ -24,13 +24,13 @@ def get(url, header='', param=''):
try:
response = requests.get(url, headers=header, params=param)
except requests.ConnectionError as detail:
print("ConnectionError: Exception in http.get {}".format(detail))
print('ConnectionError: Exception in http.get {}'.format(detail))
except requests.HTTPError as detail:
print("HTTPError: Exception in http.get {}".format(detail))
print('HTTPError: Exception in http.get {}'.format(detail))
except requests.Timeout as detail:
print("Timeout: Exception in http.get {}".format(detail))
print('Timeout: Exception in http.get {}'.format(detail))
except requests.TooManyRedirects as detail:
print("TooManyRedirects: Exception in http.get {}".format(detail))
print('TooManyRedirects: Exception in http.get {}'.format(detail))
return response
@ -44,13 +44,13 @@ def post(url, header='', body='', param=''):
response = requests.post(url, headers=header, data=body,
params=param)
except requests.ConnectionError as detail:
print("ConnectionError: Exception in http.post {}".format(detail))
print('ConnectionError: Exception in http.post {}'.format(detail))
except requests.HTTPError as detail:
print("HTTPError: Exception in http.post {}".format(detail))
print('HTTPError: Exception in http.post {}'.format(detail))
except requests.Timeout as detail:
print("Timeout: Exception in http.post {}".format(detail))
print('Timeout: Exception in http.post {}'.format(detail))
except requests.TooManyRedirects as detail:
print("TooManyRedirects: Exception in http.post {}".format(detail))
print('TooManyRedirects: Exception in http.post {}'.format(detail))
return response
@ -64,13 +64,13 @@ def put(url, header='', body='', param=''):
response = requests.put(url, headers=header, data=body,
params=param)
except requests.ConnectionError as detail:
print("ConnectionError: Exception in http.put {}".format(detail))
print('ConnectionError: Exception in http.put {}'.format(detail))
except requests.HTTPError as detail:
print("HTTPError: Exception in http.put {}".format(detail))
print('HTTPError: Exception in http.put {}'.format(detail))
except requests.Timeout as detail:
print("Timeout: Exception in http.put {}".format(detail))
print('Timeout: Exception in http.put {}'.format(detail))
except requests.TooManyRedirects as detail:
print("TooManyRedirects: Exception in http.put {}".format(detail))
print('TooManyRedirects: Exception in http.put {}'.format(detail))
return response
@ -83,13 +83,13 @@ def delete(url, header='', param=''):
try:
response = requests.delete(url, headers=header, params=param)
except requests.ConnectionError as detail:
print("ConnectionError: Exception in http.delete {}".format(detail))
print('ConnectionError: Exception in http.delete {}'.format(detail))
except requests.HTTPError as detail:
print("HTTPError: Exception in http.delete {}".format(detail))
print('HTTPError: Exception in http.delete {}'.format(detail))
except requests.Timeout as detail:
print("Timeout: Exception in http.delete {}".format(detail))
print('Timeout: Exception in http.delete {}'.format(detail))
except requests.TooManyRedirects as detail:
print("TooManyRedirects: Exception in http.delete {}".format(detail))
print('TooManyRedirects: Exception in http.delete {}'.format(detail))
return response
@ -103,13 +103,13 @@ def patch(url, header='', body='', param=''):
response = requests.patch(url, headers=header, data=body,
params=param)
except requests.ConnectionError as detail:
print("ConnectionError: Exception in http.patch {}".format(detail))
print('ConnectionError: Exception in http.patch {}'.format(detail))
except requests.HTTPError as detail:
print("HTTPError: Exception in http.patch {}".format(detail))
print('HTTPError: Exception in http.patch {}'.format(detail))
except requests.Timeout as detail:
print("Timeout: Exception in http.patch {}".format(detail))
print('Timeout: Exception in http.patch {}'.format(detail))
except requests.TooManyRedirects as detail:
print("TooManyRedirects: Exception in http.patch {}".format(detail))
print('TooManyRedirects: Exception in http.patch {}'.format(detail))
return response
@ -153,14 +153,14 @@ def executetests(row):
print url
print header
print body
print "Actual Response: {}".format(response.status_code)
print "Actual Response Headers"
print 'Actual Response: {}'.format(response.status_code)
print 'Actual Response Headers'
print response.headers
print"Actual Response Body"
print'Actual Response Body'
print response.text
print"ExpectedRC: {}".format(expected_RC)
print"expectedresponsebody: {}".format(expected_response_body)
assert test_result_flag, "Actual Response does not match the Expected"
print'ExpectedRC: {}'.format(expected_RC)
print'expectedresponsebody: {}'.format(expected_response_body)
assert test_result_flag, 'Actual Response does not match the Expected'
def verify_response(response, expected_RC):
@ -171,8 +171,8 @@ def verify_response(response, expected_RC):
if actual_RC != expected_RC:
test_result_flag = False
print("Unexpected http Response code {}".format(actual_RC))
print "Response Body returned"
print('Unexpected http Response code {}'.format(actual_RC))
print 'Response Body returned'
print actual_response_body
return test_result_flag

View File

@ -19,7 +19,7 @@ from marconi.tests.system.common import config
from marconi.tests.system.common import functionlib
cfg = config.Config()
CFG = config.Config()
def get_data():
@ -32,7 +32,7 @@ def get_data():
for row in data:
row['header'] = functionlib.get_headers(row['header'])
row['url'] = row['url'].replace("<BASE_URL>", cfg.base_url)
row['url'] = row['url'].replace('<BASE_URL>', CFG.base_url)
return data

View File

@ -22,7 +22,7 @@ from marconi.tests.system.common import functionlib
from marconi.tests.system.common import http
cfg = config.Config()
CFG = config.Config()
def generate_dict(dict_length):
@ -51,17 +51,17 @@ def single_message_body(**kwargs):
"""
valid_ttl = random.randint(60, 1209600)
if "messagesize" in kwargs.keys():
body = generate_dict(kwargs["messagesize"])
if 'messagesize' in kwargs.keys():
body = generate_dict(kwargs['messagesize'])
else:
body = generate_dict(2)
if "ttl" in kwargs.keys():
ttl = kwargs["ttl"]
if 'ttl' in kwargs.keys():
ttl = kwargs['ttl']
else:
ttl = valid_ttl
message_body = {"ttl": ttl, "body": body}
message_body = {'ttl': ttl, 'body': body}
return message_body
@ -70,7 +70,7 @@ def get_message_body(**kwargs):
:param **kwargs: can be {messagecount: x} , where x is the # of messages.
"""
message_count = kwargs["messagecount"]
message_count = kwargs['messagecount']
multiple_message_body = []
for i in range[message_count]:
message_body = single_message_body(**kwargs)
@ -84,7 +84,7 @@ def dummyget_message_body(dict):
return dict
def create_url(base_url=cfg.base_url, *msg_id_list):
def create_url(base_url=CFG.base_url, *msg_id_list):
"""Creates url list for retrieving messages with message id."""
url = [(base_url + msg_id) for msg_id in msg_id_list]
return url
@ -98,7 +98,7 @@ def verify_msg_length(count=10, *msg_list):
"""
test_result_flag = False
msg_body = json.loads(msg_list[0])
msg_list = msg_body["messages"]
msg_list = msg_body['messages']
msg_count = len(msg_list)
if (msg_count <= count):
test_result_flag = True
@ -113,8 +113,8 @@ def get_href(*msg_list):
:param *msg_list: list of messages returned by the server.
"""
msg_body = json.loads(msg_list[0])
link = msg_body["links"]
href = link[0]["href"]
link = msg_body['links']
href = link[0]['href']
return href
@ -136,14 +136,14 @@ def verify_post_msg(msg_headers, posted_body):
test_result_flag = functionlib.verify_metadata(getmsg.text,
posted_body)
else:
print("Failed to GET {}".format(url))
print("Request Header")
print('Failed to GET {}'.format(url))
print('Request Header')
print header
print("Response Headers")
print('Response Headers')
print getmsg.headers
print("Response Body")
print('Response Body')
print getmsg.text
assert test_result_flag, "HTTP code {}".format(getmsg.status_code)
assert test_result_flag, 'HTTP code {}'.format(getmsg.status_code)
def get_next_msgset(responsetext):
@ -162,9 +162,9 @@ def get_next_msgset(responsetext):
return test_result_flag
else:
test_result_flag = False
print("Failed to GET {}".format(url))
print('Failed to GET {}'.format(url))
print(getmsg.text)
assert test_result_flag, "HTTP code {}".format(getmsg.status_code)
assert test_result_flag, 'HTTP code {}'.format(getmsg.status_code)
def verify_get_msgs(count, *getresponse):
@ -181,11 +181,11 @@ def verify_get_msgs(count, *getresponse):
if msglengthflag:
test_result_flag = get_next_msgset(body)
else:
print("Messages returned exceed requested number of messages")
print('Messages returned exceed requested number of messages')
test_result_flag = False
if not test_result_flag:
assert test_result_flag, "Recursive Get Messages Failed"
assert test_result_flag, 'Recursive Get Messages Failed'
def delete_msg(*postresponse):
@ -204,11 +204,11 @@ def delete_msg(*postresponse):
if deletemsg.status_code == 204:
test_result_flag = functionlib.verify_delete(url, header)
else:
print("DELETE message failed")
print("URL")
print('DELETE message failed')
print('URL')
print url
print("headers")
print('headers')
print header
print("Response Body")
print('Response Body')
print deletemsg.text
assert test_result_flag, "DELETE Code {}".format(deletemsg.status_code)
assert test_result_flag, 'DELETE Code {}'.format(deletemsg.status_code)

View File

@ -19,7 +19,7 @@ from marconi.tests.system.common import config
from marconi.tests.system.common import functionlib
cfg = config.Config()
CFG = config.Config()
def get_data():
@ -32,7 +32,7 @@ def get_data():
for row in data:
row['header'] = functionlib.get_headers(row['header'])
row['url'] = row['url'].replace("<BASE_URL>", cfg.base_url)
row['url'] = row['url'].replace('<BASE_URL>', CFG.base_url)
return data

View File

@ -38,14 +38,14 @@ def verify_queue_stats(*get_response):
keys_in_body = body.keys()
keys_in_body.sort()
if (keys_in_body == ["actions", "messages"]):
stats = body["messages"]
if (keys_in_body == ['actions', 'messages']):
stats = body['messages']
keys_in_stats = stats.keys()
keys_in_stats.sort()
if (keys_in_stats == ["claimed", "free"]):
if (keys_in_stats == ['claimed', 'free']):
try:
int(stats["claimed"])
int(stats["free"])
int(stats['claimed'])
int(stats['free'])
except Exception:
test_result_flag = False
else:
@ -58,7 +58,7 @@ def verify_queue_stats(*get_response):
else:
print headers
print body
assert test_result_flag, "Get Request stats failed"
assert test_result_flag, 'Get Request stats failed'
def get_queue_name(namelength=65):
@ -68,7 +68,7 @@ def get_queue_name(namelength=65):
:param namelength: length of the queue name.
"""
appender = "/queues/" + binascii.b2a_hex(os.urandom(namelength))
appender = '/queues/' + binascii.b2a_hex(os.urandom(namelength))
url = functionlib.create_url_from_appender(appender)
return url

View File

@ -20,7 +20,7 @@ from marconi.tests import util as testing
PROJECT_CONFIG = config.project()
CFG = PROJECT_CONFIG.from_options(
without_help=3,
with_help=(None, "nonsense"))
with_help=(None, 'nonsense'))
class TestConfig(testing.TestBase):

View File

@ -32,6 +32,6 @@ class TestTransportAuth(util.TestBase):
self.cfg.conf = cfg.ConfigOpts()
def test_configs(self):
auth.strategy("keystone")._register_opts(self.cfg.conf)
self.assertIn("keystone_authtoken", self.cfg.conf)
self.assertIn("keystone_authtoken", dir(self.cfg.from_options()))
auth.strategy('keystone')._register_opts(self.cfg.conf)
self.assertIn('keystone_authtoken', self.cfg.conf)
self.assertIn('keystone_authtoken', dir(self.cfg.from_options()))

View File

@ -29,7 +29,7 @@ class TestBase(util.TestBase):
super(TestBase, self).setUp()
if self.config_filename is None:
self.skipTest("No config specified")
self.skipTest('No config specified')
conf_file = self.conf_path(self.config_filename)
boot = marconi.Bootstrap(conf_file)
@ -43,9 +43,9 @@ class TestBaseFaulty(TestBase):
def setUp(self):
self._storage_backup = marconi.Bootstrap.storage
faulty = faulty_storage.Driver()
setattr(marconi.Bootstrap, "storage", faulty)
setattr(marconi.Bootstrap, 'storage', faulty)
super(TestBaseFaulty, self).setUp()
def tearDown(self):
setattr(marconi.Bootstrap, "storage", self._storage_backup)
setattr(marconi.Bootstrap, 'storage', self._storage_backup)
super(TestBaseFaulty, self).tearDown()

View File

@ -35,7 +35,7 @@ class TestWSGIAuth(base.TestBase):
def test_non_authenticated(self):
env = testing.create_environ('/v1/480924/queues/',
method="GET",
method='GET',
headers=self.headers)
self.app(env, self.srmock)

View File

@ -30,62 +30,62 @@ class ClaimsBaseTest(base.TestBase):
def setUp(self):
super(ClaimsBaseTest, self).setUp()
doc = '{ "_ttl": 60 }'
doc = '{"_ttl": 60 }'
env = testing.create_environ('/v1/480924/queues/fizbit',
method="PUT", body=doc)
method='PUT', body=doc)
self.app(env, self.srmock)
doc = json.dumps([{"body": 239, "ttl": 30}] * 10)
doc = json.dumps([{'body': 239, 'ttl': 30}] * 10)
env = testing.create_environ('/v1/480924/queues/fizbit/messages',
method="POST",
method='POST',
body=doc,
headers={'Client-ID': '30387f00'})
self.app(env, self.srmock)
def test_bad_claim(self):
env = testing.create_environ('/v1/480924/queues/fizbit/claims',
method="POST")
method='POST')
self.app(env, self.srmock)
self.assertEquals(self.srmock.status, falcon.HTTP_400)
env = testing.create_environ('/v1/480924/queues/fizbit/claims',
method="POST", body='[')
method='POST', body='[')
self.app(env, self.srmock)
self.assertEquals(self.srmock.status, falcon.HTTP_400)
env = testing.create_environ('/v1/480924/queues/fizbit/claims',
method="POST", body='{}')
method='POST', body='{}')
self.app(env, self.srmock)
self.assertEquals(self.srmock.status, falcon.HTTP_400)
def test_bad_patch(self):
env = testing.create_environ('/v1/480924/queues/fizbit/claims',
method="POST",
body='{ "ttl": 10 }')
method='POST',
body='{"ttl": 10}')
self.app(env, self.srmock)
target = self.srmock.headers_dict['Location']
env = testing.create_environ(target, method="PATCH")
env = testing.create_environ(target, method='PATCH')
self.app(env, self.srmock)
self.assertEquals(self.srmock.status, falcon.HTTP_400)
env = testing.create_environ(target, method="PATCH", body='{')
env = testing.create_environ(target, method='PATCH', body='{')
self.app(env, self.srmock)
self.assertEquals(self.srmock.status, falcon.HTTP_400)
def test_lifecycle(self):
doc = '{ "ttl": 10 }'
doc = '{"ttl": 10}'
# claim some messages
env = testing.create_environ('/v1/480924/queues/fizbit/claims',
method="POST",
method='POST',
body=doc)
body = self.app(env, self.srmock)
@ -98,7 +98,7 @@ class ClaimsBaseTest(base.TestBase):
# no more messages to claim
env = testing.create_environ('/v1/480924/queues/fizbit/claims',
method="POST",
method='POST',
body=doc,
query_string='limit=3')
@ -107,7 +107,7 @@ class ClaimsBaseTest(base.TestBase):
# check its metadata
env = testing.create_environ(target, method="GET")
env = testing.create_environ(target, method='GET')
body = self.app(env, self.srmock)
st = json.loads(body[0])
@ -121,7 +121,7 @@ class ClaimsBaseTest(base.TestBase):
# delete a message with its associated claim
env = testing.create_environ(msg_target, query_string=params,
method="DELETE")
method='DELETE')
self.app(env, self.srmock)
self.assertEquals(self.srmock.status, falcon.HTTP_204)
@ -134,15 +134,15 @@ class ClaimsBaseTest(base.TestBase):
# update the claim
env = testing.create_environ(target,
body='{ "ttl": 60 }',
method="PATCH")
body='{"ttl": 60}',
method='PATCH')
self.app(env, self.srmock)
self.assertEquals(self.srmock.status, falcon.HTTP_204)
# get the claimed messages again
env = testing.create_environ(target, method="GET")
env = testing.create_environ(target, method='GET')
body = self.app(env, self.srmock)
st = json.loads(body[0])
@ -152,7 +152,7 @@ class ClaimsBaseTest(base.TestBase):
# delete the claim
env = testing.create_environ(st['href'], method="DELETE")
env = testing.create_environ(st['href'], method='DELETE')
self.app(env, self.srmock)
self.assertEquals(self.srmock.status, falcon.HTTP_204)
@ -160,7 +160,7 @@ class ClaimsBaseTest(base.TestBase):
# can not delete a message with a non-existing claim
env = testing.create_environ(msg_target, query_string=params,
method="DELETE")
method='DELETE')
self.app(env, self.srmock)
self.assertEquals(self.srmock.status, falcon.HTTP_403)
@ -172,27 +172,27 @@ class ClaimsBaseTest(base.TestBase):
# get & update a non existing claim
env = testing.create_environ(st['href'], method="GET")
env = testing.create_environ(st['href'], method='GET')
body = self.app(env, self.srmock)
self.assertEquals(self.srmock.status, falcon.HTTP_404)
env = testing.create_environ(st['href'], method="PATCH", body=doc)
env = testing.create_environ(st['href'], method='PATCH', body=doc)
body = self.app(env, self.srmock)
self.assertEquals(self.srmock.status, falcon.HTTP_404)
def test_nonexistent(self):
doc = '{ "ttl": 10 }'
doc = '{"ttl": 10}'
env = testing.create_environ('/v1/480924/queues/nonexistent/claims',
method="POST", body=doc)
method='POST', body=doc)
self.app(env, self.srmock)
self.assertEquals(self.srmock.status, falcon.HTTP_404)
def tearDown(self):
env = testing.create_environ('/v1/480924/queues/fizbit',
method="DELETE")
method='DELETE')
self.app(env, self.srmock)
super(ClaimsBaseTest, self).tearDown()
@ -203,11 +203,11 @@ class ClaimsMongoDBTests(ClaimsBaseTest):
config_filename = 'wsgi_mongodb.conf'
def setUp(self):
if not os.environ.get("MONGODB_TEST_LIVE"):
self.skipTest("No MongoDB instance running")
if not os.environ.get('MONGODB_TEST_LIVE'):
self.skipTest('No MongoDB instance running')
super(ClaimsMongoDBTests, self).setUp()
self.cfg = config.namespace("drivers:storage:mongodb").from_options()
self.cfg = config.namespace('drivers:storage:mongodb').from_options()
def tearDown(self):
conn = pymongo.MongoClient(self.cfg.uri)
@ -225,9 +225,9 @@ class ClaimsFaultyDriverTests(base.TestBaseFaulty):
config_filename = 'wsgi_faulty.conf'
def test_simple(self):
doc = '{ "ttl": 100 }'
doc = '{"ttl": 100}'
env = testing.create_environ('/v1/480924/queues/fizbit/claims',
method="POST",
method='POST',
body=doc)
self.app(env, self.srmock)
@ -235,14 +235,14 @@ class ClaimsFaultyDriverTests(base.TestBaseFaulty):
env = testing.create_environ('/v1/480924/queues/fizbit/claims'
'/nonexistent',
method="GET")
method='GET')
self.app(env, self.srmock)
self.assertEquals(self.srmock.status, falcon.HTTP_503)
env = testing.create_environ('/v1/480924/queues/fizbit/claims'
'/nonexistent',
method="PATCH",
method='PATCH',
body=doc)
self.app(env, self.srmock)
@ -250,7 +250,7 @@ class ClaimsFaultyDriverTests(base.TestBaseFaulty):
env = testing.create_environ('/v1/480924/queues/fizbit/claims'
'/nonexistent',
method="DELETE")
method='DELETE')
self.app(env, self.srmock)
self.assertEquals(self.srmock.status, falcon.HTTP_503)

View File

@ -27,9 +27,9 @@ class MessagesBaseTest(base.TestBase):
def setUp(self):
super(MessagesBaseTest, self).setUp()
doc = '{ "_ttl": 60 }'
doc = '{"_ttl": 60}'
env = testing.create_environ('/v1/480924/queues/fizbit',
method="PUT", body=doc)
method='PUT', body=doc)
self.app(env, self.srmock)
self.headers = {
@ -38,23 +38,23 @@ class MessagesBaseTest(base.TestBase):
def tearDown(self):
env = testing.create_environ('/v1/480924/queues/fizbit',
method="DELETE")
method='DELETE')
self.app(env, self.srmock)
super(MessagesBaseTest, self).tearDown()
def test_post(self):
doc = '''
doc = """
[
{"body": 239, "ttl": 10},
{"body": {"key": "value"}, "ttl": 20},
{"body": [1, 3], "ttl": 30}
]
'''
"""
path = '/v1/480924/queues/fizbit/messages'
env = testing.create_environ(path,
method="POST",
method='POST',
body=doc,
headers=self.headers)
@ -76,7 +76,7 @@ class MessagesBaseTest(base.TestBase):
for msg_id in msg_ids:
message_uri = path + '/' + msg_id
env = testing.create_environ(message_uri, method="GET")
env = testing.create_environ(message_uri, method='GET')
body = self.app(env, self.srmock)
self.assertEquals(self.srmock.status, falcon.HTTP_200)
@ -92,14 +92,14 @@ class MessagesBaseTest(base.TestBase):
def test_post_bad_message(self):
env = testing.create_environ('/v1/480924/queues/fizbit/messages',
method="POST",
method='POST',
headers=self.headers)
self.app(env, self.srmock)
self.assertEquals(self.srmock.status, falcon.HTTP_400)
env = testing.create_environ('/v1/480924/queues/fizbit/messages',
method="POST",
method='POST',
body='[',
headers=self.headers)
@ -107,7 +107,7 @@ class MessagesBaseTest(base.TestBase):
self.assertEquals(self.srmock.status, falcon.HTTP_400)
env = testing.create_environ('/v1/480924/queues/fizbit/messages',
method="POST",
method='POST',
body='[]',
headers=self.headers)
@ -115,7 +115,7 @@ class MessagesBaseTest(base.TestBase):
self.assertEquals(self.srmock.status, falcon.HTTP_400)
env = testing.create_environ('/v1/480924/queues/fizbit/messages',
method="POST",
method='POST',
body='{}',
headers=self.headers)
@ -127,19 +127,19 @@ class MessagesBaseTest(base.TestBase):
[msg_id] = self._get_msg_ids(self.srmock.headers_dict)
env = testing.create_environ('/v1/480924/queues/fizbit/messages/'
+ msg_id, method="GET")
+ msg_id, method='GET')
self.app(env, self.srmock)
self.assertEquals(self.srmock.status, falcon.HTTP_200)
env = testing.create_environ('/v1/480924/queues/fizbit/messages/'
+ msg_id, method="DELETE")
+ msg_id, method='DELETE')
self.app(env, self.srmock)
self.assertEquals(self.srmock.status, falcon.HTTP_204)
env = testing.create_environ('/v1/480924/queues/fizbit/messages/'
+ msg_id, method="GET")
+ msg_id, method='GET')
self.app(env, self.srmock)
self.assertEquals(self.srmock.status, falcon.HTTP_404)
@ -203,23 +203,23 @@ class MessagesBaseTest(base.TestBase):
def test_no_uuid(self):
env = testing.create_environ('/v1/480924/queues/fizbit/messages',
method="POST",
method='POST',
body='[{"body": 0, "ttl": 0}]')
self.app(env, self.srmock)
self.assertEquals(self.srmock.status, falcon.HTTP_400)
env = testing.create_environ('/v1/480924/queues/fizbit/messages',
method="GET")
method='GET')
self.app(env, self.srmock)
self.assertEquals(self.srmock.status, falcon.HTTP_400)
def _post_messages(self, target, repeat=1):
doc = json.dumps([{"body": 239, "ttl": 30}] * repeat)
doc = json.dumps([{'body': 239, 'ttl': 30}] * repeat)
env = testing.create_environ(target,
method="POST",
method='POST',
body=doc,
headers=self.headers)
self.app(env, self.srmock)
@ -238,8 +238,8 @@ class MessagesMongoDBTests(MessagesBaseTest):
config_filename = 'wsgi_mongodb.conf'
def setUp(self):
if not os.environ.get("MONGODB_TEST_LIVE"):
self.skipTest("No MongoDB instance running")
if not os.environ.get('MONGODB_TEST_LIVE'):
self.skipTest('No MongoDB instance running')
super(MessagesMongoDBTests, self).setUp()
@ -255,7 +255,7 @@ class MessagesFaultyDriverTests(base.TestBaseFaulty):
}
env = testing.create_environ('/v1/480924/queues/fizbit/messages',
method="POST",
method='POST',
body=doc,
headers=headers)
@ -263,7 +263,7 @@ class MessagesFaultyDriverTests(base.TestBaseFaulty):
self.assertEquals(self.srmock.status, falcon.HTTP_503)
env = testing.create_environ('/v1/480924/queues/fizbit/messages',
method="GET",
method='GET',
headers=headers)
self.app(env, self.srmock)
@ -271,14 +271,14 @@ class MessagesFaultyDriverTests(base.TestBaseFaulty):
env = testing.create_environ('/v1/480924/queues/fizbit/messages'
'/nonexistent',
method="GET")
method='GET')
self.app(env, self.srmock)
self.assertEquals(self.srmock.status, falcon.HTTP_503)
env = testing.create_environ('/v1/480924/queues/fizbit/messages'
'/nonexistent',
method="DELETE")
method='DELETE')
self.app(env, self.srmock)
self.assertEquals(self.srmock.status, falcon.HTTP_503)

View File

@ -35,7 +35,7 @@ class QueueLifecycleBaseTest(base.TestBase):
# Create
env = testing.create_environ('/v1/480924/queues/gumshoe',
method="PUT", body=doc)
method='PUT', body=doc)
self.app(env, self.srmock)
self.assertEquals(self.srmock.status, falcon.HTTP_201)
@ -51,7 +51,7 @@ class QueueLifecycleBaseTest(base.TestBase):
# Delete
env = testing.create_environ('/v1/480924/queues/gumshoe',
method="DELETE")
method='DELETE')
self.app(env, self.srmock)
self.assertEquals(self.srmock.status, falcon.HTTP_204)
@ -63,22 +63,22 @@ class QueueLifecycleBaseTest(base.TestBase):
self.assertEquals(self.srmock.status, falcon.HTTP_404)
def test_no_metadata(self):
env = testing.create_environ('/v1/480924/queues/fizbat', method="PUT")
env = testing.create_environ('/v1/480924/queues/fizbat', method='PUT')
self.app(env, self.srmock)
self.assertEquals(self.srmock.status, falcon.HTTP_400)
def test_bad_metadata(self):
env = testing.create_environ('/v1/480924/queues/fizbat',
body="{",
method="PUT")
body='{',
method='PUT')
self.app(env, self.srmock)
self.assertEquals(self.srmock.status, falcon.HTTP_400)
env = testing.create_environ('/v1/480924/queues/fizbat',
body="[]",
method="PUT")
body='[]',
method='PUT')
self.app(env, self.srmock)
self.assertEquals(self.srmock.status, falcon.HTTP_400)
@ -88,7 +88,7 @@ class QueueLifecycleBaseTest(base.TestBase):
padding_len = transport.MAX_QUEUE_METADATA_SIZE - (len(doc) - 2) + 1
doc = doc % ('x' * padding_len)
env = testing.create_environ('/v1/480924/queues/fizbat',
method="PUT", body=doc)
method='PUT', body=doc)
self.app(env, self.srmock)
self.assertEquals(self.srmock.status, falcon.HTTP_400)
@ -98,7 +98,7 @@ class QueueLifecycleBaseTest(base.TestBase):
padding_len = transport.MAX_QUEUE_METADATA_SIZE * 100
doc = doc % ('x' * padding_len)
env = testing.create_environ('/v1/480924/queues/gumshoe',
method="PUT", body=doc)
method='PUT', body=doc)
self.app(env, self.srmock)
self.assertEquals(self.srmock.status, falcon.HTTP_400)
@ -109,7 +109,7 @@ class QueueLifecycleBaseTest(base.TestBase):
padding_len = transport.MAX_QUEUE_METADATA_SIZE - (len(doc) - 2)
doc = doc % ('x' * padding_len)
env = testing.create_environ('/v1/480924/queues/gumshoe',
method="PUT", body=doc)
method='PUT', body=doc)
self.app(env, self.srmock)
self.assertEquals(self.srmock.status, falcon.HTTP_201)
@ -124,7 +124,7 @@ class QueueLifecycleBaseTest(base.TestBase):
# Create
doc1 = '{"messages": {"ttl": 600}}'
env = testing.create_environ('/v1/480924/queues/xyz',
method="PUT", body=doc1)
method='PUT', body=doc1)
self.app(env, self.srmock)
self.assertEquals(self.srmock.status, falcon.HTTP_201)
@ -132,7 +132,7 @@ class QueueLifecycleBaseTest(base.TestBase):
# Update
doc2 = '{"messages": {"ttl": 100}}'
env = testing.create_environ('/v1/480924/queues/xyz',
method="PUT", body=doc2)
method='PUT', body=doc2)
self.app(env, self.srmock)
self.assertEquals(self.srmock.status, falcon.HTTP_204)
@ -155,18 +155,18 @@ class QueueLifecycleBaseTest(base.TestBase):
# Create some
env = testing.create_environ('/v1/480924/queues/q1',
method="PUT",
body='{ "_ttl": 30 }')
method='PUT',
body='{"_ttl": 30 }')
self.app(env, self.srmock)
env = testing.create_environ('/v1/480924/queues/q2',
method="PUT",
method='PUT',
body='{}')
self.app(env, self.srmock)
env = testing.create_environ('/v1/480924/queues/q3',
method="PUT",
body='{ "_ttl": 30 }')
method='PUT',
body='{"_ttl": 30 }')
self.app(env, self.srmock)
# List
@ -213,11 +213,11 @@ class QueueLifecycleMongoDBTests(QueueLifecycleBaseTest):
config_filename = 'wsgi_mongodb.conf'
def setUp(self):
if not os.environ.get("MONGODB_TEST_LIVE"):
self.skipTest("No MongoDB instance running")
if not os.environ.get('MONGODB_TEST_LIVE'):
self.skipTest('No MongoDB instance running')
super(QueueLifecycleMongoDBTests, self).setUp()
self.cfg = config.namespace("drivers:storage:mongodb").from_options()
self.cfg = config.namespace('drivers:storage:mongodb').from_options()
def tearDown(self):
conn = pymongo.MongoClient(self.cfg.uri)
@ -237,7 +237,7 @@ class QueueFaultyDriverTests(base.TestBaseFaulty):
def test_simple(self):
doc = '{"messages": {"ttl": 600}}'
env = testing.create_environ('/v1/480924/queues/gumshoe',
method="PUT", body=doc)
method='PUT', body=doc)
self.app(env, self.srmock)
self.assertEquals(self.srmock.status, falcon.HTTP_503)

View File

@ -44,7 +44,7 @@ class TestBase(testtools.TestCase):
"""Returns the full path to the specified Marconi conf file.
:param filename: Name of the conf file to find (e.g.,
"wsgi_memory.conf")
'wsgi_memory.conf')
"""
parent = os.path.dirname(self._my_dir())
@ -54,7 +54,7 @@ class TestBase(testtools.TestCase):
"""Loads `filename` configuration file.
:param filename: Name of the conf file to find (e.g.,
"wsgi_memory.conf")
'wsgi_memory.conf')
:returns: Project's config object.
"""

View File

@ -4,10 +4,10 @@ from marconi.common import config
from marconi.transport import base
OPTIONS = {
"auth_strategy": ""
'auth_strategy': ""
}
cfg = config.project('marconi').from_options(**OPTIONS)
CFG = config.project('marconi').from_options(**OPTIONS)
MAX_QUEUE_METADATA_SIZE = 64 * 1024
"""Maximum metadata size per queue when serialized as JSON"""

View File

@ -40,7 +40,7 @@ class KeystoneAuth(object):
return auth_token.AuthProtocol(app, conf=conf)
STRATEGIES["keystone"] = KeystoneAuth
STRATEGIES['keystone'] = KeystoneAuth
def strategy(strategy):

View File

@ -52,7 +52,7 @@ class CollectionResource(object):
**claim_options)
# Buffer claimed messages
#TODO(kgriffs): optimize, along with serialization (below)
# TODO(kgriffs): optimize, along with serialization (below)
resp_msgs = list(msgs)
except storage_exceptions.DoesNotExist:
@ -93,7 +93,7 @@ class ItemResource(object):
project=project_id)
# Buffer claimed messages
#TODO(kgriffs): Optimize along with serialization (see below)
# TODO(kgriffs): Optimize along with serialization (see below)
meta['messages'] = list(msgs)
except storage_exceptions.DoesNotExist:
@ -104,7 +104,7 @@ class ItemResource(object):
raise wsgi_exceptions.HTTPServiceUnavailable(description)
# Serialize claimed messages
#TODO(kgriffs): Optimize
# TODO(kgriffs): Optimize
for msg in meta['messages']:
msg['href'] = _msg_uri_from_claim(
req.path.rsplit('/', 2)[0], msg['id'], meta['id'])
@ -152,7 +152,7 @@ class ItemResource(object):
raise wsgi_exceptions.HTTPServiceUnavailable(description)
#TODO(kgriffs): Clean up/optimize and move to wsgi.helpers
# TODO(kgriffs): Clean up/optimize and move to wsgi.helpers
def _msg_uri_from_claim(base_path, msg_id, claim_id):
return '/'.join(
[base_path, 'messages', msg_id]

View File

@ -30,9 +30,9 @@ OPTIONS = {
'port': 8888
}
pconfig = config.project('marconi')
gcfg = pconfig.from_options()
lcfg = config.namespace('drivers:transport:wsgi').from_options(**OPTIONS)
PROJECT_CFG = config.project('marconi')
GLOBAL_CFG = PROJECT_CFG.from_options()
WSGI_CFG = config.namespace('drivers:transport:wsgi').from_options(**OPTIONS)
LOG = logging.getLogger(__name__)
@ -77,13 +77,14 @@ class Driver(transport.DriverBase):
'/claims/{claim_id}', claim_item)
# NOTE(flaper87): Install Auth
if gcfg.auth_strategy:
strategy = auth.strategy(gcfg.auth_strategy)
self.app = strategy.install(self.app, pconfig.conf)
if GLOBAL_CFG.auth_strategy:
strategy = auth.strategy(GLOBAL_CFG.auth_strategy)
self.app = strategy.install(self.app, PROJECT_CFG.conf)
def listen(self):
msg = _("Serving on host %(bind)s:%(port)s") % {"bind": lcfg.bind,
"port": lcfg.port}
msg = _('Serving on host %(bind)s:%(port)s')
msg %= {'bind': WSGI_CFG.bind, 'port': WSGI_CFG.port}
LOG.debug(msg)
httpd = simple_server.make_server(lcfg.bind, lcfg.port, self.app)
httpd = simple_server.make_server(WSGI_CFG.bind, WSGI_CFG.port,
self.app)
httpd.serve_forever()

View File

@ -29,7 +29,7 @@ JSONArray = list
LOG = logging.getLogger(__name__)
#TODO(kgriffs): Consider moving this to Falcon and/or Oslo
# TODO(kgriffs): Consider moving this to Falcon and/or Oslo
def filter_stream(stream, len, spec, doctype=JSONObject):
"""Reads, deserializes, and validates a document from a stream.
@ -52,7 +52,7 @@ def filter_stream(stream, len, spec, doctype=JSONObject):
"""
try:
#TODO(kgriffs): read_json should stream the resulting list
# TODO(kgriffs): read_json should stream the resulting list
# of messages, returning a generator rather than buffering
# everything in memory (bp/streaming-serialization).
document = helpers.read_json(stream, len)
@ -82,10 +82,10 @@ def filter_stream(stream, len, spec, doctype=JSONObject):
# streaming JSON deserializer (see above.git )
return (filter(obj, spec) for obj in document)
raise ValueError("doctype not in (JSONObject, JSONArray)")
raise ValueError('doctype not in (JSONObject, JSONArray)')
#TODO(kgriffs): Consider moving this to Falcon and/or Oslo
# TODO(kgriffs): Consider moving this to Falcon and/or Oslo
def filter(document, spec):
"""Validates and retrieves typed fields from a single document.
@ -111,7 +111,7 @@ def filter(document, spec):
return filtered
#TODO(kgriffs): Consider moving this to Falcon and/or Oslo
# TODO(kgriffs): Consider moving this to Falcon and/or Oslo
def get_checked_field(document, name, value_type):
"""Validates and retrieves a typed field from a document.

View File

@ -74,7 +74,7 @@ class CollectionResource(object):
message_ids = ex.succeeded_ids
if not message_ids:
#TODO(kgriffs): Include error code that is different
# TODO(kgriffs): Include error code that is different
# from the code used in the generic case, below.
description = _('No messages could be enqueued.')
raise wsgi_exceptions.HTTPServiceUnavailable(description)
@ -90,13 +90,13 @@ class CollectionResource(object):
resp.location = req.path + '/' + resource
hrefs = [req.path + '/' + id for id in message_ids]
body = {"resources": hrefs, "partial": partial}
body = {'resources': hrefs, 'partial': partial}
resp.body = helpers.to_json(body)
def on_get(self, req, resp, project_id, queue_name):
uuid = req.get_header('Client-ID', required=True)
#TODO(kgriffs): Optimize
# TODO(kgriffs): Optimize
kwargs = helpers.purge({
'marker': req.get_param('marker'),
'limit': req.get_param_as_int('limit'),

View File

@ -33,7 +33,7 @@ class ItemResource(object):
self.queue_controller = queue_controller
def on_put(self, req, resp, project_id, queue_name):
#TODO(kgriffs): Migrate this check to input validator middleware
# TODO(kgriffs): Migrate this check to input validator middleware
if req.content_length > transport.MAX_QUEUE_METADATA_SIZE:
description = _('Queue metadata size is too large.')
raise wsgi_exceptions.HTTPBadRequestBody(description)
@ -102,7 +102,7 @@ class CollectionResource(object):
self.queue_controller = queue_controller
def on_get(self, req, resp, project_id):
#TODO(kgriffs): Optimize
# TODO(kgriffs): Optimize
kwargs = helpers.purge({
'marker': req.get_param('marker'),
'limit': req.get_param_as_int('limit'),