# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

""" Swift tests """

import os
import copy
import logging
import errno
import sys
from contextlib import contextmanager, closing
from collections import defaultdict, Iterable
from numbers import Number
from tempfile import NamedTemporaryFile, mkdtemp
import time
from eventlet.green import socket
from shutil import rmtree
from test import get_config
from swift.common import swob, utils
from swift.common.ring import Ring, RingData
from hashlib import md5
from eventlet import sleep, Timeout
import logging.handlers
from httplib import HTTPException
from swift.common import storage_policy
import functools
import cPickle as pickle
from gzip import GzipFile
import mock as mocklib

# try not to import this module from swift
if not os.path.basename(sys.argv[0]).startswith('swift'):
    # never patch HASH_PATH_SUFFIX AGAIN!
    utils.HASH_PATH_SUFFIX = 'endcap'


def patch_policies(thing_or_policies=None, legacy_only=False):
    if legacy_only:
        default_policies = [storage_policy.StoragePolicy(
            0, 'legacy', True, object_ring=FakeRing())]
    else:
        default_policies = [
            storage_policy.StoragePolicy(
                0, 'nulo', True, object_ring=FakeRing()),
            storage_policy.StoragePolicy(
                1, 'unu', object_ring=FakeRing()),
        ]

    thing_or_policies = thing_or_policies or default_policies

    if isinstance(thing_or_policies, (
            Iterable, storage_policy.StoragePolicyCollection)):
        return PatchPolicies(thing_or_policies)
    else:
        # it's a thing!
        return PatchPolicies(default_policies)(thing_or_policies)
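
# Usage sketch (illustrative only; "MyTest" is a hypothetical test class):
# patch_policies can wrap a TestCase subclass or a single test method, and
# accepts an explicit policy list instead of the defaults.
#
#   @patch_policies
#   class MyTest(unittest.TestCase):
#       def test_something(self):
#           pass  # storage_policy._POLICIES is the two-policy default here
#
#   @patch_policies([storage_policy.StoragePolicy(
#       0, 'only', True, object_ring=FakeRing())])
#   def test_other_thing(self):
#       pass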


class PatchPolicies(object):
    """
    Why not mock.patch?  In my case, when used as a decorator on the class it
    seemed to patch setUp at the wrong time (i.e. in setUp the global wasn't
    patched yet).
    """

    def __init__(self, policies):
        if isinstance(policies, storage_policy.StoragePolicyCollection):
            self.policies = policies
        else:
            self.policies = storage_policy.StoragePolicyCollection(policies)

    def __call__(self, thing):
        if isinstance(thing, type):
            return self._patch_class(thing)
        else:
            return self._patch_method(thing)

    def _patch_class(self, cls):

        class NewClass(cls):

            already_patched = False

            def setUp(cls_self):
                self._orig_POLICIES = storage_policy._POLICIES
                if not cls_self.already_patched:
                    storage_policy._POLICIES = self.policies
                    cls_self.already_patched = True
                super(NewClass, cls_self).setUp()

            def tearDown(cls_self):
                super(NewClass, cls_self).tearDown()
                storage_policy._POLICIES = self._orig_POLICIES

        NewClass.__name__ = cls.__name__
        return NewClass

    def _patch_method(self, f):
        @functools.wraps(f)
        def mywrapper(*args, **kwargs):
            self._orig_POLICIES = storage_policy._POLICIES
            try:
                storage_policy._POLICIES = self.policies
                return f(*args, **kwargs)
            finally:
                storage_policy._POLICIES = self._orig_POLICIES
        return mywrapper

    def __enter__(self):
        self._orig_POLICIES = storage_policy._POLICIES
        storage_policy._POLICIES = self.policies

    def __exit__(self, *args):
        storage_policy._POLICIES = self._orig_POLICIES
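
# PatchPolicies also works as a context manager; a minimal sketch:
#
#   with PatchPolicies([storage_policy.StoragePolicy(
#           0, 'zero', True, object_ring=FakeRing())]):
#       pass  # storage_policy._POLICIES is swapped inside this block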


class FakeRing(Ring):

    def __init__(self, replicas=3, max_more_nodes=0, part_power=0,
                 base_port=1000):
        """
        :param part_power: make part calculation based on the path

        If you set a part_power when you set up your FakeRing, the parts you
        get out of ring methods will actually be based on the path - otherwise
        we exercise the real ring code, but ignore the result and return 1.
        """
        self._base_port = base_port
        self.max_more_nodes = max_more_nodes
        self._part_shift = 32 - part_power
        # 9 total nodes (6 more past the initial 3) is the cap, no matter if
        # this is set higher, or R^2 for R replicas
        self.set_replicas(replicas)
        self._reload()

    def _reload(self):
        self._rtime = time.time()

    def set_replicas(self, replicas):
        self.replicas = replicas
        self._devs = []
        for x in range(self.replicas):
            ip = '10.0.0.%s' % x
            port = self._base_port + x
            self._devs.append({
                'ip': ip,
                'replication_ip': ip,
                'port': port,
                'replication_port': port,
                'device': 'sd' + (chr(ord('a') + x)),
                'zone': x % 3,
                'region': x % 2,
                'id': x,
            })

    @property
    def replica_count(self):
        return self.replicas

    def _get_part_nodes(self, part):
        return list(self._devs)

    def get_more_nodes(self, part):
        # replicas^2 is the true cap
        for x in xrange(self.replicas, min(self.replicas + self.max_more_nodes,
                                           self.replicas * self.replicas)):
            yield {'ip': '10.0.0.%s' % x,
                   'port': self._base_port + x,
                   'device': 'sda',
                   'zone': x % 3,
                   'region': x % 2,
                   'id': x}
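
# FakeRing sketch: with the default part_power=0 everything maps to part 0,
# and get_more_nodes() yields at most replicas**2 - replicas handoff nodes.
#
#   ring = FakeRing(replicas=3, max_more_nodes=6)
#   part, nodes = ring.get_nodes('a', 'c', 'o')  # all 3 fake devs come back
#   handoffs = list(ring.get_more_nodes(part))   # 6 more fake devs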


def write_fake_ring(path, *devs):
    """
    Pretty much just a two node, two replica, 2 part power ring...
    """
    dev1 = {'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
            'port': 6000}
    # device ids must match their positions in the devs list, or the
    # replica2part2dev_id lookups below resolve to the wrong device
    dev2 = {'id': 1, 'zone': 0, 'device': 'sdb1', 'ip': '127.0.0.1',
            'port': 6000}

    dev1_updates, dev2_updates = devs or ({}, {})

    dev1.update(dev1_updates)
    dev2.update(dev2_updates)

    replica2part2dev_id = [[0, 1, 0, 1], [1, 0, 1, 0]]
    devs = [dev1, dev2]
    part_shift = 30
    with closing(GzipFile(path, 'wb')) as f:
        pickle.dump(RingData(replica2part2dev_id, devs, part_shift), f)


class FakeMemcache(object):

    def __init__(self):
        self.store = {}

    def get(self, key):
        return self.store.get(key)

    def keys(self):
        return self.store.keys()

    def set(self, key, value, time=0):
        self.store[key] = value
        return True

    def incr(self, key, time=0):
        self.store[key] = self.store.setdefault(key, 0) + 1
        return self.store[key]

    @contextmanager
    def soft_lock(self, key, timeout=0, retries=5):
        yield True

    def delete(self, key):
        try:
            del self.store[key]
        except Exception:
            pass
        return True


def readuntil2crlfs(fd):
    rv = ''
    lc = ''
    crlfs = 0
    while crlfs < 2:
        c = fd.read(1)
        if not c:
            raise ValueError("didn't get two CRLFs; just got %r" % rv)
        rv = rv + c
        if c == '\r' and lc != '\n':
            crlfs = 0
        if lc == '\r' and c == '\n':
            crlfs += 1
        lc = c
    return rv


def connect_tcp(hostport):
    rv = socket.socket()
    rv.connect(hostport)
    return rv


@contextmanager
def tmpfile(content):
    with NamedTemporaryFile('w', delete=False) as f:
        file_name = f.name
        f.write(str(content))
    try:
        yield file_name
    finally:
        os.unlink(file_name)

xattr_data = {}


def _get_inode(fd):
    if not isinstance(fd, int):
        try:
            fd = fd.fileno()
        except AttributeError:
            return os.stat(fd).st_ino
    return os.fstat(fd).st_ino


def _setxattr(fd, k, v):
    inode = _get_inode(fd)
    data = xattr_data.get(inode, {})
    data[k] = v
    xattr_data[inode] = data


def _getxattr(fd, k):
    inode = _get_inode(fd)
    data = xattr_data.get(inode, {}).get(k)
    if not data:
        raise IOError(errno.ENODATA, "Fake IOError")
    return data


import xattr
xattr.setxattr = _setxattr
xattr.getxattr = _getxattr


@contextmanager
def temptree(files, contents=''):
    # generate enough contents to fill the files
    c = len(files)
    contents = (list(contents) + [''] * c)[:c]
    tempdir = mkdtemp()
    for path, content in zip(files, contents):
        if os.path.isabs(path):
            path = '.' + path
        new_path = os.path.join(tempdir, path)
        subdir = os.path.dirname(new_path)
        if not os.path.exists(subdir):
            os.makedirs(subdir)
        with open(new_path, 'w') as f:
            f.write(str(content))
    try:
        yield tempdir
    finally:
        rmtree(tempdir)
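
# temptree usage sketch: build a throwaway directory tree for a test.
#
#   with temptree(['foo/one.conf', 'bar/two.conf'], ['[one]', '[two]']) as t:
#       one = open(os.path.join(t, 'foo/one.conf')).read()  # '[one]'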


def with_tempdir(f):
    """
    Decorator to pass a tempdir to the wrapped test method as its last
    positional argument; the tempdir is removed when the test returns.
    """
    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        tempdir = mkdtemp()
        args = list(args)
        args.append(tempdir)
        try:
            return f(*args, **kwargs)
        finally:
            rmtree(tempdir)
    return wrapped
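
# with_tempdir usage sketch ("MyTest" is hypothetical): the wrapped test
# method receives the tempdir as an extra trailing positional argument.
#
#   class MyTest(unittest.TestCase):
#       @with_tempdir
#       def test_write(self, tempdir):
#           open(os.path.join(tempdir, 'f'), 'w').write('x')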


class NullLoggingHandler(logging.Handler):

    def emit(self, record):
        pass


class UnmockTimeModule(object):
    """
    Even if a test mocks time.time - you can restore unmolested behavior in
    another module that imports time directly, by monkey patching its imported
    reference to the module with an instance of this class
    """

    _orig_time = time.time

    def __getattribute__(self, name):
        if name == 'time':
            return UnmockTimeModule._orig_time
        return getattr(time, name)


# logging.LogRecord.__init__ calls time.time
logging.time = UnmockTimeModule()


class FakeLogger(logging.Logger):
    # a thread safe logger

    def __init__(self, *args, **kwargs):
        self._clear()
        self.name = 'swift.unit.fake_logger'
        self.level = logging.NOTSET
        if 'facility' in kwargs:
            self.facility = kwargs['facility']
        self.statsd_client = None
        self.thread_locals = None
        self.parent = None

    def _clear(self):
        self.log_dict = defaultdict(list)
        self.lines_dict = {'critical': [], 'error': [], 'info': [],
                           'warning': [], 'debug': []}

    def _store_in(store_name):
        def stub_fn(self, *args, **kwargs):
            self.log_dict[store_name].append((args, kwargs))
        return stub_fn

    def _store_and_log_in(store_name, level):
        def stub_fn(self, *args, **kwargs):
            self.log_dict[store_name].append((args, kwargs))
            self._log(level, args[0], args[1:], **kwargs)
        return stub_fn

    def get_lines_for_level(self, level):
        if level not in self.lines_dict:
            raise KeyError(
                "Invalid log level '%s'; valid levels are %s" %
                (level,
                 ', '.join("'%s'" % lvl for lvl in sorted(self.lines_dict))))
        return self.lines_dict[level]

    def all_log_lines(self):
        return dict((level, msgs) for level, msgs in self.lines_dict.items()
                    if len(msgs) > 0)

    error = _store_and_log_in('error', logging.ERROR)
    info = _store_and_log_in('info', logging.INFO)
    warning = _store_and_log_in('warning', logging.WARNING)
    warn = _store_and_log_in('warning', logging.WARNING)
    debug = _store_and_log_in('debug', logging.DEBUG)

    def exception(self, *args, **kwargs):
        self.log_dict['exception'].append((args, kwargs,
                                           str(sys.exc_info()[1])))
        print 'FakeLogger Exception: %s' % self.log_dict

    # mock out the StatsD logging methods:
    update_stats = _store_in('update_stats')
    increment = _store_in('increment')
    decrement = _store_in('decrement')
    timing = _store_in('timing')
    timing_since = _store_in('timing_since')
    transfer_rate = _store_in('transfer_rate')
    set_statsd_prefix = _store_in('set_statsd_prefix')

    def get_increments(self):
        return [call[0][0] for call in self.log_dict['increment']]

    def get_increment_counts(self):
        counts = {}
        for metric in self.get_increments():
            if metric not in counts:
                counts[metric] = 0
            counts[metric] += 1
        return counts

    def setFormatter(self, obj):
        self.formatter = obj

    def close(self):
        self._clear()

    def set_name(self, name):
        # don't touch _handlers
        self._name = name

    def acquire(self):
        pass

    def release(self):
        pass

    def createLock(self):
        pass

    def emit(self, record):
        pass

    def _handle(self, record):
        try:
            line = record.getMessage()
        except TypeError:
            print 'WARNING: unable to format log message %r %% %r' % (
                record.msg, record.args)
            raise
        self.lines_dict[record.levelname.lower()].append(line)

    def handle(self, record):
        self._handle(record)

    def flush(self):
        pass

    def handleError(self, record):
        pass
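
# FakeLogger usage sketch: log lines are captured per level, and the StatsD
# delegate methods record calls in log_dict instead of sending anything.
#
#   logger = FakeLogger()
#   logger.warning('boom: %s', 'details')
#   logger.get_lines_for_level('warning')  # ['boom: details']
#   logger.get_lines_for_level('warn')     # raises KeyError - bad level name
#   logger.increment('some.metric')
#   logger.get_increment_counts()          # {'some.metric': 1}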


class DebugLogger(FakeLogger):
    """A simple stdout logging version of FakeLogger"""

    def __init__(self, *args, **kwargs):
        FakeLogger.__init__(self, *args, **kwargs)
        self.formatter = logging.Formatter(
            "%(server)s %(levelname)s: %(message)s")

    def handle(self, record):
        self._handle(record)
        print self.formatter.format(record)


class DebugLogAdapter(utils.LogAdapter):

    def _send_to_logger(name):
        def stub_fn(self, *args, **kwargs):
            return getattr(self.logger, name)(*args, **kwargs)
        return stub_fn

    # delegate to FakeLogger's mocks
    update_stats = _send_to_logger('update_stats')
    increment = _send_to_logger('increment')
    decrement = _send_to_logger('decrement')
    timing = _send_to_logger('timing')
    timing_since = _send_to_logger('timing_since')
    transfer_rate = _send_to_logger('transfer_rate')
    set_statsd_prefix = _send_to_logger('set_statsd_prefix')

    def __getattribute__(self, name):
        try:
            return object.__getattribute__(self, name)
        except AttributeError:
            return getattr(self.__dict__['logger'], name)


def debug_logger(name='test'):
    """get a named adapted debug logger"""
    return DebugLogAdapter(DebugLogger(), name)
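
# debug_logger() usage sketch: pass the adapter anywhere a Swift logger is
# expected so test output goes to stdout, e.g. (assuming a proxy app test):
#
#   self.app = proxy_server.Application(conf, logger=debug_logger('proxy'))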


original_syslog_handler = logging.handlers.SysLogHandler


def fake_syslog_handler():
    for attr in dir(original_syslog_handler):
        if attr.startswith('LOG'):
            setattr(FakeLogger, attr,
                    copy.copy(getattr(logging.handlers.SysLogHandler, attr)))
    FakeLogger.priority_map = \
        copy.deepcopy(logging.handlers.SysLogHandler.priority_map)

    logging.handlers.SysLogHandler = FakeLogger


if utils.config_true_value(
        get_config('unit_test').get('fake_syslog', 'False')):
    fake_syslog_handler()


class MockTrue(object):
    """
    Instances of MockTrue evaluate like True

    Any attr accessed on an instance of MockTrue will return a MockTrue
    instance. Any method called on an instance of MockTrue will return
    a MockTrue instance.

    >>> thing = MockTrue()
    >>> thing
    True
    >>> thing == True # True == True
    True
    >>> thing == False # True == False
    False
    >>> thing != True # True != True
    False
    >>> thing != False # True != False
    True
    >>> thing.attribute
    True
    >>> thing.method()
    True
    >>> thing.attribute.method()
    True
    >>> thing.method().attribute
    True

    """

    def __getattribute__(self, *args, **kwargs):
        return self

    def __call__(self, *args, **kwargs):
        return self

    def __repr__(*args, **kwargs):
        return repr(True)

    def __eq__(self, other):
        return other is True

    def __ne__(self, other):
        return other is not True


@contextmanager
def mock(update):
    returns = []
    deletes = []
    for key, value in update.items():
        imports = key.split('.')
        attr = imports.pop(-1)
        module = __import__(imports[0], fromlist=imports[1:])
        for modname in imports[1:]:
            module = getattr(module, modname)
        if hasattr(module, attr):
            returns.append((module, attr, getattr(module, attr)))
        else:
            deletes.append((module, attr))
        setattr(module, attr, value)
    try:
        yield True
    finally:
        for module, attr, value in returns:
            setattr(module, attr, value)
        for module, attr in deletes:
            delattr(module, attr)
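
# mock() usage sketch: patch attributes by dotted name; existing values are
# restored on exit and attributes that didn't exist before are deleted.
#
#   with mock({'swift.common.utils.HASH_PATH_PREFIX': 'startcap'}):
#       pass  # utils.HASH_PATH_PREFIX is 'startcap' inside this block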


def fake_http_connect(*code_iter, **kwargs):

    class FakeConn(object):

        def __init__(self, status, etag=None, body='', timestamp='1',
                     headers=None):
            # connect exception
            if isinstance(status, (Exception, Timeout)):
                raise status
            if isinstance(status, tuple):
                self.expect_status, self.status = status
            else:
                self.expect_status, self.status = (None, status)
            if not self.expect_status:
                # when a swift backend service returns a status before reading
                # from the body (mostly an error response) eventlet.wsgi will
                # respond with that status line immediately instead of 100
                # Continue, even if the client sent the Expect 100 header.
                # BufferedHttp and the proxy both see these error statuses
                # when they call getexpect, so our FakeConn tries to act like
                # our backend services and return certain types of responses
                # as expect statuses just like a real backend server would do.
                if self.status in (507, 412, 409):
                    self.expect_status = status
                else:
                    self.expect_status = 100
            self.reason = 'Fake'
            self.host = '1.2.3.4'
            self.port = '1234'
            self.sent = 0
            self.received = 0
            self.etag = etag
            self.body = body
            self.headers = headers or {}
            self.timestamp = timestamp
            if 'slow' in kwargs and isinstance(kwargs['slow'], list):
                try:
                    self._next_sleep = kwargs['slow'].pop(0)
                except IndexError:
                    self._next_sleep = None

        def getresponse(self):
            if isinstance(self.status, (Exception, Timeout)):
                raise self.status
            exc = kwargs.get('raise_exc')
            if exc:
                if isinstance(exc, (Exception, Timeout)):
                    raise exc
                raise Exception('test')
            if kwargs.get('raise_timeout_exc'):
                raise Timeout()
            return self

        def getexpect(self):
            if isinstance(self.expect_status, (Exception, Timeout)):
                raise self.expect_status
            headers = {}
            if self.expect_status == 409:
                headers['X-Backend-Timestamp'] = self.timestamp
            return FakeConn(self.expect_status, headers=headers)

        def getheaders(self):
            etag = self.etag
            if not etag:
                if isinstance(self.body, str):
                    etag = '"' + md5(self.body).hexdigest() + '"'
                else:
                    etag = '"68b329da9893e34099c7d8ad5cb9c940"'

            headers = swob.HeaderKeyDict({
                'content-length': len(self.body),
                'content-type': 'x-application/test',
                'x-timestamp': self.timestamp,
                'x-backend-timestamp': self.timestamp,
                'last-modified': self.timestamp,
                'x-object-meta-test': 'testing',
                'x-delete-at': '9876543210',
                'etag': etag,
                'x-works': 'yes',
            })
            if self.status // 100 == 2:
                headers['x-account-container-count'] = \
                    kwargs.get('count', 12345)
            if not self.timestamp:
                # when timestamp is None, HeaderKeyDict raises KeyError
                headers.pop('x-timestamp', None)
            try:
                if container_ts_iter.next() is False:
                    headers['x-container-timestamp'] = '1'
            except StopIteration:
                pass
            am_slow, value = self.get_slow()
            if am_slow:
                headers['content-length'] = '4'
            headers.update(self.headers)
            return headers.items()

        def get_slow(self):
            if 'slow' in kwargs and isinstance(kwargs['slow'], list):
                if self._next_sleep is not None:
                    return True, self._next_sleep
                else:
                    return False, 0.01
            if kwargs.get('slow') and isinstance(kwargs['slow'], Number):
                return True, kwargs['slow']
            return bool(kwargs.get('slow')), 0.1

        def read(self, amt=None):
            am_slow, value = self.get_slow()
            if am_slow:
                if self.sent < 4:
                    self.sent += 1
                    sleep(value)
                    return ' '
            rv = self.body[:amt]
            self.body = self.body[amt:]
            return rv

        def send(self, amt=None):
            am_slow, value = self.get_slow()
            if am_slow:
                if self.received < 4:
                    self.received += 1
                    sleep(value)

        def getheader(self, name, default=None):
            return swob.HeaderKeyDict(self.getheaders()).get(name, default)

        def close(self):
            pass

    timestamps_iter = iter(kwargs.get('timestamps') or ['1'] * len(code_iter))
    etag_iter = iter(kwargs.get('etags') or [None] * len(code_iter))
    if isinstance(kwargs.get('headers'), list):
        headers_iter = iter(kwargs['headers'])
    else:
        headers_iter = iter([kwargs.get('headers', {})] * len(code_iter))

    x = kwargs.get('missing_container', [False] * len(code_iter))
    if not isinstance(x, (tuple, list)):
        x = [x] * len(code_iter)
    container_ts_iter = iter(x)
    code_iter = iter(code_iter)
    static_body = kwargs.get('body', None)
    body_iter = kwargs.get('body_iter', None)
    if body_iter:
        body_iter = iter(body_iter)

    def connect(*args, **ckwargs):
        if kwargs.get('slow_connect', False):
            sleep(0.1)
        if 'give_content_type' in kwargs:
            if len(args) >= 7 and 'Content-Type' in args[6]:
                kwargs['give_content_type'](args[6]['Content-Type'])
            else:
                kwargs['give_content_type']('')
        if 'give_connect' in kwargs:
            kwargs['give_connect'](*args, **ckwargs)
        status = code_iter.next()
        etag = etag_iter.next()
        headers = headers_iter.next()
        timestamp = timestamps_iter.next()

        if status <= 0:
            raise HTTPException()
        if body_iter is None:
            body = static_body or ''
        else:
            body = body_iter.next()
        return FakeConn(status, etag, body=body, timestamp=timestamp,
                        headers=headers)

    connect.code_iter = code_iter

    return connect
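
# fake_http_connect usage sketch: each positional status feeds one connect()
# call; a (expect_status, status) tuple, an Exception/Timeout instance, or a
# status <= 0 can simulate 100-continue handling and connection failures.
#
#   new_connect = fake_http_connect(200, 201, 503, body='chunk')
#   conn = new_connect('1.2.3.4', '1234', 'sda1', 0, 'GET', '/a/c/o')
#   resp = conn.getresponse()  # status 200, read() returns 'chunk'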


@contextmanager
def mocked_http_conn(*args, **kwargs):
    requests = []

    def capture_requests(ip, port, method, path, headers, qs, ssl):
        req = {
            'ip': ip,
            'port': port,
            'method': method,
            'path': path,
            'headers': headers,
            'qs': qs,
            'ssl': ssl,
        }
        requests.append(req)
    kwargs.setdefault('give_connect', capture_requests)
    fake_conn = fake_http_connect(*args, **kwargs)
    fake_conn.requests = requests
    with mocklib.patch('swift.common.bufferedhttp.http_connect_raw',
                       new=fake_conn):
        yield fake_conn
    left_over_status = list(fake_conn.code_iter)
    if left_over_status:
        raise AssertionError('left over status %r' % left_over_status)
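
# mocked_http_conn usage sketch ("app_under_test" is hypothetical): backend
# requests are captured on fake_conn.requests, and any unconsumed statuses
# raise an AssertionError when the context exits.
#
#   with mocked_http_conn(200, 200, 200) as fake_conn:
#       app_under_test.make_three_backend_requests()
#   self.assertEqual(3, len(fake_conn.requests))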