2010-07-12 17:03:45 -05:00
|
|
|
""" Swift tests """
|
|
|
|
|
2010-11-11 16:41:07 -06:00
|
|
|
import os
|
2012-04-30 16:38:15 -04:00
|
|
|
import copy
|
Adding StatsD logging to Swift.
Documentation, including a list of metrics reported and their semantics,
is in the Admin Guide in a new section, "Reporting Metrics to StatsD".
An optional "metric prefix" may be configured which will be prepended to
every metric name sent to StatsD.
Here is the rationale for doing a deep integration like this versus only
sending metrics to StatsD in middleware. It's the only way to report
some internal activities of Swift in a real-time manner. So to have one
way of reporting to StatsD and one place/style of configuration, even
some things (like, say, timing of PUT requests into the proxy-server)
which could be logged via middleware are consistently logged the same
way (deep integration via the logger delegate methods).
When log_statsd_host is configured, get_logger() injects a
swift.common.utils.StatsdClient object into the logger as
logger.statsd_client. Then a set of delegate methods on LogAdapter
either pass through to the StatsdClient object or become no-ops. This
allows StatsD logging to look like:
self.logger.increment('some.metric.here')
and do the right thing in all cases and with no messy conditional logic.
I wanted to use the pystatsd module for the StatsD client, but the
version on PyPi is lagging the git repo (and is missing both the prefix
functionality and timing_since() method). So I wrote my
swift.common.utils.StatsdClient. The interface is the same as
pystatsd.Client, but the code was written from scratch. It's pretty
simple, and the tests I added cover it. This also frees Swift from an
optional dependency on the pystatsd module, making this feature easier
to enable.
There's test coverage for the new code and all existing tests continue
to pass.
Refactored out _one_audit_pass() method in swift/account/auditor.py and
swift/container/auditor.py.
Fixed some misc. PEP8 violations.
Misc test cleanups and refactorings (particularly the way "fake logging"
is handled).
Change-Id: Ie968a9ae8771f59ee7591e2ae11999c44bfe33b2
2012-04-01 16:47:08 -07:00
|
|
|
import logging
|
|
|
|
from sys import exc_info
|
2010-11-11 16:41:07 -06:00
|
|
|
from contextlib import contextmanager
|
2012-08-17 17:00:50 -07:00
|
|
|
from collections import defaultdict
|
2010-11-11 16:41:07 -06:00
|
|
|
from tempfile import NamedTemporaryFile
|
2010-07-12 17:03:45 -05:00
|
|
|
from eventlet.green import socket
|
2011-02-11 13:18:19 -06:00
|
|
|
from tempfile import mkdtemp
|
|
|
|
from shutil import rmtree
|
2012-04-30 16:38:15 -04:00
|
|
|
from test import get_config
|
2013-03-26 20:42:26 +00:00
|
|
|
from swift.common.utils import config_true_value
|
2013-03-20 19:26:45 -07:00
|
|
|
from hashlib import md5
|
2013-03-26 20:42:26 +00:00
|
|
|
from eventlet import sleep, Timeout
|
2012-04-30 16:38:15 -04:00
|
|
|
import logging.handlers
|
2013-03-26 20:42:26 +00:00
|
|
|
from httplib import HTTPException
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-03-30 15:55:29 +03:00
|
|
|
class FakeRing(object):
    """Minimal stand-in for swift.common.ring.Ring used by unit tests.

    Hands out deterministic fake devices (10.0.0.x addresses, ports
    1000 + x, devices sda/sdb/...) so tests can exercise node-selection
    logic without a real ring file.  Every name maps to partition 1.
    """

    def __init__(self, replicas=3, max_more_nodes=0):
        # 9 total nodes (6 more past the initial 3) is the cap, no matter
        # if this is set higher, or R^2 for R replicas
        self.replicas = replicas
        self.max_more_nodes = max_more_nodes
        # memoized device dicts, keyed by device index
        self.devs = {}

    def set_replicas(self, replicas):
        """Change the replica count and forget any memoized devices."""
        self.replicas = replicas
        self.devs = {}

    @property
    def replica_count(self):
        return self.replicas

    def get_part(self, account, container=None, obj=None):
        # the fake ring has a single partition
        return 1

    def get_nodes(self, account, container=None, obj=None):
        """Return (partition, [device dict, ...]).

        Devices are created lazily and memoized in self.devs so repeated
        calls hand back the very same dict objects.
        """
        devs = []
        # range (not py2-only xrange): identical iteration behavior
        for x in range(self.replicas):
            dev = self.devs.get(x)
            if dev is None:
                dev = self.devs[x] = \
                    {'ip': '10.0.0.%s' % x,
                     'port': 1000 + x,
                     'device': 'sd' + (chr(ord('a') + x)),
                     'zone': x % 3,
                     'region': x % 2,
                     'id': x}
            devs.append(dev)
        return 1, devs

    def get_part_nodes(self, part):
        return self.get_nodes('blah')[1]

    def get_more_nodes(self, part):
        """Yield handoff devices past the primaries.

        replicas^2 is the true cap on the total node count, no matter
        how large max_more_nodes is set.
        """
        for x in range(self.replicas, min(self.replicas + self.max_more_nodes,
                                          self.replicas * self.replicas)):
            yield {'ip': '10.0.0.%s' % x,
                   'port': 1000 + x,
                   'device': 'sda',
                   'zone': x % 3,
                   'region': x % 2,
                   'id': x}
|
2013-03-30 15:55:29 +03:00
|
|
|
|
|
|
|
|
|
|
|
class FakeMemcache(object):
    """In-memory dict-backed stand-in for swift's memcache client."""

    def __init__(self):
        # key -> stored value; no expiry is simulated
        self.store = {}

    def get(self, key):
        """Return the stored value, or None if the key is absent."""
        return self.store.get(key)

    def keys(self):
        return self.store.keys()

    def set(self, key, value, time=0):
        # 'time' (expiry) is accepted for interface parity but ignored
        self.store[key] = value
        return True

    def incr(self, key, time=0):
        """Increment the key (initializing to 0) and return the new value."""
        self.store[key] = self.store.setdefault(key, 0) + 1
        return self.store[key]

    @contextmanager
    def soft_lock(self, key, timeout=0, retries=5):
        # tests need no real locking; the "lock" always succeeds
        yield True

    def delete(self, key):
        """Remove the key if present; always report success like memcache."""
        # pop with a default only swallows the missing-key case, not
        # arbitrary errors the old broad `except Exception` used to hide
        self.store.pop(key, None)
        return True
|
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def readuntil2crlfs(fd):
    """Read from fd one character at a time until two consecutive CRLFs
    ('\\r\\n\\r\\n') have been seen, and return everything read,
    including the terminating CRLFs.

    :raises ValueError: if the stream ends before two CRLFs arrive.
    """
    # accumulate chunks in a list and join once at the end: O(n) instead
    # of the quadratic rv = rv + c string concatenation
    chunks = []
    lc = ''
    crlfs = 0
    while crlfs < 2:
        c = fd.read(1)
        if not c:
            raise ValueError("didn't get two CRLFs; just got %r"
                             % ''.join(chunks))
        chunks.append(c)
        if c == '\r' and lc != '\n':
            # a bare \r (not completing a \n\r sequence) restarts the count
            crlfs = 0
        if lc == '\r' and c == '\n':
            crlfs += 1
        lc = c
    return ''.join(chunks)
|
|
|
|
|
|
|
|
|
|
|
|
def connect_tcp(hostport):
    """Open a green TCP socket connected to (host, port) and return it."""
    sock = socket.socket()
    sock.connect(hostport)
    return sock
|
2010-07-29 13:30:16 -05:00
|
|
|
|
2010-11-11 16:41:07 -06:00
|
|
|
|
|
|
|
@contextmanager
def tmpfile(content):
    """Yield the path of a temp file holding str(content); unlink on exit."""
    tmp = NamedTemporaryFile('w', delete=False)
    path = tmp.name
    with tmp:
        tmp.write(str(content))
    try:
        yield path
    finally:
        # remove the file even if the caller's block raised
        os.unlink(path)
|
|
|
|
|
2011-01-19 14:18:37 -06:00
|
|
|
# Fake in-memory extended-attribute store used by the _setxattr/_getxattr
# monkey patches below; maps inode number -> {attr name: value}.
xattr_data = {}
|
|
|
|
|
2011-01-19 16:19:43 -06:00
|
|
|
|
2011-01-19 14:18:37 -06:00
|
|
|
def _get_inode(fd):
|
|
|
|
if not isinstance(fd, int):
|
|
|
|
try:
|
|
|
|
fd = fd.fileno()
|
|
|
|
except AttributeError:
|
|
|
|
return os.stat(fd).st_ino
|
|
|
|
return os.fstat(fd).st_ino
|
|
|
|
|
2011-01-19 16:19:43 -06:00
|
|
|
|
2011-01-19 14:18:37 -06:00
|
|
|
def _setxattr(fd, k, v):
    """Record extended attribute k=v for fd in the fake xattr store."""
    inode = _get_inode(fd)
    attrs = xattr_data.setdefault(inode, {})
    attrs[k] = v
|
|
|
|
|
2011-01-19 16:19:43 -06:00
|
|
|
|
2011-01-19 14:18:37 -06:00
|
|
|
def _getxattr(fd, k):
    """Return extended attribute k for fd; raise IOError when unset."""
    value = xattr_data.get(_get_inode(fd), {}).get(k)
    # NOTE: a falsy stored value also reads as "missing", matching the
    # original `if not data` behavior
    if not value:
        raise IOError
    return value
|
|
|
|
|
|
|
|
# Replace the real xattr module's functions with the in-memory fakes above
# so tests can exercise metadata reads/writes without filesystem xattr
# support.
import xattr
xattr.setxattr = _setxattr
xattr.getxattr = _getxattr
|
|
|
|
|
2010-11-11 16:41:07 -06:00
|
|
|
|
2011-02-11 13:18:19 -06:00
|
|
|
@contextmanager
def temptree(files, contents=''):
    """Create a temp dir populated with `files`, yield its path, clean up.

    `contents` supplies per-file contents positionally; files beyond
    len(contents) are created empty.  Absolute paths are re-rooted
    inside the temp dir.
    """
    n = len(files)
    # pad (or truncate) so each file gets exactly one content value
    per_file = (list(contents) + [''] * n)[:n]
    tempdir = mkdtemp()
    for rel_path, data in zip(files, per_file):
        if os.path.isabs(rel_path):
            rel_path = '.' + rel_path
        target = os.path.join(tempdir, rel_path)
        parent = os.path.dirname(target)
        if not os.path.exists(parent):
            os.makedirs(parent)
        with open(target, 'w') as f:
            f.write(str(data))
    try:
        yield tempdir
    finally:
        rmtree(tempdir)
|
|
|
|
|
|
|
|
|
Adding StatsD logging to Swift.
Documentation, including a list of metrics reported and their semantics,
is in the Admin Guide in a new section, "Reporting Metrics to StatsD".
An optional "metric prefix" may be configured which will be prepended to
every metric name sent to StatsD.
Here is the rationale for doing a deep integration like this versus only
sending metrics to StatsD in middleware. It's the only way to report
some internal activities of Swift in a real-time manner. So to have one
way of reporting to StatsD and one place/style of configuration, even
some things (like, say, timing of PUT requests into the proxy-server)
which could be logged via middleware are consistently logged the same
way (deep integration via the logger delegate methods).
When log_statsd_host is configured, get_logger() injects a
swift.common.utils.StatsdClient object into the logger as
logger.statsd_client. Then a set of delegate methods on LogAdapter
either pass through to the StatsdClient object or become no-ops. This
allows StatsD logging to look like:
self.logger.increment('some.metric.here')
and do the right thing in all cases and with no messy conditional logic.
I wanted to use the pystatsd module for the StatsD client, but the
version on PyPi is lagging the git repo (and is missing both the prefix
functionality and timing_since() method). So I wrote my
swift.common.utils.StatsdClient. The interface is the same as
pystatsd.Client, but the code was written from scratch. It's pretty
simple, and the tests I added cover it. This also frees Swift from an
optional dependency on the pystatsd module, making this feature easier
to enable.
There's test coverage for the new code and all existing tests continue
to pass.
Refactored out _one_audit_pass() method in swift/account/auditor.py and
swift/container/auditor.py.
Fixed some misc. PEP8 violations.
Misc test cleanups and refactorings (particularly the way "fake logging"
is handled).
Change-Id: Ie968a9ae8771f59ee7591e2ae11999c44bfe33b2
2012-04-01 16:47:08 -07:00
|
|
|
class NullLoggingHandler(logging.Handler):
    """Logging handler that silently discards every record."""

    def emit(self, record):
        # intentionally a no-op
        pass
|
|
|
|
|
|
|
|
|
|
|
|
class FakeLogger(object):
    # a thread safe logger
    """Logger double that records calls instead of logging.

    Each log/StatsD method appends its (args, kwargs) to
    self.log_dict[<method name>] so tests can assert on exactly what was
    logged.  It also implements enough of the logging.Handler interface
    (acquire/release/createLock/emit/handle/flush/handleError) as no-ops
    to be installed where a real handler is expected.
    """

    def __init__(self, *args, **kwargs):
        self._clear()
        self.level = logging.NOTSET
        if 'facility' in kwargs:
            self.facility = kwargs['facility']

    def _clear(self):
        # method name -> list of recorded calls
        self.log_dict = defaultdict(list)

    def _store_in(store_name):
        # class-body helper (deliberately no `self`): builds a stub method
        # that records its calls under store_name in log_dict
        def stub_fn(self, *args, **kwargs):
            self.log_dict[store_name].append((args, kwargs))
        return stub_fn

    error = _store_in('error')
    info = _store_in('info')
    warning = _store_in('warning')
    debug = _store_in('debug')

    def exception(self, *args, **kwargs):
        # also capture the active exception's message text
        self.log_dict['exception'].append((args, kwargs,
                                           str(exc_info()[1])))
        # parenthesized call form: identical output under py2's print
        # statement and valid syntax under py3 (old bare `print x` form
        # was py2-only)
        print('FakeLogger Exception: %s' % self.log_dict)

    # mock out the StatsD logging methods:
    increment = _store_in('increment')
    decrement = _store_in('decrement')
    timing = _store_in('timing')
    timing_since = _store_in('timing_since')
    update_stats = _store_in('update_stats')
    set_statsd_prefix = _store_in('set_statsd_prefix')

    def get_increments(self):
        """Return the metric names passed to increment(), in call order."""
        return [call[0][0] for call in self.log_dict['increment']]

    def get_increment_counts(self):
        """Return {metric name: number of increment() calls}."""
        counts = {}
        for metric in self.get_increments():
            # dict.get with default replaces the manual membership check
            counts[metric] = counts.get(metric, 0) + 1
        return counts

    def setFormatter(self, obj):
        self.formatter = obj

    def close(self):
        # forget everything recorded so far
        self._clear()

    def set_name(self, name):
        # don't touch _handlers
        self._name = name

    def acquire(self):
        pass

    def release(self):
        pass

    def createLock(self):
        pass

    def emit(self, record):
        pass

    def handle(self, record):
        pass

    def flush(self):
        pass

    def handleError(self, record):
        pass
|
|
|
|
|
Adding StatsD logging to Swift.
Documentation, including a list of metrics reported and their semantics,
is in the Admin Guide in a new section, "Reporting Metrics to StatsD".
An optional "metric prefix" may be configured which will be prepended to
every metric name sent to StatsD.
Here is the rationale for doing a deep integration like this versus only
sending metrics to StatsD in middleware. It's the only way to report
some internal activities of Swift in a real-time manner. So to have one
way of reporting to StatsD and one place/style of configuration, even
some things (like, say, timing of PUT requests into the proxy-server)
which could be logged via middleware are consistently logged the same
way (deep integration via the logger delegate methods).
When log_statsd_host is configured, get_logger() injects a
swift.common.utils.StatsdClient object into the logger as
logger.statsd_client. Then a set of delegate methods on LogAdapter
either pass through to the StatsdClient object or become no-ops. This
allows StatsD logging to look like:
self.logger.increment('some.metric.here')
and do the right thing in all cases and with no messy conditional logic.
I wanted to use the pystatsd module for the StatsD client, but the
version on PyPi is lagging the git repo (and is missing both the prefix
functionality and timing_since() method). So I wrote my
swift.common.utils.StatsdClient. The interface is the same as
pystatsd.Client, but the code was written from scratch. It's pretty
simple, and the tests I added cover it. This also frees Swift from an
optional dependency on the pystatsd module, making this feature easier
to enable.
There's test coverage for the new code and all existing tests continue
to pass.
Refactored out _one_audit_pass() method in swift/account/auditor.py and
swift/container/auditor.py.
Fixed some misc. PEP8 violations.
Misc test cleanups and refactorings (particularly the way "fake logging"
is handled).
Change-Id: Ie968a9ae8771f59ee7591e2ae11999c44bfe33b2
2012-04-01 16:47:08 -07:00
|
|
|
|
2012-04-30 16:38:15 -04:00
|
|
|
# remember the real SysLogHandler so fake_syslog_handler() can copy its
# LOG_* constants and priority_map before replacing it with FakeLogger
original_syslog_handler = logging.handlers.SysLogHandler
|
|
|
|
|
|
|
|
|
|
|
|
def fake_syslog_handler():
    """Globally replace logging.handlers.SysLogHandler with FakeLogger.

    The real handler's LOG_* facility/priority constants and its
    priority_map are copied onto FakeLogger first, so code that reads
    them keeps working after the swap.
    """
    for attr_name in dir(original_syslog_handler):
        if attr_name.startswith('LOG'):
            value = copy.copy(getattr(logging.handlers.SysLogHandler,
                                      attr_name))
            setattr(FakeLogger, attr_name, value)
    FakeLogger.priority_map = copy.deepcopy(
        logging.handlers.SysLogHandler.priority_map)

    logging.handlers.SysLogHandler = FakeLogger
|
|
|
|
|
|
|
|
|
2012-10-19 13:50:57 -07:00
|
|
|
# Swap in the syslog-faking FakeLogger when the 'fake_syslog' option in
# the [unit_test] section of the test config is set to a true value.
if config_true_value(get_config('unit_test').get('fake_syslog', 'False')):
    fake_syslog_handler()
|
|
|
|
|
2011-03-15 22:12:03 -07:00
|
|
|
|
2010-07-29 13:30:16 -05:00
|
|
|
class MockTrue(object):
    """
    Instances of MockTrue evaluate like True

    Any attribute access on a MockTrue instance returns a MockTrue
    instance, and calling a MockTrue instance also returns a MockTrue
    instance, so arbitrary chains of attributes and calls all evaluate
    like True.

    >>> thing = MockTrue()
    >>> thing
    True
    >>> thing == True # True == True
    True
    >>> thing == False # True == False
    False
    >>> thing != True # True != True
    False
    >>> thing != False # True != False
    True
    >>> thing.attribute
    True
    >>> thing.method()
    True
    >>> thing.attribute.method()
    True
    >>> thing.method().attribute
    True

    """

    def __getattribute__(self, *args, **kwargs):
        # every attribute access hands back this same instance
        return self

    def __call__(self, *args, **kwargs):
        # calling the instance is a no-op that returns itself
        return self

    def __repr__(*args, **kwargs):
        return repr(True)

    def __eq__(self, other):
        return other is True

    def __ne__(self, other):
        return not (other is True)
|
2012-08-21 12:51:59 -07:00
|
|
|
|
|
|
|
|
|
|
|
@contextmanager
def mock(update):
    """Temporarily set dotted-name module attributes to new values.

    `update` maps 'pkg.module.attr' -> replacement value.  On exit,
    pre-existing attributes are restored to their originals and
    attributes that did not exist before are deleted.
    """
    restore = []   # (module, attr, original value) to put back
    remove = []    # (module, attr) that did not exist before
    for dotted, replacement in update.items():
        parts = dotted.split('.')
        attr = parts.pop(-1)
        module = __import__(parts[0], fromlist=parts[1:])
        for name in parts[1:]:
            module = getattr(module, name)
        if hasattr(module, attr):
            restore.append((module, attr, getattr(module, attr)))
        else:
            remove.append((module, attr))
        setattr(module, attr, replacement)
    try:
        yield True
    finally:
        for module, attr, original in restore:
            setattr(module, attr, original)
        for module, attr in remove:
            delattr(module, attr)
|
2013-03-20 19:26:45 -07:00
|
|
|
|
|
|
|
|
|
|
|
def fake_http_connect(*code_iter, **kwargs):
    """
    Return a fake replacement for an http connect function that hands out
    one canned response per call.

    :param code_iter: one HTTP status int per expected request; an entry may
        also be a (status, expect_status) tuple to split the final status
        from the 100-continue expect status.  A status <= 0 makes connect()
        raise HTTPException instead of returning a connection.
    :param kwargs: optional knobs consumed by the fake connections:
        * timestamps: list of x-timestamp header values, one per response
        * etags: list of etag header values, one per response
        * headers: dict of extra headers for every response, or a list of
          dicts (one per response)
        * missing_container: bool or list of bools; a False entry adds an
          x-container-timestamp header to that response
        * body / body_iter: static body for every response, or an iterable
          of per-response bodies
        * count: value for x-account-container-count on 2xx responses
        * slow: dribble read()/send() data with 0.1s sleeps
        * slow_connect: sleep 0.1s before each connect
        * raise_exc / raise_timeout_exc: make getresponse() raise
        * give_content_type / give_connect: callbacks invoked with the
          request's Content-Type / the full connect arguments

    NOTE(review): Timeout, HTTPException, md5 and sleep are assumed to be
    imported at the top of this file (not visible in this chunk).
    """

    class FakeConn(object):
        # Plays both the httplib-style connection and its response object:
        # getresponse() returns self.

        def __init__(self, status, etag=None, body='', timestamp='1',
                     expect_status=None, headers=None):
            self.status = status
            # the 100-continue expect status defaults to the final status
            if expect_status is None:
                self.expect_status = self.status
            else:
                self.expect_status = expect_status
            self.reason = 'Fake'
            self.host = '1.2.3.4'
            self.port = '1234'
            self.sent = 0       # chunks dribbled out by read() in slow mode
            self.received = 0   # chunks accepted by send() in slow mode
            self.etag = etag
            self.body = body
            self.headers = headers or {}
            self.timestamp = timestamp

        def getresponse(self):
            # honor the error-injection kwargs of the enclosing factory
            if kwargs.get('raise_exc'):
                raise Exception('test')
            if kwargs.get('raise_timeout_exc'):
                raise Timeout()
            return self

        def getexpect(self):
            # Magic negative expect_status values map to canned
            # 100-continue outcomes; anything else gets a 100 Continue.
            if self.expect_status == -2:
                raise HTTPException()
            if self.expect_status == -3:
                return FakeConn(507)
            if self.expect_status == -4:
                return FakeConn(201)
            return FakeConn(100)

        def getheaders(self):
            # Derive an etag from the body unless one was supplied.
            etag = self.etag
            if not etag:
                if isinstance(self.body, str):
                    etag = '"' + md5(self.body).hexdigest() + '"'
                else:
                    # md5 of '\n', used when the body is not a plain str
                    etag = '"68b329da9893e34099c7d8ad5cb9c940"'

            headers = {'content-length': len(self.body),
                       'content-type': 'x-application/test',
                       'x-timestamp': self.timestamp,
                       'last-modified': self.timestamp,
                       'x-object-meta-test': 'testing',
                       'x-delete-at': '9876543210',
                       'etag': etag,
                       'x-works': 'yes'}
            if self.status // 100 == 2:
                headers['x-account-container-count'] = \
                    kwargs.get('count', 12345)
            if not self.timestamp:
                del headers['x-timestamp']
            try:
                # container_ts_iter is captured from the enclosing factory
                # scope (defined after this class, bound at call time); a
                # False entry means the container is NOT missing, so the
                # timestamp header is included.
                if container_ts_iter.next() is False:
                    headers['x-container-timestamp'] = '1'
            except StopIteration:
                pass
            if 'slow' in kwargs:
                headers['content-length'] = '4'
            # per-response extra headers win over the defaults above
            headers.update(self.headers)
            return headers.items()

        def read(self, amt=None):
            # In slow mode, dribble out four single-space chunks with a
            # 0.1s pause before serving the real body.
            if 'slow' in kwargs:
                if self.sent < 4:
                    self.sent += 1
                    sleep(0.1)
                    return ' '
            # NOTE(review): with amt=None both slices cover the whole body,
            # so repeated read() calls never drain it -- confirm callers
            # always pass amt before changing this.
            rv = self.body[:amt]
            self.body = self.body[amt:]
            return rv

        def send(self, amt=None):
            # Sent data is discarded; this only simulates a slow wire for
            # the first four send() calls when 'slow' is requested.
            if 'slow' in kwargs:
                if self.received < 4:
                    self.received += 1
                    sleep(0.1)

        def getheader(self, name, default=None):
            # header names are matched case-insensitively
            return dict(self.getheaders()).get(name.lower(), default)

    # One iterator per per-response knob, so each connect() call consumes
    # the next canned value in order.
    timestamps_iter = iter(kwargs.get('timestamps') or ['1'] * len(code_iter))
    etag_iter = iter(kwargs.get('etags') or [None] * len(code_iter))
    if isinstance(kwargs.get('headers'), list):
        headers_iter = iter(kwargs['headers'])
    else:
        headers_iter = iter([kwargs.get('headers', {})] * len(code_iter))

    x = kwargs.get('missing_container', [False] * len(code_iter))
    if not isinstance(x, (tuple, list)):
        x = [x] * len(code_iter)
    container_ts_iter = iter(x)
    code_iter = iter(code_iter)
    static_body = kwargs.get('body', None)
    body_iter = kwargs.get('body_iter', None)
    if body_iter:
        body_iter = iter(body_iter)

    def connect(*args, **ckwargs):
        # The returned stand-in for the real connect function.
        # NOTE(review): args[6] is presumably the request headers dict
        # (matching http_connect's positional signature) -- confirm against
        # callers.
        if kwargs.get('slow_connect', False):
            sleep(0.1)
        if 'give_content_type' in kwargs:
            if len(args) >= 7 and 'Content-Type' in args[6]:
                kwargs['give_content_type'](args[6]['Content-Type'])
            else:
                kwargs['give_content_type']('')
        if 'give_connect' in kwargs:
            kwargs['give_connect'](*args, **ckwargs)
        status = code_iter.next()
        # a tuple entry carries a separate 100-continue expect status
        if isinstance(status, tuple):
            status, expect_status = status
        else:
            expect_status = status
        etag = etag_iter.next()
        headers = headers_iter.next()
        timestamp = timestamps_iter.next()

        # non-positive statuses simulate a connection-level failure
        if status <= 0:
            raise HTTPException()
        if body_iter is None:
            body = static_body or ''
        else:
            body = body_iter.next()
        return FakeConn(status, etag, body=body, timestamp=timestamp,
                        expect_status=expect_status, headers=headers)

    return connect
|