2013-08-31 22:36:58 -04:00
|
|
|
# -*- coding:utf-8 -*-
|
2013-09-20 01:00:54 +08:00
|
|
|
# Copyright (c) 2010-2012 OpenStack Foundation
|
2010-07-12 17:03:45 -05:00
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
|
|
# implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
|
|
|
|
2013-08-31 22:36:58 -04:00
|
|
|
"""Tests for swift.common.utils"""
|
2023-04-17 19:08:32 +01:00
|
|
|
import itertools
|
2013-10-07 21:28:48 -07:00
|
|
|
from collections import defaultdict
|
Socket errors don't warrant tracebacks when talking to memcached
Currently, timeouts when talking to memcached cause log lines like
ERROR:root:Timeout connecting to memcached: 192.168.23.62:11211
Meanwhile, socket errors (which you'd expect to be about as common
as timeouts) cause log lines like
ERROR:root:Error talking to memcached: 192.168.23.70:11211
Traceback (most recent call last):
File "/usr/lib/pymodules/python2.7/swift/common/memcached.py", line 293, in set
(key, flags, timeout, len(value), value))
File "/usr/lib/python2.7/dist-packages/eventlet/greenio/base.py", line 390, in sendall
tail = self.send(data, flags)
File "/usr/lib/python2.7/dist-packages/eventlet/greenio/base.py", line 384, in send
return self._send_loop(self.fd.send, data, flags)
File "/usr/lib/python2.7/dist-packages/eventlet/greenio/base.py", line 371, in _send_loop
return send_method(data, *args)
error: [Errno 32] Broken pipe
... or ...
ERROR:root:Error connecting to memcached: 192.168.23.73:11211
Traceback (most recent call last):
File "/opt/ss/lib/python2.7/site-packages/swift/common/memcached.py", line 239, in _get_conns
fp, sock = self._client_cache[server].get()
File "/opt/ss/lib/python2.7/site-packages/swift/common/memcached.py", line 135, in get
fp, sock = self.create()
File "/opt/ss/lib/python2.7/site-packages/swift/common/memcached.py", line 128, in create
sock.connect((host, int(port)))
File "/opt/ss/lib/python2.7/site-packages/eventlet/greenio/base.py", line 237, in connect
while not socket_connect(fd, address):
File "/opt/ss/lib/python2.7/site-packages/eventlet/greenio/base.py", line 39, in socket_connect
raise socket.error(err, errno.errorcode[err])
error: [Errno 101] ENETUNREACH
... which seems excessive. Now, socket errors are logged as normal
errors, without the tracebacks.
Change-Id: I71a2c4786c0406dbc43b829cad5a0c7e2c45de21
2017-08-22 05:31:59 +00:00
|
|
|
import errno
|
2019-05-29 18:14:17 -07:00
|
|
|
import io
|
2020-06-09 10:50:07 -07:00
|
|
|
import logging
|
2015-09-03 12:19:05 +10:00
|
|
|
import six
|
2010-07-12 17:03:45 -05:00
|
|
|
import socket
|
|
|
|
import time
|
|
|
|
import unittest
|
Socket errors don't warrant tracebacks when talking to memcached
Currently, timeouts when talking to memcached cause log lines like
ERROR:root:Timeout connecting to memcached: 192.168.23.62:11211
Meanwhile, socket errors (which you'd expect to be about as common
as timeouts) cause log lines like
ERROR:root:Error talking to memcached: 192.168.23.70:11211
Traceback (most recent call last):
File "/usr/lib/pymodules/python2.7/swift/common/memcached.py", line 293, in set
(key, flags, timeout, len(value), value))
File "/usr/lib/python2.7/dist-packages/eventlet/greenio/base.py", line 390, in sendall
tail = self.send(data, flags)
File "/usr/lib/python2.7/dist-packages/eventlet/greenio/base.py", line 384, in send
return self._send_loop(self.fd.send, data, flags)
File "/usr/lib/python2.7/dist-packages/eventlet/greenio/base.py", line 371, in _send_loop
return send_method(data, *args)
error: [Errno 32] Broken pipe
... or ...
ERROR:root:Error connecting to memcached: 192.168.23.73:11211
Traceback (most recent call last):
File "/opt/ss/lib/python2.7/site-packages/swift/common/memcached.py", line 239, in _get_conns
fp, sock = self._client_cache[server].get()
File "/opt/ss/lib/python2.7/site-packages/swift/common/memcached.py", line 135, in get
fp, sock = self.create()
File "/opt/ss/lib/python2.7/site-packages/swift/common/memcached.py", line 128, in create
sock.connect((host, int(port)))
File "/opt/ss/lib/python2.7/site-packages/eventlet/greenio/base.py", line 237, in connect
while not socket_connect(fd, address):
File "/opt/ss/lib/python2.7/site-packages/eventlet/greenio/base.py", line 39, in socket_connect
raise socket.error(err, errno.errorcode[err])
error: [Errno 101] ENETUNREACH
... which seems excessive. Now, socket errors are logged as normal
errors, without the tracebacks.
Change-Id: I71a2c4786c0406dbc43b829cad5a0c7e2c45de21
2017-08-22 05:31:59 +00:00
|
|
|
import os
|
|
|
|
|
|
|
|
import mock
|
2021-12-07 13:24:19 +11:00
|
|
|
from six.moves.configparser import NoSectionError, NoOptionError
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-09-04 22:20:44 +00:00
|
|
|
from eventlet import GreenPool, sleep, Queue
|
|
|
|
from eventlet.pools import Pool
|
2021-12-07 13:24:19 +11:00
|
|
|
from eventlet.green import ssl
|
2013-09-04 22:20:44 +00:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
from swift.common import memcached
|
2023-04-19 15:54:50 -07:00
|
|
|
from swift.common.memcached import MemcacheConnectionError, md5hash, \
|
2024-03-14 13:10:00 -07:00
|
|
|
MemcacheCommand, EXPTIME_MAXDELTA
|
2021-06-11 11:29:40 +10:00
|
|
|
from swift.common.utils import md5, human_readable
|
2013-10-07 21:28:48 -07:00
|
|
|
from mock import patch, MagicMock
|
2021-01-22 14:21:23 -06:00
|
|
|
from test.debug_logger import debug_logger
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
|
2013-09-04 22:20:44 +00:00
|
|
|
class MockedMemcachePool(memcached.MemcacheConnPool):
    """Conn pool that dispenses pre-built mock connections instead of
    opening real sockets."""

    def __init__(self, mocks):
        Pool.__init__(self, max_size=2)
        self.mocks = mocks
        # setting this for the eventlet workaround in the MemcacheConnPool
        self._parent_class_getter = super(memcached.MemcacheConnPool, self).get

    def create(self):
        # Hand out the queued mocks in FIFO order.
        return self.mocks.pop(0)
|
|
|
|
|
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
class ExplodingMockMemcached(object):
    """Mock memcached connection whose I/O raises EPIPE while armed.

    Tests flip ``should_explode`` to arm/disarm the failure; ``exploded``
    records whether a failure was actually triggered.
    """

    should_explode = True
    exploded = False

    def _blow_up(self):
        # Record the failure, then simulate a broken pipe.
        self.exploded = True
        raise socket.error(errno.EPIPE, os.strerror(errno.EPIPE))

    def sendall(self, string):
        if self.should_explode:
            self._blow_up()

    def readline(self):
        if self.should_explode:
            self._blow_up()
        return b'STORED\r\n'

    def read(self, size):
        if self.should_explode:
            self._blow_up()

    def close(self):
        pass
|
2012-09-04 23:01:02 +08:00
|
|
|
|
2013-08-31 22:36:58 -04:00
|
|
|
|
2021-01-07 21:00:59 -08:00
|
|
|
# Hashed key whose stored value the mock server rejects as too large.
TOO_BIG_KEY = md5(b'too-big',
                  usedforsecurity=False).hexdigest().encode('ascii')
|
|
|
|
|
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
class MockMemcached(object):
    """In-memory fake of a memcached server's wire protocol.

    See https://github.com/memcached/memcached/blob/master/doc/protocol.txt
    (the "Storage commands" section in particular) for the protocol this
    mock imitates.
    """

    def __init__(self):
        # Raw protocol bytes: written by sendall(), drained by read*().
        self.inbuf = b''
        self.outbuf = b''
        # Structure: key -> (flags, absolute exptime, value)
        self.cache = {}
        # Failure-injection knobs used by the tests.
        self.down = False
        self.exc_on_delete = False
        self.read_return_none = False
        self.read_return_empty_str = False
        self.close_called = False

    def _get_absolute_exptime(self, exptime):
        """Translate a client-supplied exptime to an absolute unix time."""
        delta = int(exptime)
        if delta == 0:
            # '0' means this cache item doesn't expire.
            return 0
        if delta > EXPTIME_MAXDELTA:
            # Already an absolute timestamp.
            return delta
        # Small values are deltas from the current unix time.
        return delta + time.time()

    def sendall(self, string):
        """Accept request bytes and dispatch each complete command line."""
        if self.down:
            raise Exception('mock is down')
        self.inbuf += string
        while b'\n' in self.inbuf:
            cmd, self.inbuf = self.inbuf.split(b'\n', 1)
            parts = cmd.split()
            name = parts[0].decode('ascii').lower()
            handler = getattr(self, 'handle_%s' % name, None)
            if handler is None:
                raise ValueError('Unhandled command: %s' % parts[0])
            handler(*parts[1:])

    def handle_set(self, key, flags, exptime, num_bytes, noreply=b''):
        """Store a value unconditionally; reply unless 'noreply' was sent."""
        nbytes = int(num_bytes)
        self.cache[key] = (
            flags,
            self._get_absolute_exptime(exptime),
            self.inbuf[:nbytes],
        )
        self.inbuf = self.inbuf[nbytes + 2:]  # skip the trailing \r\n
        if noreply != b'noreply':
            if key == TOO_BIG_KEY:
                self.outbuf += b'SERVER_ERROR object too large for cache\r\n'
            else:
                self.outbuf += b'STORED\r\n'

    def handle_add(self, key, flags, exptime, num_bytes, noreply=b''):
        """Store a value only if the key is not already present."""
        nbytes = int(num_bytes)
        value = self.inbuf[:nbytes]
        self.inbuf = self.inbuf[nbytes + 2:]  # skip the trailing \r\n
        if key in self.cache:
            if noreply != b'noreply':
                self.outbuf += b'NOT_STORED\r\n'
        else:
            self.cache[key] = flags, self._get_absolute_exptime(exptime), value
            if noreply != b'noreply':
                self.outbuf += b'STORED\r\n'

    def _is_expired(self, key):
        """Evict and report True if the entry at ``key`` has expired."""
        exptime = self.cache[key][1]
        if exptime == 0 or time.time() <= exptime:
            return False
        del self.cache[key]
        return True

    def handle_delete(self, key, noreply=b''):
        """Remove a key, replying DELETED or NOT_FOUND as appropriate."""
        if self.exc_on_delete:
            raise Exception('mock is has exc_on_delete set')
        wants_reply = noreply != b'noreply'
        if key in self.cache and not self._is_expired(key):
            del self.cache[key]
            if wants_reply:
                self.outbuf += b'DELETED\r\n'
        elif wants_reply:
            self.outbuf += b'NOT_FOUND\r\n'

    def handle_get(self, *keys):
        """Emit a VALUE block for every live key, then a single END."""
        for key in keys:
            if key in self.cache and not self._is_expired(key):
                flags, _exptime, value = self.cache[key]
                size = str(len(value)).encode('ascii')
                self.outbuf += b' '.join(
                    [b'VALUE', key, flags, size]) + b'\r\n'
                self.outbuf += value + b'\r\n'
        self.outbuf += b'END\r\n'

    def handle_incr(self, key, value, noreply=b''):
        """Increment a live counter, or reply NOT_FOUND."""
        if key in self.cache and not self._is_expired(key):
            new_val = str(int(self.cache[key][2]) +
                          int(value)).encode('ascii')
            self.cache[key] = self.cache[key][:2] + (new_val, )
            self.outbuf += new_val + b'\r\n'
        else:
            self.outbuf += b'NOT_FOUND\r\n'

    def handle_decr(self, key, value, noreply=b''):
        """Decrement a live counter (floored at zero), or reply NOT_FOUND."""
        if key in self.cache and not self._is_expired(key):
            # memcached clamps decrements at zero rather than going negative
            diff = int(self.cache[key][2]) - int(value)
            new_val = str(max(diff, 0)).encode('ascii')
            self.cache[key] = self.cache[key][:2] + (new_val, )
            self.outbuf += new_val + b'\r\n'
        else:
            self.outbuf += b'NOT_FOUND\r\n'

    def readline(self):
        """Return one buffered response line, honoring the failure knobs.

        Returns None implicitly when no complete line is buffered.
        """
        if self.read_return_empty_str:
            return b''
        if self.read_return_none:
            return None
        if self.down:
            raise Exception('mock is down')
        if b'\n' in self.outbuf:
            response, self.outbuf = self.outbuf.split(b'\n', 1)
            return response + b'\n'

    def read(self, size):
        """Return exactly ``size`` buffered bytes (None when short)."""
        if self.down:
            raise Exception('mock is down')
        if len(self.outbuf) >= size:
            response = self.outbuf[:size]
            self.outbuf = self.outbuf[size:]
            return response

    def close(self):
        # Only records the call so tests can assert the conn was closed.
        self.close_called = True
|
|
|
|
|
2012-09-04 23:01:02 +08:00
|
|
|
|
2023-05-18 12:21:31 +01:00
|
|
|
class TestMemcacheCommand(unittest.TestCase):
    """Tests for the MemcacheCommand value object."""

    def test_init(self):
        # All derived attributes come from the (method, key) pair.
        cmd = MemcacheCommand("set", "shard-updating-v2/a/c")
        self.assertEqual(cmd.method, "set")
        self.assertEqual(cmd.command, b"set")
        self.assertEqual(cmd.key, "shard-updating-v2/a/c")
        self.assertEqual(cmd.key_prefix, "shard-updating-v2/a")
        self.assertEqual(cmd.hash_key, md5hash("shard-updating-v2/a/c"))

    def test_get_key_prefix(self):
        # key_prefix drops the final path segment; a key with no '/'
        # is its own prefix.
        expectations = [
            ("shard-updating-v2/a/c", "shard-updating-v2/a"),
            ("shard-listing-v2/accout/container3", "shard-listing-v2/accout"),
            ("auth_reseller_name/token/X58E34EL2SDFLEY3",
             "auth_reseller_name/token"),
            ("nvratelimit/v2/wf/2345392374", "nvratelimit/v2/wf"),
            ("some_key", "some_key"),
        ]
        for key, expected_prefix in expectations:
            cmd = MemcacheCommand("set", key)
            self.assertEqual(cmd.key_prefix, expected_prefix)
|
|
|
|
|
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
class TestMemcached(unittest.TestCase):
|
2013-08-31 22:36:58 -04:00
|
|
|
"""Tests for swift.common.memcached"""
|
2010-07-12 17:03:45 -05:00
|
|
|
|
Socket errors don't warrant tracebacks when talking to memcached
Currently, timeouts when talking to memcached cause log lines like
ERROR:root:Timeout connecting to memcached: 192.168.23.62:11211
Meanwhile, socket errors (which you'd expect to be about as common
as timeouts) cause log lines like
ERROR:root:Error talking to memcached: 192.168.23.70:11211
Traceback (most recent call last):
File "/usr/lib/pymodules/python2.7/swift/common/memcached.py", line 293, in set
(key, flags, timeout, len(value), value))
File "/usr/lib/python2.7/dist-packages/eventlet/greenio/base.py", line 390, in sendall
tail = self.send(data, flags)
File "/usr/lib/python2.7/dist-packages/eventlet/greenio/base.py", line 384, in send
return self._send_loop(self.fd.send, data, flags)
File "/usr/lib/python2.7/dist-packages/eventlet/greenio/base.py", line 371, in _send_loop
return send_method(data, *args)
error: [Errno 32] Broken pipe
... or ...
ERROR:root:Error connecting to memcached: 192.168.23.73:11211
Traceback (most recent call last):
File "/opt/ss/lib/python2.7/site-packages/swift/common/memcached.py", line 239, in _get_conns
fp, sock = self._client_cache[server].get()
File "/opt/ss/lib/python2.7/site-packages/swift/common/memcached.py", line 135, in get
fp, sock = self.create()
File "/opt/ss/lib/python2.7/site-packages/swift/common/memcached.py", line 128, in create
sock.connect((host, int(port)))
File "/opt/ss/lib/python2.7/site-packages/eventlet/greenio/base.py", line 237, in connect
while not socket_connect(fd, address):
File "/opt/ss/lib/python2.7/site-packages/eventlet/greenio/base.py", line 39, in socket_connect
raise socket.error(err, errno.errorcode[err])
error: [Errno 101] ENETUNREACH
... which seems excessive. Now, socket errors are logged as normal
errors, without the tracebacks.
Change-Id: I71a2c4786c0406dbc43b829cad5a0c7e2c45de21
2017-08-22 05:31:59 +00:00
|
|
|
def setUp(self):
    """Give every test a capturing logger and a canned 'set' command."""
    self.set_cmd = MemcacheCommand('set', 'key')
    self.logger = debug_logger()
|
2023-04-19 15:54:50 -07:00
|
|
|
|
2020-06-09 10:50:07 -07:00
|
|
|
def test_logger_kwarg(self):
    """MemcacheRing defaults to the root logger, or takes one explicitly."""
    server_socket = '%s:%s' % ('[::1]', 11211)

    ring = memcached.MemcacheRing([server_socket])
    self.assertIs(ring.logger, logging.getLogger())

    ring = memcached.MemcacheRing([server_socket], logger=self.logger)
    self.assertIs(ring.logger, self.logger)
|
Socket errors don't warrant tracebacks when talking to memcached
Currently, timeouts when talking to memcached cause log lines like
ERROR:root:Timeout connecting to memcached: 192.168.23.62:11211
Meanwhile, socket errors (which you'd expect to be about as common
as timeouts) cause log lines like
ERROR:root:Error talking to memcached: 192.168.23.70:11211
Traceback (most recent call last):
File "/usr/lib/pymodules/python2.7/swift/common/memcached.py", line 293, in set
(key, flags, timeout, len(value), value))
File "/usr/lib/python2.7/dist-packages/eventlet/greenio/base.py", line 390, in sendall
tail = self.send(data, flags)
File "/usr/lib/python2.7/dist-packages/eventlet/greenio/base.py", line 384, in send
return self._send_loop(self.fd.send, data, flags)
File "/usr/lib/python2.7/dist-packages/eventlet/greenio/base.py", line 371, in _send_loop
return send_method(data, *args)
error: [Errno 32] Broken pipe
... or ...
ERROR:root:Error connecting to memcached: 192.168.23.73:11211
Traceback (most recent call last):
File "/opt/ss/lib/python2.7/site-packages/swift/common/memcached.py", line 239, in _get_conns
fp, sock = self._client_cache[server].get()
File "/opt/ss/lib/python2.7/site-packages/swift/common/memcached.py", line 135, in get
fp, sock = self.create()
File "/opt/ss/lib/python2.7/site-packages/swift/common/memcached.py", line 128, in create
sock.connect((host, int(port)))
File "/opt/ss/lib/python2.7/site-packages/eventlet/greenio/base.py", line 237, in connect
while not socket_connect(fd, address):
File "/opt/ss/lib/python2.7/site-packages/eventlet/greenio/base.py", line 39, in socket_connect
raise socket.error(err, errno.errorcode[err])
error: [Errno 101] ENETUNREACH
... which seems excessive. Now, socket errors are logged as normal
errors, without the tracebacks.
Change-Id: I71a2c4786c0406dbc43b829cad5a0c7e2c45de21
2017-08-22 05:31:59 +00:00
|
|
|
|
2020-12-04 16:27:11 +01:00
|
|
|
def test_tls_context_kwarg(self):
    """A passed tls_context is stored on the pool and wraps new sockets."""
    with patch('swift.common.memcached.socket.socket'):
        server = '%s:%s' % ('[::1]', 11211)
        ring = memcached.MemcacheRing([server])
        # No TLS context by default.
        self.assertIsNone(ring._client_cache[server]._tls_context)

        ctx = mock.Mock()
        ring = memcached.MemcacheRing([server], tls_context=ctx)
        self.assertIs(ring._client_cache[server]._tls_context, ctx)

        # Drawing a connection must wrap the socket with the context.
        list(ring._get_conns(self.set_cmd))
        ctx.wrap_socket.assert_called_once()
|
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
def test_get_conns(self):
    """_get_conns eventually reaches both servers; a bare IP (no port)
    must fall back to DEFAULT_MEMCACHED_PORT."""
    def listening_sock():
        # Bind to an ephemeral port on loopback.
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind(('127.0.0.1', 0))
        s.listen(1)
        return s

    sock1 = listening_sock()
    sock1ipport = '%s:%s' % sock1.getsockname()
    sock2 = listening_sock()
    orig_port = memcached.DEFAULT_MEMCACHED_PORT
    try:
        sock2ip, memcached.DEFAULT_MEMCACHED_PORT = sock2.getsockname()
        sock2ipport = '%s:%s' % (sock2ip, memcached.DEFAULT_MEMCACHED_PORT)
        # We're deliberately using sock2ip (no port) here to test that the
        # default port is used.
        memcache_client = memcached.MemcacheRing(
            [sock1ipport, sock2ip], logger=self.logger)
        pending = {sock1ipport, sock2ipport}
        while pending:  # run until both servers have been connected to
            for conn in memcache_client._get_conns(self.set_cmd):
                if 'b' not in getattr(conn[1], 'mode', ''):
                    self.assertIsInstance(
                        conn[1], (io.RawIOBase, io.BufferedIOBase))
                peeripport = '%s:%s' % conn[2].getpeername()
                self.assertIn(peeripport, (sock1ipport, sock2ipport))
                pending.discard(peeripport)
        self.assertEqual(len(memcache_client._errors[sock1ipport]), 0)
        self.assertEqual(len(memcache_client._errors[sock2ip]), 0)
    finally:
        memcached.DEFAULT_MEMCACHED_PORT = orig_port
|
2010-07-12 17:03:45 -05:00
|
|
|
|
Fix IPv6 handling in MemcacheConnPool.
The patch removes the assumption of IPv4-only addresses in the
MemcacheConnPool. The changes are around address handling.
Namely, if a server is specified with an address
[<address>]:port (port is optional), it is assumed to be an IPv6
address [1]. If an IPv6 address is specified without "[]", an exception
is raised as it is impossible to parse such addresses correctly.
For testing, memcache can be configured to listen on the link-local,
unique-local, or ::1 (equivalent to 127.0.0.1) addresses. Link-local
addresses are assigned by default to each interface and are of the form
"fe80::dead:beef". These addresses require a scope ID, which would look
like "fe80::dead:beef%eth0" (replacing eth0 with the correct interface).
Unique-local addresses are any addresses in the fc00::/7 subnet. To add
a ULA to an interface use the "ip" utility. For example:
"ip -6 address add fc01::dead:beef dev eth0". Lastly, and probably
simplest, memcache can be configured to listen on "::1". The same
address would be used in the swift configuration, e.g. "[::1]:11211".
Note: only memcached version 1.4.25 or greater supports binding to an
IPv6 address.
Fixes #1526570
[1] IPv6 host literals:
https://tools.ietf.org/html/rfc3986#section-3.2.2
Change-Id: I8408143c1d47d24e70df56a08167c529825276a2
2015-12-16 12:07:27 -08:00
|
|
|
def test_get_conns_v6(self):
    """A bracketed [addr]:port IPv6 server string connects correctly."""
    if not socket.has_ipv6:
        return
    try:
        listener = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
        listener.bind(('::1', 0, 0, 0))
        listener.listen(1)
        addr = listener.getsockname()
        server_socket = '[%s]:%s' % (addr[0], addr[1])
        memcache_client = memcached.MemcacheRing([server_socket],
                                                 logger=self.logger)
        for conn in memcache_client._get_conns(self.set_cmd):
            peer = conn[2].getpeername()
            self.assertEqual('[%s]:%s' % (peer[0], peer[1]), server_socket)
        self.assertEqual(len(memcache_client._errors[server_socket]), 0)
    finally:
        listener.close()
|
|
|
|
|
|
|
|
def test_get_conns_v6_default(self):
    """A bracketed IPv6 host with no explicit port uses the default port.

    Bug fix: the original overwrote memcached.DEFAULT_MEMCACHED_PORT and
    never restored it, leaking the ephemeral port into every later test.
    It is now saved and restored in the finally block, consistent with
    test_get_conns.
    """
    if not socket.has_ipv6:
        return
    orig_port = memcached.DEFAULT_MEMCACHED_PORT
    try:
        sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
        sock.bind(('::1', 0))
        sock.listen(1)
        sock_addr = sock.getsockname()
        server_socket = '[%s]:%s' % (sock_addr[0], sock_addr[1])
        # Configure the ring with host only (no port) so the default
        # port must be used.
        server_host = '[%s]' % sock_addr[0]
        memcached.DEFAULT_MEMCACHED_PORT = sock_addr[1]
        memcache_client = memcached.MemcacheRing([server_host],
                                                 logger=self.logger)
        for conn in memcache_client._get_conns(self.set_cmd):
            peer_sockaddr = conn[2].getpeername()
            peer_socket = '[%s]:%s' % (peer_sockaddr[0], peer_sockaddr[1])
            self.assertEqual(peer_socket, server_socket)
        self.assertEqual(len(memcache_client._errors[server_host]), 0)
    finally:
        # Undo the module-level override so other tests see the real
        # default port again.
        memcached.DEFAULT_MEMCACHED_PORT = orig_port
        sock.close()
|
|
|
|
|
|
|
|
def test_get_conns_bad_v6(self):
    """An IPv6 address given without [] brackets must be rejected."""
    with self.assertRaises(ValueError):
        # IPv6 address with missing [] is invalid
        memcached.MemcacheRing(['%s:%s' % ('::1', 11211)],
                               logger=self.logger)
|
Fix IPv6 handling in MemcacheConnPool.
The patch removes the assumption of IPv4-only addresses in the
MemcacheConnPool. The changes are around address handling.
Namely, if a server is specified with an address
[<address>]:port (port is optional), it is assumed to be an IPv6
address [1]. If an IPv6 address is specified without "[]", an exception
is raised as it is impossible to parse such addresses correctly.
For testing, memcache can be configured to listen on the link-local,
unique-local, or ::1 (equivalent to 127.0.0.1) addresses. Link-local
addresses are assigned by default to each interface and are of the form
"fe80::dead:beef". These addresses require a scope ID, which would look
like "fe80::dead:beef%eth0" (replacing eth0 with the correct interface).
Unique-local addresses are any addresses in the fc00::/7 subnet. To add
a ULA to an interface use the "ip" utility. For example:
"ip -6 address add fc01::dead:beef dev eth0". Lastly, and probably
simplest, memcache can be configured to listen on "::1". The same
address would be used in the swift configuration, e.g. "[::1]:11211".
Note: only memcached version 1.4.25 or greater supports binding to an
IPv6 address.
Fixes #1526570
[1] IPv6 host literals:
https://tools.ietf.org/html/rfc3986#section-3.2.2
Change-Id: I8408143c1d47d24e70df56a08167c529825276a2
2015-12-16 12:07:27 -08:00
|
|
|
|
|
|
|
def test_get_conns_hostname(self):
|
|
|
|
with patch('swift.common.memcached.socket.getaddrinfo') as addrinfo:
|
|
|
|
try:
|
|
|
|
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
|
|
|
sock.bind(('127.0.0.1', 0))
|
|
|
|
sock.listen(1)
|
|
|
|
sock_addr = sock.getsockname()
|
|
|
|
fqdn = socket.getfqdn()
|
|
|
|
server_socket = '%s:%s' % (fqdn, sock_addr[1])
|
|
|
|
addrinfo.return_value = [(socket.AF_INET,
|
|
|
|
socket.SOCK_STREAM, 0, '',
|
|
|
|
('127.0.0.1', sock_addr[1]))]
|
2020-06-09 10:50:07 -07:00
|
|
|
memcache_client = memcached.MemcacheRing([server_socket],
|
|
|
|
logger=self.logger)
|
2023-05-18 12:21:31 +01:00
|
|
|
for conn in memcache_client._get_conns(self.set_cmd):
|
Fix IPv6 handling in MemcacheConnPool.
The patch removes the assumption of IPv4-only addresses in the
MemcacheConnPool. The changes are around address handling.
Namely, if a server is specified with an address
[<address>]:port (port is optional), it is assumed to be an IPv6
address [1]. If an IPv6 address is specified without "[]", an exception
is raised as it is impossible to parse such addresses correctly.
For testing, memcache can be configured to listen on the link-local,
unique-local, or ::1 (equivalent to 127.0.0.1) addresses. Link-local
addresses are assigned by default to each interface and are of the form
"fe80::dead:beef". These addresses require a scope ID, which would look
like "fe80::dead:beef%eth0" (replacing eth0 with the correct interface).
Unique-local addresses are any addresses in the fc00::/7 subnet. To add
a ULA to an interface use the "ip" utility. For example:
"ip -6 address add fc01::dead:beef dev eth0". Lastly, and probably
simplest, memcache can be configured to listen on "::1". The same
address would be used in the swift configuration, e.g. "[::1]:11211".
Note: only memcached version 1.4.25 or greater supports binding to an
IPv6 address.
Fixes #1526570
[1] IPv6 host literals:
https://tools.ietf.org/html/rfc3986#section-3.2.2
Change-Id: I8408143c1d47d24e70df56a08167c529825276a2
2015-12-16 12:07:27 -08:00
|
|
|
peer_sockaddr = conn[2].getpeername()
|
|
|
|
peer_socket = '%s:%s' % (peer_sockaddr[0],
|
|
|
|
peer_sockaddr[1])
|
|
|
|
self.assertEqual(peer_socket,
|
|
|
|
'127.0.0.1:%d' % sock_addr[1])
|
|
|
|
self.assertEqual(len(memcache_client._errors[server_socket]),
|
|
|
|
0)
|
|
|
|
finally:
|
|
|
|
sock.close()
|
|
|
|
|
|
|
|
def test_get_conns_hostname6(self):
|
|
|
|
with patch('swift.common.memcached.socket.getaddrinfo') as addrinfo:
|
|
|
|
try:
|
|
|
|
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
|
|
|
|
sock.bind(('::1', 0))
|
|
|
|
sock.listen(1)
|
|
|
|
sock_addr = sock.getsockname()
|
|
|
|
fqdn = socket.getfqdn()
|
|
|
|
server_socket = '%s:%s' % (fqdn, sock_addr[1])
|
|
|
|
addrinfo.return_value = [(socket.AF_INET6,
|
|
|
|
socket.SOCK_STREAM, 0, '',
|
|
|
|
('::1', sock_addr[1]))]
|
2020-06-09 10:50:07 -07:00
|
|
|
memcache_client = memcached.MemcacheRing([server_socket],
|
|
|
|
logger=self.logger)
|
2023-05-18 12:21:31 +01:00
|
|
|
for conn in memcache_client._get_conns(self.set_cmd):
|
Fix IPv6 handling in MemcacheConnPool.
The patch removes the assumption of IPv4-only addresses in the
MemcacheConnPool. The changes are around address handling.
Namely, if a server is specified with an address
[<address>]:port (port is optional), it is assumed to be an IPv6
address [1]. If an IPv6 address is specified without "[]", an exception
is raised as it is impossible to parse such addresses correctly.
For testing, memcache can be configured to listen on the link-local,
unique-local, or ::1 (equivalent to 127.0.0.1) addresses. Link-local
addresses are assigned by default to each interface and are of the form
"fe80::dead:beef". These addresses require a scope ID, which would look
like "fe80::dead:beef%eth0" (replacing eth0 with the correct interface).
Unique-local addresses are any addresses in the fc00::/7 subnet. To add
a ULA to an interface use the "ip" utility. For example:
"ip -6 address add fc01::dead:beef dev eth0". Lastly, and probably
simplest, memcache can be configured to listen on "::1". The same
address would be used in the swift configuration, e.g. "[::1]:11211".
Note: only memcached version 1.4.25 or greater supports binding to an
IPv6 address.
Fixes #1526570
[1] IPv6 host literals:
https://tools.ietf.org/html/rfc3986#section-3.2.2
Change-Id: I8408143c1d47d24e70df56a08167c529825276a2
2015-12-16 12:07:27 -08:00
|
|
|
peer_sockaddr = conn[2].getpeername()
|
|
|
|
peer_socket = '[%s]:%s' % (peer_sockaddr[0],
|
|
|
|
peer_sockaddr[1])
|
|
|
|
self.assertEqual(peer_socket, '[::1]:%d' % sock_addr[1])
|
|
|
|
self.assertEqual(len(memcache_client._errors[server_socket]),
|
|
|
|
0)
|
|
|
|
finally:
|
|
|
|
sock.close()
|
|
|
|
|
2016-06-23 12:22:02 -07:00
|
|
|
def test_set_get_json(self):
|
2020-06-09 10:50:07 -07:00
|
|
|
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
|
|
|
|
logger=self.logger)
|
2010-07-12 17:03:45 -05:00
|
|
|
mock = MockMemcached()
|
2013-09-04 22:20:44 +00:00
|
|
|
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
|
|
|
|
[(mock, mock)] * 2)
|
2020-09-11 16:28:11 -04:00
|
|
|
cache_key = md5(b'some_key',
|
|
|
|
usedforsecurity=False).hexdigest().encode('ascii')
|
2016-06-23 12:22:02 -07:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
memcache_client.set('some_key', [1, 2, 3])
|
2015-08-05 23:58:14 +05:30
|
|
|
self.assertEqual(memcache_client.get('some_key'), [1, 2, 3])
|
2016-06-23 12:22:02 -07:00
|
|
|
# See JSON_FLAG
|
2024-03-14 13:10:00 -07:00
|
|
|
self.assertEqual(mock.cache, {cache_key: (b'2', 0, b'[1, 2, 3]')})
|
2016-06-23 12:22:02 -07:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
memcache_client.set('some_key', [4, 5, 6])
|
2015-08-05 23:58:14 +05:30
|
|
|
self.assertEqual(memcache_client.get('some_key'), [4, 5, 6])
|
2024-03-14 13:10:00 -07:00
|
|
|
self.assertEqual(mock.cache, {cache_key: (b'2', 0, b'[4, 5, 6]')})
|
2016-06-23 12:22:02 -07:00
|
|
|
|
Do not use pickle for serialization in memcache, but JSON
We don't want to use pickle as it can execute arbitrary code. JSON is
safer. However, note that it supports serialization for only some
specific subset of object types; this should be enough for what we need,
though.
To avoid issues on upgrades (unability to read pickled values, and cache
poisoning for old servers not understanding JSON), we add a
memcache_serialization_support configuration option, with the following
values:
0 = older, insecure pickle serialization
1 = json serialization but pickles can still be read (still insecure)
2 = json serialization only (secure and the default)
To avoid an instant full cache flush, existing installations should
upgrade with 0, then set to 1 and reload, then after some time (24
hours) set to 2 and reload. Support for 0 and 1 will be removed in
future versions.
Part of bug 1006414.
Change-Id: Id7d6d547b103b4f23ebf5be98b88f09ec6027ce4
2012-06-21 14:37:41 +02:00
|
|
|
memcache_client.set('some_key', ['simple str', 'utf8 str éà'])
|
2012-09-04 23:01:02 +08:00
|
|
|
# As per http://wiki.openstack.org/encoding,
|
|
|
|
# we should expect to have unicode
|
2015-08-05 23:58:14 +05:30
|
|
|
self.assertEqual(
|
2012-09-04 23:01:02 +08:00
|
|
|
memcache_client.get('some_key'), ['simple str', u'utf8 str éà'])
|
2016-06-23 12:22:02 -07:00
|
|
|
self.assertEqual(mock.cache, {cache_key: (
|
2024-03-14 13:10:00 -07:00
|
|
|
b'2', 0, b'["simple str", "utf8 str \\u00e9\\u00e0"]')})
|
2016-06-23 12:22:02 -07:00
|
|
|
|
2024-03-14 13:10:00 -07:00
|
|
|
now = time.time()
|
|
|
|
with patch('time.time', return_value=now):
|
|
|
|
memcache_client.set('some_key', [1, 2, 3], time=20)
|
|
|
|
self.assertEqual(
|
|
|
|
mock.cache, {cache_key: (b'2', now + 20, b'[1, 2, 3]')})
|
Swift MemcacheRing (set) interface is incompatible fixes
This patch fixes the Swift MemcacheRing set and set_multi
interface incompatible problem with python memcache. The fix
added two extra named parameters to both set and set_multi
method. When only time or timeout parameter is present, then one
of the value will be used. When both time and timeout are present,
the time parameter will be used.
Named parameter min_compress_len is added for pure compatibility
purposes. The current implementation ignores this parameter.
To make swift memcached methods all consistent cross the board,
method incr and decr have also been changed to include a new
named parameter time.
In future OpenStack releases, the named parameter timeout will be
removed, keep the named parameter timeout around for now is
to make sure that mismatched releases between client and server
will still work.
From now on, when a call is made to set, set_multi, decr, incr
by using timeout parametner, a warning message will be logged to
indicate the deprecation of the parameter.
Fixes: bug #1095730
Change-Id: I07af784a54d7d79395fc3265e74145f92f38a893
2013-02-13 13:54:51 -05:00
|
|
|
|
2012-11-16 00:09:14 -05:00
|
|
|
sixtydays = 60 * 24 * 60 * 60
|
|
|
|
esttimeout = time.time() + sixtydays
|
Swift MemcacheRing (set) interface is incompatible fixes
This patch fixes the Swift MemcacheRing set and set_multi
interface incompatible problem with python memcache. The fix
added two extra named parameters to both set and set_multi
method. When only time or timeout parameter is present, then one
of the value will be used. When both time and timeout are present,
the time parameter will be used.
Named parameter min_compress_len is added for pure compatibility
purposes. The current implementation ignores this parameter.
To make swift memcached methods all consistent cross the board,
method incr and decr have also been changed to include a new
named parameter time.
In future OpenStack releases, the named parameter timeout will be
removed, keep the named parameter timeout around for now is
to make sure that mismatched releases between client and server
will still work.
From now on, when a call is made to set, set_multi, decr, incr
by using timeout parametner, a warning message will be logged to
indicate the deprecation of the parameter.
Fixes: bug #1095730
Change-Id: I07af784a54d7d79395fc3265e74145f92f38a893
2013-02-13 13:54:51 -05:00
|
|
|
memcache_client.set('some_key', [1, 2, 3], time=sixtydays)
|
2016-06-23 12:22:02 -07:00
|
|
|
_junk, cache_timeout, _junk = mock.cache[cache_key]
|
|
|
|
self.assertAlmostEqual(float(cache_timeout), esttimeout, delta=1)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2021-01-07 21:00:59 -08:00
|
|
|
def test_set_error(self):
|
2023-10-20 14:40:06 +01:00
|
|
|
memcache_client = memcached.MemcacheRing(
|
|
|
|
['1.2.3.4:11211'], logger=self.logger,
|
|
|
|
item_size_warning_threshold=1)
|
2021-01-07 21:00:59 -08:00
|
|
|
mock = MockMemcached()
|
|
|
|
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
|
|
|
|
[(mock, mock)] * 2)
|
2023-10-20 14:40:06 +01:00
|
|
|
now = time.time()
|
|
|
|
with patch('time.time', return_value=now):
|
|
|
|
memcache_client.set('too-big', [1, 2, 3])
|
2021-01-07 21:00:59 -08:00
|
|
|
self.assertEqual(
|
|
|
|
self.logger.get_lines_for_level('error'),
|
2023-10-20 14:40:06 +01:00
|
|
|
['Error talking to memcached: 1.2.3.4:11211: '
|
|
|
|
'with key_prefix too-big, method set, time_spent 0.0, '
|
|
|
|
'failed set: SERVER_ERROR object too large for cache'])
|
|
|
|
warning_lines = self.logger.get_lines_for_level('warning')
|
|
|
|
self.assertEqual(1, len(warning_lines))
|
|
|
|
self.assertIn('Item size larger than warning threshold',
|
|
|
|
warning_lines[0])
|
|
|
|
self.assertTrue(mock.close_called)
|
|
|
|
|
|
|
|
def test_set_error_raise_on_error(self):
|
|
|
|
memcache_client = memcached.MemcacheRing(
|
|
|
|
['1.2.3.4:11211'], logger=self.logger,
|
|
|
|
item_size_warning_threshold=1)
|
|
|
|
mock = MockMemcached()
|
|
|
|
memcache_client._client_cache[
|
|
|
|
'1.2.3.4:11211'] = MockedMemcachePool(
|
|
|
|
[(mock, mock)] * 2)
|
|
|
|
now = time.time()
|
|
|
|
|
|
|
|
with self.assertRaises(MemcacheConnectionError) as cm:
|
|
|
|
with patch('time.time', return_value=now):
|
|
|
|
memcache_client.set('too-big', [1, 2, 3], raise_on_error=True)
|
|
|
|
self.assertIn("No memcached connections succeeded", str(cm.exception))
|
|
|
|
self.assertEqual(
|
|
|
|
self.logger.get_lines_for_level('error'),
|
|
|
|
['Error talking to memcached: 1.2.3.4:11211: '
|
|
|
|
'with key_prefix too-big, method set, time_spent 0.0, '
|
|
|
|
'failed set: SERVER_ERROR object too large for cache'])
|
|
|
|
warning_lines = self.logger.get_lines_for_level('warning')
|
|
|
|
self.assertEqual(1, len(warning_lines))
|
|
|
|
self.assertIn('Item size larger than warning threshold',
|
|
|
|
warning_lines[0])
|
|
|
|
self.assertTrue(mock.close_called)
|
2021-01-07 21:00:59 -08:00
|
|
|
|
2015-09-03 12:19:05 +10:00
|
|
|
def test_get_failed_connection_mid_request(self):
|
2020-06-09 10:50:07 -07:00
|
|
|
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
|
|
|
|
logger=self.logger)
|
2015-09-03 12:19:05 +10:00
|
|
|
mock = MockMemcached()
|
|
|
|
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
|
|
|
|
[(mock, mock)] * 2)
|
|
|
|
memcache_client.set('some_key', [1, 2, 3])
|
|
|
|
self.assertEqual(memcache_client.get('some_key'), [1, 2, 3])
|
2018-02-01 14:30:32 -08:00
|
|
|
self.assertEqual(list(mock.cache.values()),
|
2024-03-14 13:10:00 -07:00
|
|
|
[(b'2', 0, b'[1, 2, 3]')])
|
2015-09-03 12:19:05 +10:00
|
|
|
|
|
|
|
# Now lets return an empty string, and make sure we aren't logging
|
|
|
|
# the error.
|
|
|
|
fake_stdout = six.StringIO()
|
2017-09-26 11:43:53 +01:00
|
|
|
# force the logging through the DebugLogger instead of the nose
|
|
|
|
# handler. This will use stdout, so we can assert that no stack trace
|
|
|
|
# is logged.
|
2020-06-09 10:50:07 -07:00
|
|
|
with patch("sys.stdout", fake_stdout):
|
2015-09-03 12:19:05 +10:00
|
|
|
mock.read_return_empty_str = True
|
2017-11-09 14:40:18 +08:00
|
|
|
self.assertIsNone(memcache_client.get('some_key'))
|
2020-06-09 10:50:07 -07:00
|
|
|
log_lines = self.logger.get_lines_for_level('error')
|
2017-09-26 11:43:53 +01:00
|
|
|
self.assertIn('Error talking to memcached', log_lines[0])
|
|
|
|
self.assertFalse(log_lines[1:])
|
|
|
|
self.assertNotIn("Traceback", fake_stdout.getvalue())
|
2015-09-03 12:19:05 +10:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
def test_incr(self):
|
2020-06-09 10:50:07 -07:00
|
|
|
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
|
|
|
|
logger=self.logger)
|
2010-07-12 17:03:45 -05:00
|
|
|
mock = MockMemcached()
|
2013-09-04 22:20:44 +00:00
|
|
|
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
|
|
|
|
[(mock, mock)] * 2)
|
2015-08-05 23:58:14 +05:30
|
|
|
self.assertEqual(memcache_client.incr('some_key', delta=5), 5)
|
2018-02-01 14:30:32 -08:00
|
|
|
self.assertEqual(memcache_client.get('some_key'), b'5')
|
2015-08-05 23:58:14 +05:30
|
|
|
self.assertEqual(memcache_client.incr('some_key', delta=5), 10)
|
2018-02-01 14:30:32 -08:00
|
|
|
self.assertEqual(memcache_client.get('some_key'), b'10')
|
2015-08-05 23:58:14 +05:30
|
|
|
self.assertEqual(memcache_client.incr('some_key', delta=1), 11)
|
2018-02-01 14:30:32 -08:00
|
|
|
self.assertEqual(memcache_client.get('some_key'), b'11')
|
2015-08-05 23:58:14 +05:30
|
|
|
self.assertEqual(memcache_client.incr('some_key', delta=-5), 6)
|
2018-02-01 14:30:32 -08:00
|
|
|
self.assertEqual(memcache_client.get('some_key'), b'6')
|
2015-08-05 23:58:14 +05:30
|
|
|
self.assertEqual(memcache_client.incr('some_key', delta=-15), 0)
|
2018-02-01 14:30:32 -08:00
|
|
|
self.assertEqual(memcache_client.get('some_key'), b'0')
|
2011-02-23 11:44:36 -08:00
|
|
|
mock.read_return_none = True
|
|
|
|
self.assertRaises(memcached.MemcacheConnectionError,
|
|
|
|
memcache_client.incr, 'some_key', delta=-15)
|
2013-05-07 09:40:53 -07:00
|
|
|
self.assertTrue(mock.close_called)
|
2010-10-25 12:52:25 -07:00
|
|
|
|
2015-09-03 12:19:05 +10:00
|
|
|
def test_incr_failed_connection_mid_request(self):
|
2020-06-09 10:50:07 -07:00
|
|
|
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
|
|
|
|
logger=self.logger)
|
2015-09-03 12:19:05 +10:00
|
|
|
mock = MockMemcached()
|
|
|
|
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
|
|
|
|
[(mock, mock)] * 2)
|
|
|
|
self.assertEqual(memcache_client.incr('some_key', delta=5), 5)
|
2018-02-01 14:30:32 -08:00
|
|
|
self.assertEqual(memcache_client.get('some_key'), b'5')
|
2015-09-03 12:19:05 +10:00
|
|
|
self.assertEqual(memcache_client.incr('some_key', delta=5), 10)
|
2018-02-01 14:30:32 -08:00
|
|
|
self.assertEqual(memcache_client.get('some_key'), b'10')
|
2015-09-03 12:19:05 +10:00
|
|
|
|
|
|
|
# Now lets return an empty string, and make sure we aren't logging
|
|
|
|
# the error.
|
|
|
|
fake_stdout = six.StringIO()
|
2017-09-26 11:43:53 +01:00
|
|
|
# force the logging through the DebugLogger instead of the nose
|
|
|
|
# handler. This will use stdout, so we can assert that no stack trace
|
|
|
|
# is logged.
|
2020-06-09 10:50:07 -07:00
|
|
|
with patch("sys.stdout", fake_stdout):
|
2015-09-03 12:19:05 +10:00
|
|
|
mock.read_return_empty_str = True
|
|
|
|
self.assertRaises(memcached.MemcacheConnectionError,
|
|
|
|
memcache_client.incr, 'some_key', delta=1)
|
2020-06-09 10:50:07 -07:00
|
|
|
log_lines = self.logger.get_lines_for_level('error')
|
2017-09-26 11:43:53 +01:00
|
|
|
self.assertIn('Error talking to memcached', log_lines[0])
|
|
|
|
self.assertFalse(log_lines[1:])
|
|
|
|
self.assertNotIn('Traceback', fake_stdout.getvalue())
|
2015-09-03 12:19:05 +10:00
|
|
|
|
2012-11-16 00:09:14 -05:00
|
|
|
def test_incr_w_timeout(self):
|
2020-06-09 10:50:07 -07:00
|
|
|
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
|
|
|
|
logger=self.logger)
|
2012-11-16 00:09:14 -05:00
|
|
|
mock = MockMemcached()
|
2013-09-04 22:20:44 +00:00
|
|
|
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
|
|
|
|
[(mock, mock)] * 2)
|
2020-09-11 16:28:11 -04:00
|
|
|
cache_key = md5(b'some_key',
|
|
|
|
usedforsecurity=False).hexdigest().encode('ascii')
|
2016-06-23 12:22:02 -07:00
|
|
|
|
2024-03-14 13:10:00 -07:00
|
|
|
now = time.time()
|
|
|
|
with patch('time.time', return_value=now):
|
|
|
|
memcache_client.incr('some_key', delta=5, time=55)
|
2018-02-01 14:30:32 -08:00
|
|
|
self.assertEqual(memcache_client.get('some_key'), b'5')
|
2024-03-14 13:10:00 -07:00
|
|
|
self.assertEqual(mock.cache, {cache_key: (b'0', now + 55, b'5')})
|
2016-06-23 12:22:02 -07:00
|
|
|
|
2012-11-16 00:09:14 -05:00
|
|
|
memcache_client.delete('some_key')
|
2017-06-07 11:37:01 +08:00
|
|
|
self.assertIsNone(memcache_client.get('some_key'))
|
2016-06-23 12:22:02 -07:00
|
|
|
|
2012-11-16 00:09:14 -05:00
|
|
|
fiftydays = 50 * 24 * 60 * 60
|
|
|
|
esttimeout = time.time() + fiftydays
|
Swift MemcacheRing (set) interface is incompatible fixes
This patch fixes the Swift MemcacheRing set and set_multi
interface incompatible problem with python memcache. The fix
added two extra named parameters to both set and set_multi
method. When only time or timeout parameter is present, then one
of the value will be used. When both time and timeout are present,
the time parameter will be used.
Named parameter min_compress_len is added for pure compatibility
purposes. The current implementation ignores this parameter.
To make swift memcached methods all consistent cross the board,
method incr and decr have also been changed to include a new
named parameter time.
In future OpenStack releases, the named parameter timeout will be
removed, keep the named parameter timeout around for now is
to make sure that mismatched releases between client and server
will still work.
From now on, when a call is made to set, set_multi, decr, incr
by using timeout parametner, a warning message will be logged to
indicate the deprecation of the parameter.
Fixes: bug #1095730
Change-Id: I07af784a54d7d79395fc3265e74145f92f38a893
2013-02-13 13:54:51 -05:00
|
|
|
memcache_client.incr('some_key', delta=5, time=fiftydays)
|
2018-02-01 14:30:32 -08:00
|
|
|
self.assertEqual(memcache_client.get('some_key'), b'5')
|
2016-06-23 12:22:02 -07:00
|
|
|
_junk, cache_timeout, _junk = mock.cache[cache_key]
|
|
|
|
self.assertAlmostEqual(float(cache_timeout), esttimeout, delta=1)
|
|
|
|
|
2012-11-16 00:09:14 -05:00
|
|
|
memcache_client.delete('some_key')
|
2017-06-07 11:37:01 +08:00
|
|
|
self.assertIsNone(memcache_client.get('some_key'))
|
2016-06-23 12:22:02 -07:00
|
|
|
|
2012-11-16 00:09:14 -05:00
|
|
|
memcache_client.incr('some_key', delta=5)
|
2018-02-01 14:30:32 -08:00
|
|
|
self.assertEqual(memcache_client.get('some_key'), b'5')
|
2024-03-14 13:10:00 -07:00
|
|
|
self.assertEqual(mock.cache, {cache_key: (b'0', 0, b'5')})
|
2016-06-23 12:22:02 -07:00
|
|
|
|
Swift MemcacheRing (set) interface is incompatible fixes
This patch fixes the Swift MemcacheRing set and set_multi
interface incompatible problem with python memcache. The fix
added two extra named parameters to both set and set_multi
method. When only time or timeout parameter is present, then one
of the value will be used. When both time and timeout are present,
the time parameter will be used.
Named parameter min_compress_len is added for pure compatibility
purposes. The current implementation ignores this parameter.
To make swift memcached methods all consistent cross the board,
method incr and decr have also been changed to include a new
named parameter time.
In future OpenStack releases, the named parameter timeout will be
removed, keep the named parameter timeout around for now is
to make sure that mismatched releases between client and server
will still work.
From now on, when a call is made to set, set_multi, decr, incr
by using timeout parametner, a warning message will be logged to
indicate the deprecation of the parameter.
Fixes: bug #1095730
Change-Id: I07af784a54d7d79395fc3265e74145f92f38a893
2013-02-13 13:54:51 -05:00
|
|
|
memcache_client.incr('some_key', delta=5, time=55)
|
2018-02-01 14:30:32 -08:00
|
|
|
self.assertEqual(memcache_client.get('some_key'), b'10')
|
2024-03-14 13:10:00 -07:00
|
|
|
self.assertEqual(mock.cache, {cache_key: (b'0', 0, b'10')})
|
|
|
|
|
|
|
|
def test_incr_expiration_time(self):
|
|
|
|
# Test increment with different expiration times
|
|
|
|
memcache_client = memcached.MemcacheRing(
|
|
|
|
['1.2.3.4:11211'], logger=self.logger)
|
|
|
|
mock = MockMemcached()
|
|
|
|
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
|
|
|
|
[(mock, mock)] * 2)
|
|
|
|
|
|
|
|
now = time.time()
|
|
|
|
# Test expiration time < 'EXPTIME_MAXDELTA'
|
|
|
|
with patch('time.time', return_value=now):
|
|
|
|
memcache_client.incr('expiring_key', delta=5, time=1)
|
|
|
|
self.assertEqual(memcache_client.get('expiring_key'), b'5')
|
|
|
|
with patch('time.time', return_value=now + 2):
|
|
|
|
self.assertIsNone(memcache_client.get('expiring_key'))
|
|
|
|
# Test expiration time is 0
|
|
|
|
with patch('time.time', return_value=now):
|
|
|
|
memcache_client.incr('expiring_key', delta=5, time=0)
|
|
|
|
self.assertEqual(memcache_client.get('expiring_key'), b'5')
|
|
|
|
with patch('time.time', return_value=now + 100):
|
|
|
|
self.assertEqual(memcache_client.get('expiring_key'), b'5')
|
|
|
|
memcache_client.delete('expiring_key')
|
|
|
|
# Test expiration time > 'EXPTIME_MAXDELTA'
|
|
|
|
with patch('time.time', return_value=now):
|
|
|
|
memcache_client.incr(
|
|
|
|
'expiring_key', delta=5, time=(EXPTIME_MAXDELTA + 10))
|
|
|
|
with patch('time.time', return_value=(now + EXPTIME_MAXDELTA + 2)):
|
|
|
|
self.assertEqual(memcache_client.get('expiring_key'), b'5')
|
|
|
|
with patch('time.time', return_value=(now + EXPTIME_MAXDELTA + 11)):
|
|
|
|
self.assertIsNone(memcache_client.get('expiring_key'))
|
|
|
|
|
|
|
|
def test_set_expiration_time(self):
|
|
|
|
# Test set with different expiration times
|
|
|
|
memcache_client = memcached.MemcacheRing(
|
|
|
|
['1.2.3.4:11211'], logger=self.logger)
|
|
|
|
mock = MockMemcached()
|
|
|
|
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
|
|
|
|
[(mock, mock)] * 2)
|
|
|
|
|
|
|
|
now = time.time()
|
|
|
|
# Test expiration time < 'EXPTIME_MAXDELTA'
|
|
|
|
with patch('time.time', return_value=now):
|
|
|
|
memcache_client.set('expiring_key', value=5, time=1)
|
|
|
|
self.assertEqual(memcache_client.get('expiring_key'), 5)
|
|
|
|
with patch('time.time', return_value=now + 2):
|
|
|
|
self.assertIsNone(memcache_client.get('expiring_key'))
|
|
|
|
# Test expiration time is 0
|
|
|
|
with patch('time.time', return_value=now):
|
|
|
|
memcache_client.set('expiring_key', value=5, time=0)
|
|
|
|
self.assertEqual(memcache_client.get('expiring_key'), 5)
|
|
|
|
with patch('time.time', return_value=now + 100):
|
|
|
|
self.assertEqual(memcache_client.get('expiring_key'), 5)
|
|
|
|
memcache_client.delete('expiring_key')
|
|
|
|
# Test expiration time > 'EXPTIME_MAXDELTA'
|
|
|
|
with patch('time.time', return_value=now):
|
|
|
|
memcache_client.set(
|
|
|
|
'expiring_key', value=5, time=(EXPTIME_MAXDELTA + 10))
|
|
|
|
with patch('time.time', return_value=(now + EXPTIME_MAXDELTA + 2)):
|
|
|
|
self.assertEqual(memcache_client.get('expiring_key'), 5)
|
|
|
|
with patch('time.time', return_value=(now + EXPTIME_MAXDELTA + 11)):
|
|
|
|
self.assertIsNone(memcache_client.get('expiring_key'))
|
2012-11-16 00:09:14 -05:00
|
|
|
|
2010-10-25 12:52:25 -07:00
|
|
|
def test_decr(self):
|
2020-06-09 10:50:07 -07:00
|
|
|
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
|
|
|
|
logger=self.logger)
|
2010-10-25 12:52:25 -07:00
|
|
|
mock = MockMemcached()
|
2013-09-04 22:20:44 +00:00
|
|
|
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
|
|
|
|
[(mock, mock)] * 2)
|
2015-08-05 23:58:14 +05:30
|
|
|
self.assertEqual(memcache_client.decr('some_key', delta=5), 0)
|
2018-02-01 14:30:32 -08:00
|
|
|
self.assertEqual(memcache_client.get('some_key'), b'0')
|
2015-08-05 23:58:14 +05:30
|
|
|
self.assertEqual(memcache_client.incr('some_key', delta=15), 15)
|
2018-02-01 14:30:32 -08:00
|
|
|
self.assertEqual(memcache_client.get('some_key'), b'15')
|
2015-08-05 23:58:14 +05:30
|
|
|
self.assertEqual(memcache_client.decr('some_key', delta=4), 11)
|
2018-02-01 14:30:32 -08:00
|
|
|
self.assertEqual(memcache_client.get('some_key'), b'11')
|
2015-08-05 23:58:14 +05:30
|
|
|
self.assertEqual(memcache_client.decr('some_key', delta=15), 0)
|
2018-02-01 14:30:32 -08:00
|
|
|
self.assertEqual(memcache_client.get('some_key'), b'0')
|
2011-02-23 11:44:36 -08:00
|
|
|
mock.read_return_none = True
|
|
|
|
self.assertRaises(memcached.MemcacheConnectionError,
|
|
|
|
memcache_client.decr, 'some_key', delta=15)
|
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
def test_retry(self):
    """When the primary server errors out, the client logs the failure
    (without a traceback) and falls back to the next server."""
    client = memcached.MemcacheRing(
        ['1.2.3.4:11211', '1.2.3.5:11211'], logger=self.logger)
    bad = ExplodingMockMemcached()
    good = MockMemcached()
    client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
        [(good, good)])
    client._client_cache['1.2.3.5:11211'] = MockedMemcachePool(
        [(bad, bad), (bad, bad)])

    # set() hits the exploding server first, then retries on the good one.
    now = time.time()
    with patch('time.time', return_value=now):
        client.set('some_key', [1, 2, 3])
    self.assertEqual(bad.exploded, True)
    self.assertEqual(self.logger.get_lines_for_level('error'), [
        'Error talking to memcached: 1.2.3.5:11211: '
        'with key_prefix some_key, method set, time_spent 0.0, '
        '[Errno 32] Broken pipe',
    ])

    # Same fallback behavior for get().
    self.logger.clear()
    bad.exploded = False
    now = time.time()
    with patch('time.time', return_value=now):
        self.assertEqual(client.get('some_key'), [1, 2, 3])
    self.assertEqual(bad.exploded, True)
    self.assertEqual(self.logger.get_lines_for_level('error'), [
        'Error talking to memcached: 1.2.3.5:11211: '
        'with key_prefix some_key, method get, time_spent 0.0, '
        '[Errno 32] Broken pipe',
    ])
    # Check that we really did call create() twice
    self.assertEqual(client._client_cache['1.2.3.5:11211'].mocks,
                     [])
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2020-11-02 15:27:16 -08:00
|
|
|
def test_error_limiting(self):
    """Repeated failures error-limit a server; once every server is
    limited, operations silently become no-ops / cache misses."""
    client = memcached.MemcacheRing(
        ['1.2.3.4:11211', '1.2.3.5:11211'], logger=self.logger)
    exploding = ExplodingMockMemcached()
    healthy = ExplodingMockMemcached()
    healthy.should_explode = False
    client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
        [(healthy, healthy)] * 12)
    client._client_cache['1.2.3.5:11211'] = MockedMemcachePool(
        [(exploding, exploding)] * 12)

    now = time.time()
    with patch('time.time', return_value=now):
        for _ in range(12):
            client.set('some_key', [1, 2, 3])
    # twelfth one skips .5 because of error limiting and goes straight
    # to .4
    self.assertEqual(self.logger.get_lines_for_level('error'), [
        'Error talking to memcached: 1.2.3.5:11211: '
        'with key_prefix some_key, method set, time_spent 0.0, '
        '[Errno 32] Broken pipe',
    ] * 11 + [
        'Error limiting server 1.2.3.5:11211'
    ])
    self.logger.clear()

    # Now make the fallback server fail as well.
    healthy.should_explode = True
    now = time.time()
    with patch('time.time', return_value=now):
        for _ in range(12):
            client.set('some_key', [1, 2, 3])
    # as we keep going, eventually .4 gets error limited, too
    self.assertEqual(self.logger.get_lines_for_level('error'), [
        'Error talking to memcached: 1.2.3.4:11211: '
        'with key_prefix some_key, method set, time_spent 0.0, '
        '[Errno 32] Broken pipe',
    ] * 10 + [
        'Error talking to memcached: 1.2.3.4:11211: '
        'with key_prefix some_key, method set, time_spent 0.0, '
        '[Errno 32] Broken pipe',
        'Error limiting server 1.2.3.4:11211',
        'Error connecting to memcached: ALL: with key_prefix some_key, '
        'method set: No more memcached servers to try',
    ])
    self.logger.clear()

    # continued requests just keep bypassing memcache
    for _ in range(12):
        client.set('some_key', [1, 2, 3])
    self.assertEqual(self.logger.get_lines_for_level('error'), [
        'Error connecting to memcached: ALL: with key_prefix some_key, '
        'method set: No more memcached servers to try',
    ] * 12)
    self.logger.clear()

    # and get()s are all a "cache miss"
    self.assertIsNone(client.get('some_key'))
    self.assertEqual(self.logger.get_lines_for_level('error'), [
        'Error connecting to memcached: ALL: with key_prefix some_key, '
        'method get: No more memcached servers to try',
    ])
|
2020-11-02 15:27:16 -08:00
|
|
|
|
|
|
|
def test_error_disabled(self):
    """With error_limit_time=0, error limiting is disabled: every
    failure is logged and the server is never skipped."""
    memcache_client = memcached.MemcacheRing(
        ['1.2.3.4:11211'], logger=self.logger, error_limit_time=0)
    mock1 = ExplodingMockMemcached()
    memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
        [(mock1, mock1)] * 20)

    now = time.time()
    with patch('time.time', return_value=now):
        for _ in range(20):
            memcache_client.set('some_key', [1, 2, 3])
    # All 20 failures are logged individually -- no 'Error limiting
    # server ...' line ever appears, because limiting is disabled.
    # (The previous comment here was copy-pasted from test_error_limiting
    # and described a two-server scenario that doesn't apply.)
    self.assertEqual(self.logger.get_lines_for_level('error'), [
        'Error talking to memcached: 1.2.3.4:11211: '
        'with key_prefix some_key, method set, time_spent 0.0, '
        '[Errno 32] Broken pipe',
    ] * 20)
|
|
|
|
|
2022-04-26 16:12:49 -07:00
|
|
|
def test_error_raising(self):
    """raise_on_error=True surfaces MemcacheConnectionError; by default
    failures are only logged."""
    client = memcached.MemcacheRing(
        ['1.2.3.4:11211'], logger=self.logger, error_limit_time=0)
    exploding = ExplodingMockMemcached()
    client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
        [(exploding, exploding)] * 20)

    # expect exception when requested...
    now = time.time()
    with patch('time.time', return_value=now):
        with self.assertRaises(MemcacheConnectionError):
            client.set('some_key', [1, 2, 3], raise_on_error=True)
    self.assertEqual(self.logger.get_lines_for_level('error'), [
        'Error talking to memcached: 1.2.3.4:11211: '
        'with key_prefix some_key, method set, time_spent 0.0, '
        '[Errno 32] Broken pipe',
    ])
    self.logger.clear()

    with patch('time.time', return_value=now):
        with self.assertRaises(MemcacheConnectionError):
            client.get('some_key', raise_on_error=True)
    self.assertEqual(self.logger.get_lines_for_level('error'), [
        'Error talking to memcached: 1.2.3.4:11211: '
        'with key_prefix some_key, method get, time_spent 0.0, '
        '[Errno 32] Broken pipe',
    ])
    self.logger.clear()

    # Sharding-style keys are logged with just their key prefix.
    with patch('time.time', return_value=now):
        with self.assertRaises(MemcacheConnectionError):
            client.set(
                'shard-updating-v2/acc/container', [1, 2, 3],
                raise_on_error=True)
    self.assertEqual(self.logger.get_lines_for_level('error'), [
        'Error talking to memcached: 1.2.3.4:11211: '
        'with key_prefix shard-updating-v2/acc, method set, '
        'time_spent 0.0, [Errno 32] Broken pipe',
    ])
    self.logger.clear()

    # ...but default is no exception
    with patch('time.time', return_value=now):
        client.set('some_key', [1, 2, 3])
    self.assertEqual(self.logger.get_lines_for_level('error'), [
        'Error talking to memcached: 1.2.3.4:11211: '
        'with key_prefix some_key, method set, time_spent 0.0, '
        '[Errno 32] Broken pipe',
    ])
    self.logger.clear()

    with patch('time.time', return_value=now):
        client.get('some_key')
    self.assertEqual(self.logger.get_lines_for_level('error'), [
        'Error talking to memcached: 1.2.3.4:11211: '
        'with key_prefix some_key, method get, time_spent 0.0, '
        '[Errno 32] Broken pipe',
    ])
    self.logger.clear()

    with patch('time.time', return_value=now):
        client.set('shard-updating-v2/acc/container', [1, 2, 3])
    self.assertEqual(self.logger.get_lines_for_level('error'), [
        'Error talking to memcached: 1.2.3.4:11211: '
        'with key_prefix shard-updating-v2/acc, method set, '
        'time_spent 0.0, [Errno 32] Broken pipe',
    ])
|
|
|
|
|
2020-11-02 15:27:16 -08:00
|
|
|
def test_error_limiting_custom_config(self):
    """error_limit_time / error_limit_count knobs control when a server
    gets error-limited."""
    def run_calls(time_step, num_calls, **ring_kwargs):
        # Fresh ring + exploding server for each scenario; advance the
        # mocked clock by time_step per call so error timestamps are
        # deterministic.
        self.logger.clear()
        client = memcached.MemcacheRing(
            ['1.2.3.5:11211'], logger=self.logger,
            **ring_kwargs)
        exploding = ExplodingMockMemcached()
        client._client_cache['1.2.3.5:11211'] = \
            MockedMemcachePool([(exploding, exploding)] * num_calls)

        for n in range(num_calls):
            with mock.patch.object(memcached.tm, 'time',
                                   return_value=time_step * n):
                client.set('some_key', [1, 2, 3])

    # with default error_limit_time of 60, one call per 5 secs, twelfth one
    # triggers error limit
    run_calls(5.0, 12)
    self.assertEqual(self.logger.get_lines_for_level('error'), [
        'Error talking to memcached: 1.2.3.5:11211: '
        'with key_prefix some_key, method set, time_spent 0.0, '
        '[Errno 32] Broken pipe',
    ] * 10 + [
        'Error talking to memcached: 1.2.3.5:11211: '
        'with key_prefix some_key, method set, time_spent 0.0, '
        '[Errno 32] Broken pipe',
        'Error limiting server 1.2.3.5:11211',
        'Error connecting to memcached: ALL: with key_prefix some_key, '
        'method set: No more memcached servers to try',
    ])

    # with default error_limit_time of 60, one call per 6 secs, error limit
    # is not triggered
    run_calls(6.0, 20)
    self.assertEqual(self.logger.get_lines_for_level('error'), [
        'Error talking to memcached: 1.2.3.5:11211: '
        'with key_prefix some_key, method set, time_spent 0.0, '
        '[Errno 32] Broken pipe',
    ] * 20)

    # with error_limit_time of 66, one call per 6 secs, twelfth one
    # triggers error limit
    run_calls(6.0, 12, error_limit_time=66)
    self.assertEqual(self.logger.get_lines_for_level('error'), [
        'Error talking to memcached: 1.2.3.5:11211: '
        'with key_prefix some_key, method set, time_spent 0.0, '
        '[Errno 32] Broken pipe',
    ] * 10 + [
        'Error talking to memcached: 1.2.3.5:11211: '
        'with key_prefix some_key, method set, time_spent 0.0, '
        '[Errno 32] Broken pipe',
        'Error limiting server 1.2.3.5:11211',
        'Error connecting to memcached: ALL: with key_prefix some_key, '
        'method set: No more memcached servers to try'])

    # with error_limit_time of 70, one call per 6 secs, error_limit_count
    # of 11, 13th call triggers error limit
    run_calls(6.0, 13, error_limit_time=70, error_limit_count=11)
    self.assertEqual(self.logger.get_lines_for_level('error'), [
        'Error talking to memcached: 1.2.3.5:11211: '
        'with key_prefix some_key, method set, time_spent 0.0, '
        '[Errno 32] Broken pipe',
    ] * 11 + [
        'Error talking to memcached: 1.2.3.5:11211: '
        'with key_prefix some_key, method set, time_spent 0.0, '
        '[Errno 32] Broken pipe',
        'Error limiting server 1.2.3.5:11211',
        'Error connecting to memcached: ALL: with key_prefix some_key, '
        'method set: No more memcached servers to try'])
|
2020-11-02 15:27:16 -08:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
def test_delete(self):
    """delete() removes a previously set key; subsequent get() misses."""
    client = memcached.MemcacheRing(['1.2.3.4:11211'],
                                    logger=self.logger)
    server = MockMemcached()
    client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
        [(server, server)] * 2)
    client.set('some_key', [1, 2, 3])
    self.assertEqual(client.get('some_key'), [1, 2, 3])
    client.delete('some_key')
    self.assertIsNone(client.get('some_key'))
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def test_multi(self):
    """set_multi/get_multi round-trip values, honour the time= expiry
    parameter, and survive a lost connection mid-get_multi.

    Fixes the misnamed local ``fortydays`` (the value is 50 days:
    ``50 * 24 * 60 * 60``) -- renamed to ``fiftydays``.
    """
    memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
                                             logger=self.logger)
    mock = MockMemcached()
    memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
        [(mock, mock)] * 2)

    # No time= given: keys are stored with a 0 (no-expiry) timeout.
    memcache_client.set_multi(
        {'some_key1': [1, 2, 3], 'some_key2': [4, 5, 6]}, 'multi_key')
    self.assertEqual(
        memcache_client.get_multi(('some_key2', 'some_key1'), 'multi_key'),
        [[4, 5, 6], [1, 2, 3]])
    for key in (b'some_key1', b'some_key2'):
        key = md5(key, usedforsecurity=False).hexdigest().encode('ascii')
        self.assertIn(key, mock.cache)
        _junk, cache_timeout, _junk = mock.cache[key]
        self.assertEqual(cache_timeout, 0)

    # A small time= is stored as an absolute deadline (now + 20).
    now = time.time()
    with patch('time.time', return_value=now):
        memcache_client.set_multi(
            {'some_key1': [1, 2, 3], 'some_key2': [4, 5, 6]}, 'multi_key',
            time=20)
    for key in (b'some_key1', b'some_key2'):
        key = md5(key, usedforsecurity=False).hexdigest().encode('ascii')
        _junk, cache_timeout, _junk = mock.cache[key]
        self.assertEqual(cache_timeout, now + 20)

    # A large expiry (50 days, beyond memcached's 30-day delta limit)
    # should land as a unix timestamp close to now + 50 days.
    fiftydays = 50 * 24 * 60 * 60
    esttimeout = time.time() + fiftydays
    memcache_client.set_multi(
        {'some_key1': [1, 2, 3], 'some_key2': [4, 5, 6]}, 'multi_key',
        time=fiftydays)
    for key in (b'some_key1', b'some_key2'):
        key = md5(key, usedforsecurity=False).hexdigest().encode('ascii')
        _junk, cache_timeout, _junk = mock.cache[key]
        self.assertAlmostEqual(float(cache_timeout), esttimeout, delta=1)
    self.assertEqual(memcache_client.get_multi(
        ('some_key2', 'some_key1', 'not_exists'), 'multi_key'),
        [[4, 5, 6], [1, 2, 3], None])

    # Now lets simulate a lost connection and make sure we don't get
    # the index out of range stack trace when it does
    mock_stderr = six.StringIO()
    not_expected = "IndexError: list index out of range"
    with patch("sys.stderr", mock_stderr):
        mock.read_return_empty_str = True
        self.assertEqual(memcache_client.get_multi(
            ('some_key2', 'some_key1', 'not_exists'), 'multi_key'),
            None)
        self.assertFalse(not_expected in mock_stderr.getvalue())
|
|
|
|
|
2019-10-07 18:02:21 +02:00
|
|
|
def test_multi_delete(self):
    """delete() with server_key= only touches the server that key maps
    to, leaving an identically-named key on another server intact."""
    client = memcached.MemcacheRing(['1.2.3.4:11211',
                                     '1.2.3.5:11211'],
                                    logger=self.logger)
    server1 = MockMemcached()
    server2 = MockMemcached()
    client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
        [(server1, server1)] * 2)
    client._client_cache['1.2.3.5:11211'] = MockedMemcachePool(
        [(server2, server2)] * 2)

    # MemcacheRing will put 'some_key0' on server 1.2.3.5:11211 and
    # 'some_key1' and 'multi_key' on '1.2.3.4:11211'
    client.set_multi(
        {'some_key0': [1, 2, 3], 'some_key1': [4, 5, 6]}, 'multi_key')
    self.assertEqual(
        client.get_multi(('some_key1', 'some_key0'), 'multi_key'),
        [[4, 5, 6], [1, 2, 3]])
    for key in (b'some_key0', b'some_key1'):
        key = md5(key, usedforsecurity=False).hexdigest().encode('ascii')
        self.assertIn(key, server1.cache)
        _junk, cache_timeout, _junk = server1.cache[key]
        self.assertEqual(cache_timeout, 0)

    # A direct set of 'some_key0' (no server_key) lands on 1.2.3.5.
    client.set('some_key0', [7, 8, 9])
    self.assertEqual(client.get('some_key0'), [7, 8, 9])
    key = md5(b'some_key0',
              usedforsecurity=False).hexdigest().encode('ascii')
    self.assertIn(key, server2.cache)

    # Delete 'some_key0' with server_key='multi_key'
    client.delete('some_key0', server_key='multi_key')
    self.assertEqual(client.get_multi(
        ('some_key0', 'some_key1'), 'multi_key'),
        [None, [4, 5, 6]])

    # 'some_key0' must still be available on 1.2.3.5:11211
    self.assertEqual(client.get('some_key0'), [7, 8, 9])
    self.assertIn(key, server2.cache)
|
|
|
|
|
Do not use pickle for serialization in memcache, but JSON
We don't want to use pickle as it can execute arbitrary code. JSON is
safer. However, note that it supports serialization for only some
specific subset of object types; this should be enough for what we need,
though.
To avoid issues on upgrades (unability to read pickled values, and cache
poisoning for old servers not understanding JSON), we add a
memcache_serialization_support configuration option, with the following
values:
0 = older, insecure pickle serialization
1 = json serialization but pickles can still be read (still insecure)
2 = json serialization only (secure and the default)
To avoid an instant full cache flush, existing installations should
upgrade with 0, then set to 1 and reload, then after some time (24
hours) set to 2 and reload. Support for 0 and 1 will be removed in
future versions.
Part of bug 1006414.
Change-Id: Id7d6d547b103b4f23ebf5be98b88f09ec6027ce4
2012-06-21 14:37:41 +02:00
|
|
|
def test_serialization(self):
    """Values round-trip through JSON serialization; entries still
    carrying the legacy pickle flag are treated as a cache miss
    instead of being unpickled."""
    memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
                                             logger=self.logger)
    server = MockMemcached()
    memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
        [(server, server)] * 2)

    memcache_client.set('some_key', [1, 2, 3])
    self.assertEqual(memcache_client.get('some_key'), [1, 2, 3])

    # exactly one entry was written, and it is flagged as JSON
    self.assertEqual(len(server.cache), 1)
    cache_key = next(iter(server.cache))
    self.assertEqual(server.cache[cache_key][0], b'2')  # JSON_FLAG

    # Pretend we've got some really old pickle data in there
    server.cache[cache_key] = (b'1',) + server.cache[cache_key][1:]
    # pickled payloads must never be deserialized -> read back as a miss
    self.assertIsNone(memcache_client.get('some_key'))
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-09-04 22:20:44 +00:00
|
|
|
def test_connection_pooling(self):
    """The per-server connection pool never opens more than max_size
    (2) sockets, no matter how many greenthreads are requesting."""
    with patch('swift.common.memcached.socket') as mock_module:
        def mock_getaddrinfo(host, port, family=socket.AF_INET,
                             socktype=socket.SOCK_STREAM, proto=0,
                             flags=0):
            # resolve to exactly the requested endpoint
            return [(family, socktype, proto, '', (host, port))]

        mock_module.getaddrinfo = mock_getaddrinfo

        # patch socket, stub socket.socket, mock sock
        stub_sock = mock_module.socket.return_value

        # every connect attempt is recorded here...
        attempts = []
        # ...and blocks until we feed this queue; a non-None value
        # means a connection was handed out that never should have been
        gate = Queue()
        over_connects = []

        def wait_connect(addr):
            attempts.append(addr)
            sleep(0.1)  # yield
            token = gate.get()
            if token is not None:
                over_connects.append(token)

        stub_sock.connect = wait_connect

        memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
                                                 connect_timeout=10,
                                                 logger=self.logger)
        # sanity: one server, pool capped at two connections
        self.assertEqual(1, len(memcache_client._client_cache))
        for pool in memcache_client._client_cache.values():
            self.assertEqual(2, pool.max_size)

        # make 10 requests "at the same time"
        green_pool = GreenPool()
        for _ in range(10):
            green_pool.spawn(memcache_client.set, 'key', 'value')
        for _ in range(3):
            sleep(0.1)
        self.assertEqual(2, len(attempts))

        # give out a connection
        gate.put(None)

        # at this point, only one connection should have actually been
        # created, the other is in the creation step, and the rest of the
        # clients are not attempting to connect. we let this play out a
        # bit to verify.
        for _ in range(3):
            sleep(0.1)
        self.assertEqual(2, len(attempts))

        # finish up, this allows the final connection to be created, so
        # that all the other clients can use the two existing connections
        # and no others will be created.
        gate.put(None)
        gate.put('nono')
        self.assertEqual(2, len(attempts))
        green_pool.waitall()
        self.assertEqual(2, len(attempts))
        self.assertEqual(0, len(over_connects),
                         "A client was allowed a third connection")
        gate.get_nowait()
        self.assertTrue(gate.empty())
|
2013-09-04 22:20:44 +00:00
|
|
|
|
2013-10-07 21:28:48 -07:00
|
|
|
def test_connection_pool_timeout(self):
    """Clients that cannot obtain a pooled connection within
    pool_timeout fall through to the next server and log/count the
    timeout against the slow one."""
    pools = defaultdict(Queue)
    waiting = defaultdict(int)
    returned = defaultdict(int)

    class TrackingConnPool(memcached.MemcacheConnPool):
        # count how many greenthreads are blocked per host...
        def get(self):
            waiting[self.host] += 1
            conn = pools[self.host].get()
            waiting[self.host] -= 1
            return conn

        # ...and how many connections actually came back
        def put(self, *args, **kwargs):
            pools[self.host].put(*args, **kwargs)
            returned[self.host] += 1

    with mock.patch.object(memcached, 'MemcacheConnPool',
                           TrackingConnPool):
        memcache_client = memcached.MemcacheRing(['1.2.3.4:11211',
                                                  '1.2.3.5:11211'],
                                                 io_timeout=0.5,
                                                 pool_timeout=0.1,
                                                 logger=self.logger)

        # Hand out a couple slow connections to 1.2.3.5, leaving 1.2.3.4
        # fast. All ten (10) clients should try to talk to .5 first, and
        # then move on to .4, and we'll assert all that below.
        slow_conn = MagicMock(), MagicMock()
        slow_conn[0].readline = lambda: b'STORED\r\n'
        slow_conn[1].sendall = lambda x: sleep(0.2)
        pools['1.2.3.5'].put(slow_conn)
        pools['1.2.3.5'].put(slow_conn)

        fast_conn = MagicMock(), MagicMock()
        fast_conn[0].readline = lambda: b'STORED\r\n'
        pools['1.2.3.4'].put(fast_conn)
        pools['1.2.3.4'].put(fast_conn)

        green_pool = GreenPool()
        for _ in range(10):
            green_pool.spawn(memcache_client.set, 'key', 'value')

        # Wait for the dust to settle.
        green_pool.waitall()

        # eight clients gave up waiting on the slow server...
        self.assertEqual(waiting['1.2.3.5'], 8)
        self.assertEqual(len(memcache_client._errors['1.2.3.5:11211']), 8)
        error_logs = self.logger.get_lines_for_level('error')
        self.assertEqual(len(error_logs), 8)
        for line in error_logs:
            self.assertIn(
                'Timeout getting a connection to memcached: 1.2.3.5:11211: '
                'with key_prefix key',
                line)
        self.assertEqual(returned['1.2.3.5'], 2)
        # ...and were all served by the fast server instead
        self.assertEqual(waiting['1.2.3.4'], 0)
        self.assertEqual(len(memcache_client._errors['1.2.3.4:11211']), 0)
        self.assertEqual(returned['1.2.3.4'], 8)

        # and we never got more put in that we gave out
        self.assertEqual(pools['1.2.3.5'].qsize(), 2)
        self.assertEqual(pools['1.2.3.4'].qsize(), 2)
|
2013-10-07 21:28:48 -07:00
|
|
|
|
2016-07-05 17:27:31 -07:00
|
|
|
def test_connection_slow_connect(self):
    """A connect() slower than connect_timeout yields no usable
    connection (the conn generator is immediately exhausted) and the
    half-open socket is closed."""
    with patch('swift.common.memcached.socket') as mock_module:
        def mock_getaddrinfo(host, port, family=socket.AF_INET,
                             socktype=socket.SOCK_STREAM, proto=0,
                             flags=0):
            return [(family, socktype, proto, '', (host, port))]

        mock_module.getaddrinfo = mock_getaddrinfo

        # patch socket, stub socket.socket, mock sock
        stub_sock = mock_module.socket.return_value

        def wait_connect(addr):
            # slow connect gives Timeout Exception
            sleep(1)

        # patch connect method
        stub_sock.connect = wait_connect

        memcache_client = memcached.MemcacheRing(
            ['1.2.3.4:11211'], connect_timeout=0.1, logger=self.logger)

        # sanity
        self.assertEqual(1, len(memcache_client._client_cache))
        for pool in memcache_client._client_cache.values():
            self.assertEqual(2, pool.max_size)

        # try to get connect and no connection found
        # so it will result in StopIteration
        conn_generator = memcache_client._get_conns(self.set_cmd)
        with self.assertRaises(StopIteration):
            next(conn_generator)

        # the timed-out socket must not be leaked
        self.assertEqual(1, stub_sock.close.call_count)
|
|
|
|
|
2021-06-11 11:29:40 +10:00
|
|
|
def test_item_size_warning_threshold(self):
    """Items at or above item_size_warning_threshold trigger a warning
    log; -1 disables the warning; 0 warns on everything.

    Fix vs. previous version: the ``except ValueError`` branch
    contained ``if not err: self.fail(err)`` — exception instances are
    always truthy, so that ``self.fail`` was unreachable dead code; the
    inner helper also took an ``error=False`` parameter that no caller
    used. Both removed; every existing call behaves identically.
    """
    mock = MockMemcached()
    mocked_pool = MockedMemcachePool([(mock, mock)] * 2)

    def do_test(d, threshold, should_warn):
        # Set a value of len(d) with the given warning threshold and
        # check whether (exactly) the expected warning was emitted.
        self.logger.clear()
        try:
            memcache_client = memcached.MemcacheRing(
                ['1.2.3.4:11211'], item_size_warning_threshold=threshold,
                logger=self.logger)
            memcache_client._client_cache['1.2.3.4:11211'] = mocked_pool
            memcache_client.set('some_key', d, serialize=False)
            warning_lines = self.logger.get_lines_for_level('warning')
            if should_warn:
                self.assertIn(
                    'Item size larger than warning threshold: '
                    '%d (%s) >= %d (%s)' % (
                        len(d), human_readable(len(d)), threshold,
                        human_readable(threshold)),
                    warning_lines[0])
            else:
                self.assertFalse(warning_lines)
        except ValueError as err:
            # MemcacheRing rejected the threshold value itself; the
            # rejection message must name the offending value.
            self.assertIn(
                'Config option must be a number, greater than 0, '
                'less than 100, not "%s".' % threshold,
                str(err))

    data = '1' * 100
    # let's start with something easy, say warning at 80
    for data_size, warn in ((79, False), (80, True), (81, True),
                            (99, True), (100, True)):
        do_test(data[:data_size], 80, warn)

    # if we set the threshold to -1 will turn off the warning
    for data_size, warn in ((79, False), (80, False), (81, False),
                            (99, False), (100, False)):
        do_test(data[:data_size], -1, warn)

    # Changing to 0 should warn on everything
    for data_size, warn in ((0, True), (1, True), (50, True),
                            (99, True), (100, True)):
        do_test(data[:data_size], 0, warn)

    # Let's do a big number
    do_test('1' * 2048576, 1000000, True)
|
|
|
|
|
2023-01-05 08:57:34 -08:00
|
|
|
def test_operations_timing_stats(self):
    """Every cache operation emits a memcached.<op>.timing statsd
    timing stamped with the (mocked) wall-clock start time."""
    memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
                                             logger=self.logger)
    server = MockMemcached()
    memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
        [(server, server)] * 2)

    def assert_last_timing(metric, stamp):
        # most recent timing_since call carries (metric, start_time)
        last = self.logger.statsd_client.calls['timing_since'][-1]
        self.assertEqual(metric, last[0][0])
        self.assertEqual(last[0][1], stamp)

    with patch('time.time',) as mock_time:
        mock_time.return_value = 1000.99
        memcache_client.set('some_key', [1, 2, 3])
        assert_last_timing('memcached.set.timing', 1000.99)

        mock_time.return_value = 2000.99
        self.assertEqual(memcache_client.get('some_key'), [1, 2, 3])
        assert_last_timing('memcached.get.timing', 2000.99)

        mock_time.return_value = 3000.99
        self.assertEqual(memcache_client.decr('decr_key', delta=5), 0)
        assert_last_timing('memcached.decr.timing', 3000.99)

        mock_time.return_value = 4000.99
        self.assertEqual(memcache_client.incr('decr_key', delta=5), 5)
        assert_last_timing('memcached.incr.timing', 4000.99)

        mock_time.return_value = 5000.99
        memcache_client.set_multi(
            {'some_key1': [1, 2, 3], 'some_key2': [4, 5, 6]}, 'multi_key')
        assert_last_timing('memcached.set_multi.timing', 5000.99)

        mock_time.return_value = 6000.99
        self.assertEqual(
            memcache_client.get_multi(
                ('some_key2', 'some_key1'),
                'multi_key'),
            [[4, 5, 6],
             [1, 2, 3]])
        assert_last_timing('memcached.get_multi.timing', 6000.99)

        mock_time.return_value = 7000.99
        memcache_client.delete('some_key')
        assert_last_timing('memcached.delete.timing', 7000.99)
|
|
|
|
|
2023-04-17 19:08:32 +01:00
|
|
|
def test_operations_timing_stats_with_incr_exception(self):
    """An unexpected failure inside the server's add handler during
    incr produces memcached.incr.errors.timing plus an error log."""
    memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
                                             logger=self.logger)
    server = MockMemcached()
    memcache_client._client_cache[
        '1.2.3.4:11211'] = MockedMemcachePool(
        [(server, server)] * 2)

    def handle_add(key, flags, exptime, num_bytes, noreply=b''):
        # simulate a server-side blow-up
        raise Exception('add failed')

    with patch('time.time', ) as mock_time, \
            mock.patch.object(server, 'handle_add', handle_add):
        mock_time.return_value = 4000.99
        with self.assertRaises(MemcacheConnectionError):
            memcache_client.incr('incr_key', delta=5)
        self.assertTrue(
            self.logger.statsd_client.calls['timing_since'])
        last = self.logger.statsd_client.calls['timing_since'][-1]
        self.assertEqual('memcached.incr.errors.timing', last[0][0])
        self.assertEqual(last[0][1], 4000.99)
        self.assertEqual(
            'Error talking to memcached: 1.2.3.4:11211: '
            'with key_prefix incr_key, method incr, time_spent 0.0: ',
            self.logger.get_lines_for_level('error')[0])
|
2023-04-17 19:08:32 +01:00
|
|
|
|
|
|
|
def test_operations_timing_stats_with_set_exception(self):
    """An unexpected failure inside the server's set handler produces
    memcached.set.errors.timing plus an error log."""
    memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
                                             logger=self.logger)
    server = MockMemcached()
    memcache_client._client_cache[
        '1.2.3.4:11211'] = MockedMemcachePool(
        [(server, server)] * 2)

    def handle_set(key, flags, exptime, num_bytes, noreply=b''):
        # simulate a server-side blow-up
        raise Exception('set failed')

    with patch('time.time', ) as mock_time, \
            mock.patch.object(server, 'handle_set', handle_set):
        mock_time.return_value = 4000.99
        with self.assertRaises(MemcacheConnectionError):
            memcache_client.set(
                'set_key', [1, 2, 3],
                raise_on_error=True)
        self.assertTrue(
            self.logger.statsd_client.calls['timing_since'])
        last = self.logger.statsd_client.calls['timing_since'][-1]
        self.assertEqual('memcached.set.errors.timing', last[0][0])
        self.assertEqual(last[0][1], 4000.99)
        self.assertEqual(
            'Error talking to memcached: 1.2.3.4:11211: '
            'with key_prefix set_key, method set, time_spent 0.0: ',
            self.logger.get_lines_for_level('error')[0])
|
2023-04-17 19:08:32 +01:00
|
|
|
|
|
|
|
def test_operations_timing_stats_with_get_exception(self):
    """An unexpected failure inside the server's get handler produces
    memcached.get.errors.timing plus an error log."""
    memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
                                             logger=self.logger)
    server = MockMemcached()
    memcache_client._client_cache[
        '1.2.3.4:11211'] = MockedMemcachePool(
        [(server, server)] * 2)

    def handle_get(*keys):
        # simulate a server-side blow-up
        raise Exception('get failed')

    with patch('time.time', ) as mock_time, \
            mock.patch.object(server, 'handle_get', handle_get):
        mock_time.return_value = 4000.99
        with self.assertRaises(MemcacheConnectionError):
            memcache_client.get('get_key', raise_on_error=True)
        self.assertTrue(
            self.logger.statsd_client.calls['timing_since'])
        last = self.logger.statsd_client.calls['timing_since'][-1]
        self.assertEqual('memcached.get.errors.timing', last[0][0])
        self.assertEqual(last[0][1], 4000.99)
        self.assertEqual(
            'Error talking to memcached: 1.2.3.4:11211: '
            'with key_prefix get_key, method get, time_spent 0.0: ',
            self.logger.get_lines_for_level('error')[0])
|
2023-04-17 19:08:32 +01:00
|
|
|
|
|
|
|
def test_operations_timing_stats_with_get_error(self):
    """A MemcacheConnectionError during get is counted separately, as
    memcached.get.conn_err.timing, and logged without a traceback."""
    memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
                                             logger=self.logger)
    server = MockMemcached()
    memcache_client._client_cache[
        '1.2.3.4:11211'] = MockedMemcachePool(
        [(server, server)] * 2)

    def handle_get(*keys):
        # simulate a connection-level failure
        raise MemcacheConnectionError('failed to connect')

    with patch('time.time', ) as mock_time, \
            mock.patch.object(server, 'handle_get', handle_get):
        mock_time.return_value = 4000.99
        with self.assertRaises(MemcacheConnectionError):
            memcache_client.get('get_key', raise_on_error=True)
        self.assertTrue(
            self.logger.statsd_client.calls['timing_since'])
        last = self.logger.statsd_client.calls['timing_since'][-1]
        self.assertEqual('memcached.get.conn_err.timing', last[0][0])
        self.assertEqual(last[0][1], 4000.99)
        self.assertEqual('Error talking to memcached: 1.2.3.4:11211: '
                         'with key_prefix get_key, method get, '
                         'time_spent 0.0, failed to connect',
                         self.logger.get_lines_for_level('error')[0])
|
|
|
|
|
|
|
|
def test_operations_timing_stats_with_incr_timeout(self):
    """An incr that exceeds io_timeout emits
    memcached.incr.timeout.timing and logs config/elapsed times."""
    memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
                                             io_timeout=0.01,
                                             logger=self.logger)
    server = MockMemcached()
    memcache_client._client_cache[
        '1.2.3.4:11211'] = MockedMemcachePool(
        [(server, server)] * 2)

    def handle_add(key, flags, exptime, num_bytes, noreply=b''):
        # stall well past the 0.01s io_timeout
        sleep(0.05)

    with patch('time.time', ) as mock_time, \
            mock.patch.object(server, 'handle_add', handle_add):
        # each time.time() call advances the clock by exactly 1.0
        mock_time.side_effect = itertools.count(4000.99, 1.0)
        with self.assertRaises(MemcacheConnectionError):
            memcache_client.incr('nvratelimit/v2/wf/124593', delta=5)
        self.assertTrue(
            self.logger.statsd_client.calls['timing_since'])
        last = self.logger.statsd_client.calls['timing_since'][-1]
        self.assertEqual('memcached.incr.timeout.timing', last[0][0])
        self.assertEqual(last[0][1], 4002.99)
        error_logs = self.logger.get_lines_for_level('error')
        self.assertIn('Timeout talking to memcached: 1.2.3.4:11211: ',
                      error_logs[0])
        self.assertIn(
            'with key_prefix nvratelimit/v2/wf, ', error_logs[0])
        self.assertIn('method incr, ', error_logs[0])
        self.assertIn(
            'config_timeout 0.01, time_spent 1.0', error_logs[0])
|
|
|
|
|
|
|
|
def test_operations_timing_stats_with_set_timeout(self):
    """A set that exceeds io_timeout emits
    memcached.set.timeout.timing and logs config/elapsed times."""
    memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
                                             io_timeout=0.01,
                                             logger=self.logger)
    server = MockMemcached()
    memcache_client._client_cache[
        '1.2.3.4:11211'] = MockedMemcachePool(
        [(server, server)] * 2)

    def handle_set(key, flags, exptime, num_bytes, noreply=b''):
        # stall well past the 0.01s io_timeout
        sleep(0.05)

    with patch('time.time', ) as mock_time, \
            mock.patch.object(server, 'handle_set', handle_set):
        # each time.time() call advances the clock by exactly 1.0
        mock_time.side_effect = itertools.count(4000.99, 1.0)
        with self.assertRaises(MemcacheConnectionError):
            memcache_client.set(
                'shard-updating-v2/acc/container', [1, 2, 3],
                raise_on_error=True)
        self.assertTrue(
            self.logger.statsd_client.calls['timing_since'])
        last = self.logger.statsd_client.calls['timing_since'][-1]
        self.assertEqual('memcached.set.timeout.timing', last[0][0])
        self.assertEqual(last[0][1], 4002.99)
        error_logs = self.logger.get_lines_for_level('error')
        self.assertIn('Timeout talking to memcached: 1.2.3.4:11211: ',
                      error_logs[0])
        self.assertIn(
            'with key_prefix shard-updating-v2/acc, ', error_logs[0])
        self.assertIn('method set, ', error_logs[0])
        self.assertIn(
            'config_timeout 0.01, time_spent 1.0', error_logs[0])
|
|
|
|
|
|
|
|
def test_operations_timing_stats_with_get_timeout(self):
    """A get that exceeds io_timeout emits
    memcached.get.timeout.timing and logs config/elapsed times."""
    memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
                                             io_timeout=0.01,
                                             logger=self.logger)
    server = MockMemcached()
    memcache_client._client_cache[
        '1.2.3.4:11211'] = MockedMemcachePool(
        [(server, server)] * 2)

    def handle_get(*keys):
        # stall well past the 0.01s io_timeout
        sleep(0.05)

    with patch('time.time', ) as mock_time, \
            mock.patch.object(server, 'handle_get', handle_get):
        # each time.time() call advances the clock by exactly 1.0
        mock_time.side_effect = itertools.count(4000.99, 1.0)
        with self.assertRaises(MemcacheConnectionError):
            memcache_client.get(
                'shard-updating-v2/acc/container', raise_on_error=True)
        self.assertTrue(
            self.logger.statsd_client.calls['timing_since'])
        last = self.logger.statsd_client.calls['timing_since'][-1]
        self.assertEqual('memcached.get.timeout.timing', last[0][0])
        self.assertEqual(last[0][1], 4002.99)
        error_logs = self.logger.get_lines_for_level('error')
        self.assertIn('Timeout talking to memcached: 1.2.3.4:11211: ',
                      error_logs[0])
        self.assertIn(
            'with key_prefix shard-updating-v2/acc, ', error_logs[0])
        self.assertIn('method get, ', error_logs[0])
        self.assertIn(
            'config_timeout 0.01, time_spent 1.0', error_logs[0])
|
|
|
|
|
2023-04-17 19:10:48 +01:00
|
|
|
def test_incr_add_expires(self):
|
|
|
|
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
|
|
|
|
io_timeout=0.01,
|
|
|
|
logger=self.logger)
|
|
|
|
mock_memcache = MockMemcached()
|
|
|
|
memcache_client._client_cache[
|
|
|
|
'1.2.3.4:11211'] = MockedMemcachePool(
|
|
|
|
[(mock_memcache, mock_memcache)] * 2)
|
|
|
|
incr_calls = []
|
|
|
|
orig_incr = mock_memcache.handle_incr
|
|
|
|
orig_add = mock_memcache.handle_add
|
|
|
|
|
|
|
|
def handle_incr(key, value, noreply=b''):
|
|
|
|
if incr_calls:
|
|
|
|
mock_memcache.cache.clear()
|
|
|
|
incr_calls.append(key)
|
|
|
|
orig_incr(key, value, noreply)
|
|
|
|
|
|
|
|
def handle_add(key, flags, exptime, num_bytes, noreply=b''):
|
|
|
|
mock_memcache.cache[key] = 'already set!'
|
|
|
|
orig_add(key, flags, exptime, num_bytes, noreply)
|
|
|
|
mock_memcache.cache.clear()
|
|
|
|
|
|
|
|
with patch('time.time', ) as mock_time:
|
|
|
|
mock_time.side_effect = itertools.count(4000.99, 1.0)
|
|
|
|
with mock.patch.object(mock_memcache, 'handle_incr', handle_incr):
|
|
|
|
with mock.patch.object(mock_memcache, 'handle_add',
|
|
|
|
handle_add):
|
|
|
|
with self.assertRaises(MemcacheConnectionError):
|
|
|
|
memcache_client.incr(
|
|
|
|
'shard-updating-v2/acc/container', time=1.23)
|
2023-05-19 15:35:27 +10:00
|
|
|
self.assertTrue(self.logger.statsd_client.calls['timing_since'])
|
|
|
|
last_stats = self.logger.statsd_client.calls['timing_since'][-1]
|
2023-04-17 19:10:48 +01:00
|
|
|
self.assertEqual('memcached.incr.conn_err.timing',
|
|
|
|
last_stats[0][0])
|
|
|
|
self.assertEqual(last_stats[0][1], 4002.99)
|
|
|
|
error_logs = self.logger.get_lines_for_level('error')
|
|
|
|
self.assertIn('Error talking to memcached: 1.2.3.4:11211: ',
|
|
|
|
error_logs[0])
|
|
|
|
self.assertIn('with key_prefix shard-updating-v2/acc, method incr, '
|
|
|
|
'time_spent 1.0, expired ttl=1.23',
|
|
|
|
error_logs[0])
|
|
|
|
self.assertIn('1.2.3.4:11211', memcache_client._errors)
|
|
|
|
self.assertFalse(memcache_client._errors['1.2.3.4:11211'])
|
|
|
|
|
|
|
|
def test_incr_unexpected_response(self):
|
|
|
|
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
|
|
|
|
io_timeout=0.01,
|
|
|
|
logger=self.logger)
|
|
|
|
mock_memcache = MockMemcached()
|
|
|
|
memcache_client._client_cache[
|
|
|
|
'1.2.3.4:11211'] = MockedMemcachePool(
|
|
|
|
[(mock_memcache, mock_memcache)] * 2)
|
|
|
|
resp = b'UNEXPECTED RESPONSE\r\n'
|
|
|
|
|
|
|
|
def handle_incr(key, value, noreply=b''):
|
|
|
|
mock_memcache.outbuf += resp
|
|
|
|
|
|
|
|
with patch('time.time') as mock_time:
|
|
|
|
mock_time.side_effect = itertools.count(4000.99, 1.0)
|
|
|
|
with mock.patch.object(mock_memcache, 'handle_incr', handle_incr):
|
|
|
|
with self.assertRaises(MemcacheConnectionError):
|
|
|
|
memcache_client.incr(
|
|
|
|
'shard-updating-v2/acc/container', time=1.23)
|
2023-05-19 15:35:27 +10:00
|
|
|
self.assertTrue(self.logger.statsd_client.calls['timing_since'])
|
|
|
|
last_stats = self.logger.statsd_client.calls['timing_since'][-1]
|
2023-04-17 19:10:48 +01:00
|
|
|
self.assertEqual('memcached.incr.errors.timing',
|
|
|
|
last_stats[0][0])
|
|
|
|
self.assertEqual(last_stats[0][1], 4002.99)
|
|
|
|
error_logs = self.logger.get_lines_for_level('error')
|
|
|
|
self.assertIn('Error talking to memcached: 1.2.3.4:11211: ',
|
|
|
|
error_logs[0])
|
2023-05-26 16:25:48 +01:00
|
|
|
self.assertIn("with key_prefix shard-updating-v2/acc, method incr, "
|
|
|
|
"time_spent 1.0" % resp.split(), error_logs[0])
|
2023-04-17 19:10:48 +01:00
|
|
|
self.assertIn('1.2.3.4:11211', memcache_client._errors)
|
2023-05-19 15:35:27 +10:00
|
|
|
self.assertEqual([4005.99], memcache_client._errors['1.2.3.4:11211'])
|
2023-04-17 19:10:48 +01:00
|
|
|
|
2012-09-04 23:01:02 +08:00
|
|
|
|
2021-12-07 13:24:19 +11:00
|
|
|
class ExcConfigParser(object):
    """ConfigParser stand-in that fails loudly if read() is ever called.

    Lets tests prove that a code path never attempts to read a config
    file from disk.
    """

    def read(self, path):
        # Surface the offending path in the exception message so the
        # asserting test can verify exactly what was requested.
        message = 'read called with %r' % path
        raise Exception(message)
|
|
|
|
class EmptyConfigParser(object):
    """ConfigParser stand-in that behaves as if no config file exists."""

    def read(self, path):
        # Falsey return mimics "nothing was parsed" from the real
        # ConfigParser.read(), so callers fall back to their defaults.
        return False
|
|
|
def get_config_parser(memcache_servers='1.2.3.4:5',
                      memcache_max_connections='4',
                      section='memcache',
                      item_size_warning_threshold='75'):
    """Build a ConfigParser replacement that serves canned memcache options.

    Returns a class (not an instance) suitable for patching in place of
    ``memcached.ConfigParser``.  Passing the sentinel value ``'error'``
    for any option makes ``get()`` raise ``NoOptionError`` for it,
    mimicking an option missing from the config file.
    """
    expected_section = section
    # Option-name -> canned-value dispatch table; note that both spellings
    # of the max-connections option map to the same value.
    canned = {
        'memcache_servers': memcache_servers,
        'memcache_max_connections': memcache_max_connections,
        'max_connections': memcache_max_connections,
        'item_size_warning_threshold': item_size_warning_threshold,
    }

    class SetConfigParser(object):

        def items(self, section_name):
            # Only the configured section exists.
            if section_name != expected_section:
                raise NoSectionError(section_name)
            return {
                'memcache_servers': memcache_servers,
                'memcache_max_connections': memcache_max_connections
            }

        def read(self, path):
            # Pretend the config file was found and parsed.
            return True

        def get(self, section, option):
            if section != expected_section:
                raise NoSectionError(option)
            try:
                value = canned[option]
            except KeyError:
                raise NoOptionError(option, section)
            if value == 'error':
                # Sentinel: behave as if the option were absent.
                raise NoOptionError(option, section)
            return value

    return SetConfigParser
|
|
|
|
def start_response(*args):
    """No-op WSGI start_response stub: accept any arguments, do nothing."""
    return None
|
|
|
class TestLoadMemcache(unittest.TestCase):
    """Tests for memcached.load_memcache() configuration handling.

    These tests swap memcached.ConfigParser for the stub parsers defined
    above to control what "reading /etc/swift/memcache.conf" yields, then
    check which values win between inline conf and the extra config file.
    """

    def setUp(self):
        self.logger = debug_logger()

    def test_conf_default_read(self):
        # Whenever any relevant option is missing from the inline conf,
        # load_memcache must try to read /etc/swift/memcache.conf
        # (ExcConfigParser raises from read(), proving it was called).
        with mock.patch.object(memcached, 'ConfigParser', ExcConfigParser):
            for d in ({},
                      {'memcache_servers': '6.7.8.9:10'},
                      {'memcache_max_connections': '30'},
                      {'item_size_warning_threshold': 75},
                      {'memcache_servers': '6.7.8.9:10',
                       'item_size_warning_threshold': '75'},
                      {'item_size_warning_threshold': '75',
                       'memcache_max_connections': '30'},
                      ):
                with self.assertRaises(Exception) as catcher:
                    memcached.load_memcache(d, self.logger)
                self.assertEqual(
                    str(catcher.exception),
                    "read called with '/etc/swift/memcache.conf'")

    def test_conf_set_no_read(self):
        # When the inline conf supplies every option, the config file
        # must NOT be read at all (ExcConfigParser would raise).
        with mock.patch.object(memcached, 'ConfigParser', ExcConfigParser):
            exc = None
            try:
                memcached.load_memcache({
                    'memcache_servers': '1.2.3.4:5',
                    'memcache_max_connections': '30',
                    'item_size_warning_threshold': '80'
                }, self.logger)
            except Exception as err:
                exc = err
        self.assertIsNone(exc)

    def test_conf_default(self):
        # No inline conf and an unreadable config file -> built-in
        # defaults: localhost server, pool size 2, warning disabled (-1).
        with mock.patch.object(memcached, 'ConfigParser', EmptyConfigParser):
            memcache = memcached.load_memcache({}, self.logger)
        self.assertEqual(memcache.memcache_servers, ['127.0.0.1:11211'])
        self.assertEqual(
            memcache._client_cache['127.0.0.1:11211'].max_size, 2)
        self.assertEqual(memcache.item_size_warning_threshold, -1)

    def test_conf_inline(self):
        # Fully-specified inline conf wins over the config file.
        with mock.patch.object(memcached, 'ConfigParser', get_config_parser()):
            memcache = memcached.load_memcache({
                'memcache_servers': '6.7.8.9:10',
                'memcache_max_connections': '5',
                'item_size_warning_threshold': '75'
            }, self.logger)
        self.assertEqual(memcache.memcache_servers, ['6.7.8.9:10'])
        self.assertEqual(
            memcache._client_cache['6.7.8.9:10'].max_size, 5)
        self.assertEqual(memcache.item_size_warning_threshold, 75)

    def test_conf_inline_ratelimiting(self):
        # Error-suppression options are parsed from the inline conf;
        # the interval feeds both _error_limit_time and _duration.
        with mock.patch.object(memcached, 'ConfigParser', get_config_parser()):
            memcache = memcached.load_memcache({
                'error_suppression_limit': '5',
                'error_suppression_interval': '2.5',
            }, self.logger)
        self.assertEqual(memcache._error_limit_count, 5)
        self.assertEqual(memcache._error_limit_time, 2.5)
        self.assertEqual(memcache._error_limit_duration, 2.5)

    def test_conf_inline_tls(self):
        # TLS options must create an SSL context with the given CA file
        # and load the client cert/key pair.
        fake_context = mock.Mock()
        with mock.patch.object(ssl, 'create_default_context',
                               return_value=fake_context):
            with mock.patch.object(memcached, 'ConfigParser',
                                   get_config_parser()):
                memcached.load_memcache({
                    'tls_enabled': 'true',
                    'tls_cafile': 'cafile',
                    'tls_certfile': 'certfile',
                    'tls_keyfile': 'keyfile',
                }, self.logger)
            ssl.create_default_context.assert_called_with(cafile='cafile')
            fake_context.load_cert_chain.assert_called_with('certfile',
                                                            'keyfile')

    def test_conf_extra_no_section(self):
        # Config file lacking the [memcache] section -> defaults.
        with mock.patch.object(memcached, 'ConfigParser',
                               get_config_parser(section='foobar')):
            memcache = memcached.load_memcache({}, self.logger)
        self.assertEqual(memcache.memcache_servers, ['127.0.0.1:11211'])
        self.assertEqual(
            memcache._client_cache['127.0.0.1:11211'].max_size, 2)

    def test_conf_extra_no_option(self):
        # Section present but options missing ('error' sentinel raises
        # NoOptionError) -> defaults.
        replacement_parser = get_config_parser(
            memcache_servers='error',
            memcache_max_connections='error')
        with mock.patch.object(memcached, 'ConfigParser', replacement_parser):
            memcache = memcached.load_memcache({}, self.logger)
        self.assertEqual(memcache.memcache_servers, ['127.0.0.1:11211'])
        self.assertEqual(
            memcache._client_cache['127.0.0.1:11211'].max_size, 2)

    def test_conf_inline_other_max_conn(self):
        # The alternate 'max_connections' spelling is honored inline.
        with mock.patch.object(memcached, 'ConfigParser', get_config_parser()):
            memcache = memcached.load_memcache({
                'memcache_servers': '6.7.8.9:10',
                'max_connections': '5'
            }, self.logger)
        self.assertEqual(memcache.memcache_servers, ['6.7.8.9:10'])
        self.assertEqual(
            memcache._client_cache['6.7.8.9:10'].max_size, 5)

    def test_conf_inline_bad_max_conn(self):
        # Unparseable inline max_connections falls back to the config
        # file's value (4 from get_config_parser's default).
        with mock.patch.object(memcached, 'ConfigParser', get_config_parser()):
            memcache = memcached.load_memcache({
                'memcache_servers': '6.7.8.9:10',
                'max_connections': 'bad42',
            }, self.logger)
        self.assertEqual(memcache.memcache_servers, ['6.7.8.9:10'])
        self.assertEqual(
            memcache._client_cache['6.7.8.9:10'].max_size, 4)

    def test_conf_inline_bad_item_warning_threshold(self):
        # A non-integer warning threshold is a hard error, not a
        # silent fallback.
        with mock.patch.object(memcached, 'ConfigParser', get_config_parser()):
            with self.assertRaises(ValueError) as err:
                memcached.load_memcache({
                    'memcache_servers': '6.7.8.9:10',
                    'item_size_warning_threshold': 'bad42',
                }, self.logger)
        self.assertIn('invalid literal for int() with base 10:',
                      str(err.exception))

    def test_conf_from_extra_conf(self):
        # With no inline conf, everything comes from the config file.
        with mock.patch.object(memcached, 'ConfigParser', get_config_parser()):
            memcache = memcached.load_memcache({}, self.logger)
        self.assertEqual(memcache.memcache_servers, ['1.2.3.4:5'])
        self.assertEqual(
            memcache._client_cache['1.2.3.4:5'].max_size, 4)

    def test_conf_from_extra_conf_bad_max_conn(self):
        # Bad max_connections in the config file -> default pool size 2,
        # servers still taken from the file.
        with mock.patch.object(memcached, 'ConfigParser', get_config_parser(
                memcache_max_connections='bad42')):
            memcache = memcached.load_memcache({}, self.logger)
        self.assertEqual(memcache.memcache_servers, ['1.2.3.4:5'])
        self.assertEqual(
            memcache._client_cache['1.2.3.4:5'].max_size, 2)

    def test_conf_from_inline_and_maxc_from_extra_conf(self):
        # Servers from inline conf, max connections from the config file.
        with mock.patch.object(memcached, 'ConfigParser', get_config_parser()):
            memcache = memcached.load_memcache({
                'memcache_servers': '6.7.8.9:10'}, self.logger)
        self.assertEqual(memcache.memcache_servers, ['6.7.8.9:10'])
        self.assertEqual(
            memcache._client_cache['6.7.8.9:10'].max_size, 4)

    def test_conf_from_inline_and_sers_from_extra_conf(self):
        # Inline conf supplies both options; config-file values ignored.
        with mock.patch.object(memcached, 'ConfigParser', get_config_parser()):
            memcache = memcached.load_memcache({
                'memcache_servers': '6.7.8.9:10',
                'memcache_max_connections': '42',
            }, self.logger)
        self.assertEqual(memcache.memcache_servers, ['6.7.8.9:10'])
        self.assertEqual(
            memcache._client_cache['6.7.8.9:10'].max_size, 42)
|
2010-07-12 17:03:45 -05:00
|
|
|
# Allow running this test module directly (e.g. `python test_memcached.py`).
if __name__ == '__main__':
    unittest.main()
|