# -*- coding:utf-8 -*-
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for swift.common.memcached"""

from collections import defaultdict
import errno
from hashlib import md5
import io
import six
import socket
import time
import unittest
from uuid import uuid4
import os

import mock

from eventlet import GreenPool, sleep, Queue
from eventlet.pools import Pool

from swift.common import memcached
from mock import patch, MagicMock
from test.unit import debug_logger


class MockedMemcachePool(memcached.MemcacheConnPool):
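    """Hand out pre-built mock (fp, sock) pairs instead of opening real
    memcached connections; ``create()`` simply pops the next mock pair."""
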
    def __init__(self, mocks):
        Pool.__init__(self, max_size=2)
        self.mocks = mocks
        # setting this for the eventlet workaround in the MemcacheConnPool
        self._parent_class_getter = super(memcached.MemcacheConnPool,
                                          self).get

    def create(self):
        return self.mocks.pop(0)


class ExplodingMockMemcached(object):
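    """A fake connection whose read/write methods always raise EPIPE, used
    to exercise the client's socket-error handling and retry paths."""
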
    exploded = False

    def sendall(self, string):
        self.exploded = True
        raise socket.error(errno.EPIPE, os.strerror(errno.EPIPE))

    def readline(self):
        self.exploded = True
        raise socket.error(errno.EPIPE, os.strerror(errno.EPIPE))

    def read(self, size):
        self.exploded = True
        raise socket.error(errno.EPIPE, os.strerror(errno.EPIPE))

    def close(self):
        pass


class MockMemcached(object):
    # See https://github.com/memcached/memcached/blob/master/doc/protocol.txt
    # In particular, the "Storage commands" section may be interesting.
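    #
    # The handlers below speak just enough of that text protocol for these
    # tests, roughly:
    #   set <key> <flags> <exptime> <bytes> [noreply]\r\n<data>\r\n
    #       -> STORED\r\n
    #   get <key> [<key> ...]\r\n
    #       -> VALUE <key> <flags> <bytes>\r\n<data>\r\n (per hit), END\r\n
    #   incr|decr <key> <value> [noreply]\r\n
    #       -> <new value>\r\n or NOT_FOUND\r\n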

    def __init__(self):
        self.inbuf = b''
        self.outbuf = b''
        self.cache = {}
        self.down = False
        self.exc_on_delete = False
        self.read_return_none = False
        self.read_return_empty_str = False
        self.close_called = False

    def sendall(self, string):
        if self.down:
            raise Exception('mock is down')
        self.inbuf += string
        while b'\n' in self.inbuf:
            cmd, self.inbuf = self.inbuf.split(b'\n', 1)
            parts = cmd.split()
            cmd_name = parts[0].decode('ascii').lower()
            handler = getattr(self, 'handle_%s' % cmd_name, None)
            if handler:
                handler(*parts[1:])
            else:
                raise ValueError('Unhandled command: %s' % parts[0])

    def handle_set(self, key, flags, exptime, num_bytes, noreply=b''):
        self.cache[key] = flags, exptime, self.inbuf[:int(num_bytes)]
        self.inbuf = self.inbuf[int(num_bytes) + 2:]
        if noreply != b'noreply':
            self.outbuf += b'STORED\r\n'

    def handle_add(self, key, flags, exptime, num_bytes, noreply=b''):
        value = self.inbuf[:int(num_bytes)]
        self.inbuf = self.inbuf[int(num_bytes) + 2:]
        if key in self.cache:
            if noreply != b'noreply':
                self.outbuf += b'NOT_STORED\r\n'
        else:
            self.cache[key] = flags, exptime, value
            if noreply != b'noreply':
                self.outbuf += b'STORED\r\n'

    def handle_delete(self, key, noreply=b''):
        if self.exc_on_delete:
            raise Exception('mock has exc_on_delete set')
        if key in self.cache:
            del self.cache[key]
            if noreply != b'noreply':
                self.outbuf += b'DELETED\r\n'
        elif noreply != b'noreply':
            self.outbuf += b'NOT_FOUND\r\n'

    def handle_get(self, *keys):
        for key in keys:
            if key in self.cache:
                val = self.cache[key]
                self.outbuf += b' '.join([
                    b'VALUE',
                    key,
                    val[0],
                    str(len(val[2])).encode('ascii')
                ]) + b'\r\n'
                self.outbuf += val[2] + b'\r\n'
        self.outbuf += b'END\r\n'

    def handle_incr(self, key, value, noreply=b''):
        if key in self.cache:
            current = self.cache[key][2]
            new_val = str(int(current) + int(value)).encode('ascii')
            self.cache[key] = self.cache[key][:2] + (new_val, )
            self.outbuf += new_val + b'\r\n'
        else:
            self.outbuf += b'NOT_FOUND\r\n'

    def handle_decr(self, key, value, noreply=b''):
        if key in self.cache:
            current = self.cache[key][2]
            new_val = str(int(current) - int(value)).encode('ascii')
            if new_val[:1] == b'-':  # ie, val is negative
                new_val = b'0'
            self.cache[key] = self.cache[key][:2] + (new_val, )
            self.outbuf += new_val + b'\r\n'
        else:
            self.outbuf += b'NOT_FOUND\r\n'

    def readline(self):
        if self.read_return_empty_str:
            return b''
        if self.read_return_none:
            return None
        if self.down:
            raise Exception('mock is down')
        if b'\n' in self.outbuf:
            response, self.outbuf = self.outbuf.split(b'\n', 1)
            return response + b'\n'

    def read(self, size):
        if self.down:
            raise Exception('mock is down')
        if len(self.outbuf) >= size:
            response = self.outbuf[:size]
            self.outbuf = self.outbuf[size:]
            return response

    def close(self):
        self.close_called = True


class TestMemcached(unittest.TestCase):
    """Tests for swift.common.memcached"""

    def setUp(self):
        self.logger = debug_logger()
        patcher = mock.patch('swift.common.memcached.logging', self.logger)
        self.addCleanup(patcher.stop)
        patcher.start()

    def test_get_conns(self):
        sock1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock1.bind(('127.0.0.1', 0))
        sock1.listen(1)
        sock1ipport = '%s:%s' % sock1.getsockname()
        sock2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock2.bind(('127.0.0.1', 0))
        sock2.listen(1)
        orig_port = memcached.DEFAULT_MEMCACHED_PORT
        try:
            sock2ip, memcached.DEFAULT_MEMCACHED_PORT = sock2.getsockname()
            sock2ipport = '%s:%s' % (sock2ip,
                                     memcached.DEFAULT_MEMCACHED_PORT)
            # We're deliberately using sock2ip (no port) here to test that the
            # default port is used.
            memcache_client = memcached.MemcacheRing([sock1ipport, sock2ip])
            one = two = True
            while one or two:  # Run until we match hosts one and two
                key = uuid4().hex.encode('ascii')
                for conn in memcache_client._get_conns(key):
                    if 'b' not in getattr(conn[1], 'mode', ''):
                        self.assertIsInstance(conn[1], (
                            io.RawIOBase, io.BufferedIOBase))
                    peeripport = '%s:%s' % conn[2].getpeername()
                    self.assertTrue(peeripport in (sock1ipport, sock2ipport))
                    if peeripport == sock1ipport:
                        one = False
                    if peeripport == sock2ipport:
                        two = False
            self.assertEqual(len(memcache_client._errors[sock1ipport]), 0)
            self.assertEqual(len(memcache_client._errors[sock2ip]), 0)
        finally:
            memcached.DEFAULT_MEMCACHED_PORT = orig_port

    def test_get_conns_v6(self):
        if not socket.has_ipv6:
            return
        try:
            sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
            sock.bind(('::1', 0, 0, 0))
            sock.listen(1)
            sock_addr = sock.getsockname()
            server_socket = '[%s]:%s' % (sock_addr[0], sock_addr[1])
            memcache_client = memcached.MemcacheRing([server_socket])
            key = uuid4().hex.encode('ascii')
            for conn in memcache_client._get_conns(key):
                peer_sockaddr = conn[2].getpeername()
                peer_socket = '[%s]:%s' % (peer_sockaddr[0], peer_sockaddr[1])
                self.assertEqual(peer_socket, server_socket)
            self.assertEqual(len(memcache_client._errors[server_socket]), 0)
        finally:
            sock.close()

    def test_get_conns_v6_default(self):
        if not socket.has_ipv6:
            return
        try:
            sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
            sock.bind(('::1', 0))
            sock.listen(1)
            sock_addr = sock.getsockname()
            server_socket = '[%s]:%s' % (sock_addr[0], sock_addr[1])
            server_host = '[%s]' % sock_addr[0]
            memcached.DEFAULT_MEMCACHED_PORT = sock_addr[1]
            memcache_client = memcached.MemcacheRing([server_host])
            key = uuid4().hex.encode('ascii')
            for conn in memcache_client._get_conns(key):
                peer_sockaddr = conn[2].getpeername()
                peer_socket = '[%s]:%s' % (peer_sockaddr[0], peer_sockaddr[1])
                self.assertEqual(peer_socket, server_socket)
            self.assertEqual(len(memcache_client._errors[server_host]), 0)
        finally:
            sock.close()

    def test_get_conns_bad_v6(self):
        with self.assertRaises(ValueError):
            # IPv6 address with missing [] is invalid
            server_socket = '%s:%s' % ('::1', 11211)
            memcached.MemcacheRing([server_socket])

    def test_get_conns_hostname(self):
        with patch('swift.common.memcached.socket.getaddrinfo') as addrinfo:
            try:
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.bind(('127.0.0.1', 0))
                sock.listen(1)
                sock_addr = sock.getsockname()
                fqdn = socket.getfqdn()
                server_socket = '%s:%s' % (fqdn, sock_addr[1])
                addrinfo.return_value = [(socket.AF_INET,
                                          socket.SOCK_STREAM, 0, '',
                                          ('127.0.0.1', sock_addr[1]))]
                memcache_client = memcached.MemcacheRing([server_socket])
                key = uuid4().hex.encode('ascii')
                for conn in memcache_client._get_conns(key):
                    peer_sockaddr = conn[2].getpeername()
                    peer_socket = '%s:%s' % (peer_sockaddr[0],
                                             peer_sockaddr[1])
                    self.assertEqual(peer_socket,
                                     '127.0.0.1:%d' % sock_addr[1])
                self.assertEqual(len(memcache_client._errors[server_socket]),
                                 0)
            finally:
                sock.close()

    def test_get_conns_hostname6(self):
        with patch('swift.common.memcached.socket.getaddrinfo') as addrinfo:
            try:
                sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
                sock.bind(('::1', 0))
                sock.listen(1)
                sock_addr = sock.getsockname()
                fqdn = socket.getfqdn()
                server_socket = '%s:%s' % (fqdn, sock_addr[1])
                addrinfo.return_value = [(socket.AF_INET6,
                                          socket.SOCK_STREAM, 0, '',
                                          ('::1', sock_addr[1]))]
                memcache_client = memcached.MemcacheRing([server_socket])
                key = uuid4().hex.encode('ascii')
                for conn in memcache_client._get_conns(key):
                    peer_sockaddr = conn[2].getpeername()
                    peer_socket = '[%s]:%s' % (peer_sockaddr[0],
                                               peer_sockaddr[1])
                    self.assertEqual(peer_socket, '[::1]:%d' % sock_addr[1])
                self.assertEqual(len(memcache_client._errors[server_socket]),
                                 0)
            finally:
                sock.close()

    def test_set_get_json(self):
        memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'])
        mock = MockMemcached()
        memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
            [(mock, mock)] * 2)
        cache_key = md5(b'some_key').hexdigest().encode('ascii')

        memcache_client.set('some_key', [1, 2, 3])
        self.assertEqual(memcache_client.get('some_key'), [1, 2, 3])
        # See JSON_FLAG
        self.assertEqual(mock.cache, {cache_key: (b'2', b'0', b'[1, 2, 3]')})

        memcache_client.set('some_key', [4, 5, 6])
        self.assertEqual(memcache_client.get('some_key'), [4, 5, 6])
        self.assertEqual(mock.cache, {cache_key: (b'2', b'0', b'[4, 5, 6]')})

        memcache_client.set('some_key', ['simple str', 'utf8 str éà'])
        # As per http://wiki.openstack.org/encoding,
        # we should expect to have unicode
        self.assertEqual(
            memcache_client.get('some_key'), ['simple str', u'utf8 str éà'])
        self.assertEqual(mock.cache, {cache_key: (
            b'2', b'0', b'["simple str", "utf8 str \\u00e9\\u00e0"]')})

        memcache_client.set('some_key', [1, 2, 3], time=20)
        self.assertEqual(mock.cache, {cache_key: (b'2', b'20', b'[1, 2, 3]')})

        sixtydays = 60 * 24 * 60 * 60
        esttimeout = time.time() + sixtydays
        memcache_client.set('some_key', [1, 2, 3], time=sixtydays)
        _junk, cache_timeout, _junk = mock.cache[cache_key]
        self.assertAlmostEqual(float(cache_timeout), esttimeout, delta=1)

    def test_get_failed_connection_mid_request(self):
        memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'])
        mock = MockMemcached()
        memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
            [(mock, mock)] * 2)
        memcache_client.set('some_key', [1, 2, 3])
        self.assertEqual(memcache_client.get('some_key'), [1, 2, 3])
        self.assertEqual(list(mock.cache.values()),
                         [(b'2', b'0', b'[1, 2, 3]')])

        # Now let's return an empty string and make sure we aren't logging
        # the error.
        fake_stdout = six.StringIO()
        # force the logging through the DebugLogger instead of the nose
        # handler. This will use stdout, so we can assert that no stack trace
        # is logged.
        logger = debug_logger()
        with patch("sys.stdout", fake_stdout),\
                patch('swift.common.memcached.logging', logger):
            mock.read_return_empty_str = True
            self.assertIsNone(memcache_client.get('some_key'))
        log_lines = logger.get_lines_for_level('error')
        self.assertIn('Error talking to memcached', log_lines[0])
        self.assertFalse(log_lines[1:])
        self.assertNotIn("Traceback", fake_stdout.getvalue())

    def test_incr(self):
        memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'])
        mock = MockMemcached()
        memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
            [(mock, mock)] * 2)
        self.assertEqual(memcache_client.incr('some_key', delta=5), 5)
        self.assertEqual(memcache_client.get('some_key'), b'5')
        self.assertEqual(memcache_client.incr('some_key', delta=5), 10)
        self.assertEqual(memcache_client.get('some_key'), b'10')
        self.assertEqual(memcache_client.incr('some_key', delta=1), 11)
        self.assertEqual(memcache_client.get('some_key'), b'11')
        self.assertEqual(memcache_client.incr('some_key', delta=-5), 6)
        self.assertEqual(memcache_client.get('some_key'), b'6')
        self.assertEqual(memcache_client.incr('some_key', delta=-15), 0)
        self.assertEqual(memcache_client.get('some_key'), b'0')
        mock.read_return_none = True
        self.assertRaises(memcached.MemcacheConnectionError,
                          memcache_client.incr, 'some_key', delta=-15)
        self.assertTrue(mock.close_called)

    def test_incr_failed_connection_mid_request(self):
        memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'])
        mock = MockMemcached()
        memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
            [(mock, mock)] * 2)
        self.assertEqual(memcache_client.incr('some_key', delta=5), 5)
        self.assertEqual(memcache_client.get('some_key'), b'5')
        self.assertEqual(memcache_client.incr('some_key', delta=5), 10)
        self.assertEqual(memcache_client.get('some_key'), b'10')

        # Now let's return an empty string and make sure we aren't logging
        # the error.
        fake_stdout = six.StringIO()
        # force the logging through the DebugLogger instead of the nose
        # handler. This will use stdout, so we can assert that no stack trace
        # is logged.
        logger = debug_logger()
        with patch("sys.stdout", fake_stdout), \
                patch('swift.common.memcached.logging', logger):
            mock.read_return_empty_str = True
            self.assertRaises(memcached.MemcacheConnectionError,
                              memcache_client.incr, 'some_key', delta=1)
        log_lines = logger.get_lines_for_level('error')
        self.assertIn('Error talking to memcached', log_lines[0])
        self.assertFalse(log_lines[1:])
        self.assertNotIn('Traceback', fake_stdout.getvalue())

    def test_incr_w_timeout(self):
        memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'])
        mock = MockMemcached()
        memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
            [(mock, mock)] * 2)
        cache_key = md5(b'some_key').hexdigest().encode('ascii')

        memcache_client.incr('some_key', delta=5, time=55)
        self.assertEqual(memcache_client.get('some_key'), b'5')
        self.assertEqual(mock.cache, {cache_key: (b'0', b'55', b'5')})

        memcache_client.delete('some_key')
        self.assertIsNone(memcache_client.get('some_key'))

        fiftydays = 50 * 24 * 60 * 60
        esttimeout = time.time() + fiftydays
        memcache_client.incr('some_key', delta=5, time=fiftydays)
        self.assertEqual(memcache_client.get('some_key'), b'5')
        _junk, cache_timeout, _junk = mock.cache[cache_key]
        self.assertAlmostEqual(float(cache_timeout), esttimeout, delta=1)

        memcache_client.delete('some_key')
        self.assertIsNone(memcache_client.get('some_key'))

        memcache_client.incr('some_key', delta=5)
        self.assertEqual(memcache_client.get('some_key'), b'5')
        self.assertEqual(mock.cache, {cache_key: (b'0', b'0', b'5')})

        memcache_client.incr('some_key', delta=5, time=55)
        self.assertEqual(memcache_client.get('some_key'), b'10')
        self.assertEqual(mock.cache, {cache_key: (b'0', b'0', b'10')})

    def test_decr(self):
        memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'])
        mock = MockMemcached()
        memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
            [(mock, mock)] * 2)
        self.assertEqual(memcache_client.decr('some_key', delta=5), 0)
        self.assertEqual(memcache_client.get('some_key'), b'0')
        self.assertEqual(memcache_client.incr('some_key', delta=15), 15)
        self.assertEqual(memcache_client.get('some_key'), b'15')
        self.assertEqual(memcache_client.decr('some_key', delta=4), 11)
        self.assertEqual(memcache_client.get('some_key'), b'11')
        self.assertEqual(memcache_client.decr('some_key', delta=15), 0)
        self.assertEqual(memcache_client.get('some_key'), b'0')
        mock.read_return_none = True
        self.assertRaises(memcached.MemcacheConnectionError,
                          memcache_client.decr, 'some_key', delta=15)

    def test_retry(self):
        memcache_client = memcached.MemcacheRing(
            ['1.2.3.4:11211', '1.2.3.5:11211'])
        mock1 = ExplodingMockMemcached()
        mock2 = MockMemcached()
        memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
            [(mock2, mock2)])
        memcache_client._client_cache['1.2.3.5:11211'] = MockedMemcachePool(
            [(mock1, mock1), (mock1, mock1)])
        memcache_client.set('some_key', [1, 2, 3])
|
Socket errors don't warrant tracebacks when talking to memcached
Currently, timeouts when talking to memcached cause log lines like
ERROR:root:Timeout connecting to memcached: 192.168.23.62:11211
Meanwhile, socket errors (which you'd expect to be about as common
as timeouts) cause log lines like
ERROR:root:Error talking to memcached: 192.168.23.70:11211
Traceback (most recent call last):
File "/usr/lib/pymodules/python2.7/swift/common/memcached.py", line 293, in set
(key, flags, timeout, len(value), value))
File "/usr/lib/python2.7/dist-packages/eventlet/greenio/base.py", line 390, in sendall
tail = self.send(data, flags)
File "/usr/lib/python2.7/dist-packages/eventlet/greenio/base.py", line 384, in send
return self._send_loop(self.fd.send, data, flags)
File "/usr/lib/python2.7/dist-packages/eventlet/greenio/base.py", line 371, in _send_loop
return send_method(data, *args)
error: [Errno 32] Broken pipe
... or ...
ERROR:root:Error connecting to memcached: 192.168.23.73:11211
Traceback (most recent call last):
File "/opt/ss/lib/python2.7/site-packages/swift/common/memcached.py", line 239, in _get_conns
fp, sock = self._client_cache[server].get()
File "/opt/ss/lib/python2.7/site-packages/swift/common/memcached.py", line 135, in get
fp, sock = self.create()
File "/opt/ss/lib/python2.7/site-packages/swift/common/memcached.py", line 128, in create
sock.connect((host, int(port)))
File "/opt/ss/lib/python2.7/site-packages/eventlet/greenio/base.py", line 237, in connect
while not socket_connect(fd, address):
File "/opt/ss/lib/python2.7/site-packages/eventlet/greenio/base.py", line 39, in socket_connect
raise socket.error(err, errno.errorcode[err])
error: [Errno 101] ENETUNREACH
... which seems excessive. Now, socket errors are logged as normal
errors, without the tracebacks.
Change-Id: I71a2c4786c0406dbc43b829cad5a0c7e2c45de21
2017-08-22 05:31:59 +00:00
|
|
|
self.assertEqual(mock1.exploded, True)
|
|
|
|
self.assertEqual(self.logger.get_lines_for_level('error'), [
|
|
|
|
'Error talking to memcached: 1.2.3.5:11211: '
|
|
|
|
'[Errno 32] Broken pipe',
|
|
|
|
])
|
|
|
|
|
|
|
|
self.logger.clear()
|
|
|
|
mock1.exploded = False
|
2015-08-05 18:28:14 +00:00
|
|
|
self.assertEqual(memcache_client.get('some_key'), [1, 2, 3])
|
|
|
|
self.assertEqual(mock1.exploded, True)
|
Socket errors don't warrant tracebacks when talking to memcached
Currently, timeouts when talking to memcached cause log lines like
ERROR:root:Timeout connecting to memcached: 192.168.23.62:11211
Meanwhile, socket errors (which you'd expect to be about as common
as timeouts) cause log lines like
ERROR:root:Error talking to memcached: 192.168.23.70:11211
Traceback (most recent call last):
File "/usr/lib/pymodules/python2.7/swift/common/memcached.py", line 293, in set
(key, flags, timeout, len(value), value))
File "/usr/lib/python2.7/dist-packages/eventlet/greenio/base.py", line 390, in sendall
tail = self.send(data, flags)
File "/usr/lib/python2.7/dist-packages/eventlet/greenio/base.py", line 384, in send
return self._send_loop(self.fd.send, data, flags)
File "/usr/lib/python2.7/dist-packages/eventlet/greenio/base.py", line 371, in _send_loop
return send_method(data, *args)
error: [Errno 32] Broken pipe
... or ...
ERROR:root:Error connecting to memcached: 192.168.23.73:11211
Traceback (most recent call last):
File "/opt/ss/lib/python2.7/site-packages/swift/common/memcached.py", line 239, in _get_conns
fp, sock = self._client_cache[server].get()
File "/opt/ss/lib/python2.7/site-packages/swift/common/memcached.py", line 135, in get
fp, sock = self.create()
File "/opt/ss/lib/python2.7/site-packages/swift/common/memcached.py", line 128, in create
sock.connect((host, int(port)))
File "/opt/ss/lib/python2.7/site-packages/eventlet/greenio/base.py", line 237, in connect
while not socket_connect(fd, address):
File "/opt/ss/lib/python2.7/site-packages/eventlet/greenio/base.py", line 39, in socket_connect
raise socket.error(err, errno.errorcode[err])
error: [Errno 101] ENETUNREACH
... which seems excessive. Now, socket errors are logged as normal
errors, without the tracebacks.
Change-Id: I71a2c4786c0406dbc43b829cad5a0c7e2c45de21
2017-08-22 05:31:59 +00:00
|
|
|
self.assertEqual(self.logger.get_lines_for_level('error'), [
|
|
|
|
'Error talking to memcached: 1.2.3.5:11211: '
|
|
|
|
'[Errno 32] Broken pipe',
|
|
|
|
])
|
|
|
|
# Check that we really did call create() twice
|
|
|
|
self.assertEqual(memcache_client._client_cache['1.2.3.5:11211'].mocks,
|
|
|
|
[])
def test_delete(self):
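        # Simple round trip: after delete, get should return None instead of
        # the previously stored value.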
        memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'])
        mock = MockMemcached()
        memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
            [(mock, mock)] * 2)
        memcache_client.set('some_key', [1, 2, 3])
        self.assertEqual(memcache_client.get('some_key'), [1, 2, 3])
        memcache_client.delete('some_key')
        self.assertIsNone(memcache_client.get('some_key'))
def test_multi(self):
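        # set_multi/get_multi round trip: results come back in the order the
        # keys were requested, missing keys come back as None, and the time
        # parameter controls the expiry value the mock records per key.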
        memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'])
        mock = MockMemcached()
        memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
            [(mock, mock)] * 2)

        memcache_client.set_multi(
            {'some_key1': [1, 2, 3], 'some_key2': [4, 5, 6]}, 'multi_key')
        self.assertEqual(
            memcache_client.get_multi(('some_key2', 'some_key1'), 'multi_key'),
            [[4, 5, 6], [1, 2, 3]])
        for key in (b'some_key1', b'some_key2'):
            key = md5(key).hexdigest().encode('ascii')
            self.assertIn(key, mock.cache)
            _junk, cache_timeout, _junk = mock.cache[key]
            self.assertEqual(cache_timeout, b'0')

        memcache_client.set_multi(
            {'some_key1': [1, 2, 3], 'some_key2': [4, 5, 6]}, 'multi_key',
            time=20)
        for key in (b'some_key1', b'some_key2'):
            key = md5(key).hexdigest().encode('ascii')
            _junk, cache_timeout, _junk = mock.cache[key]
            self.assertEqual(cache_timeout, b'20')

        fortydays = 50 * 24 * 60 * 60
        esttimeout = time.time() + fortydays
        memcache_client.set_multi(
            {'some_key1': [1, 2, 3], 'some_key2': [4, 5, 6]}, 'multi_key',
            time=fortydays)
        for key in (b'some_key1', b'some_key2'):
            key = md5(key).hexdigest().encode('ascii')
            _junk, cache_timeout, _junk = mock.cache[key]
            self.assertAlmostEqual(float(cache_timeout), esttimeout, delta=1)
        self.assertEqual(memcache_client.get_multi(
            ('some_key2', 'some_key1', 'not_exists'), 'multi_key'),
            [[4, 5, 6], [1, 2, 3], None])

        # Now let's simulate a lost connection and make sure we don't get
        # an "index out of range" traceback when it happens
        mock_stderr = six.StringIO()
        not_expected = "IndexError: list index out of range"
        with patch("sys.stderr", mock_stderr):
            mock.read_return_empty_str = True
            self.assertEqual(memcache_client.get_multi(
                ('some_key2', 'some_key1', 'not_exists'), 'multi_key'),
                None)
            self.assertNotIn(not_expected, mock_stderr.getvalue())
def test_serialization(self):
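        # Exercises the _allow_pickle/_allow_unpickle combinations: a value
        # written while pickling is enabled stays readable as long as
        # unpickling is allowed, and get returns None once both are off.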
        memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
                                                 allow_pickle=True)
        mock = MockMemcached()
        memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
            [(mock, mock)] * 2)
        memcache_client.set('some_key', [1, 2, 3])
        self.assertEqual(memcache_client.get('some_key'), [1, 2, 3])
        memcache_client._allow_pickle = False
        memcache_client._allow_unpickle = True
        self.assertEqual(memcache_client.get('some_key'), [1, 2, 3])
        memcache_client._allow_unpickle = False
        self.assertIsNone(memcache_client.get('some_key'))
        memcache_client.set('some_key', [1, 2, 3])
        self.assertEqual(memcache_client.get('some_key'), [1, 2, 3])
        memcache_client._allow_unpickle = True
        self.assertEqual(memcache_client.get('some_key'), [1, 2, 3])
        memcache_client._allow_pickle = True
        self.assertEqual(memcache_client.get('some_key'), [1, 2, 3])
def test_connection_pooling(self):
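        # With connections gated through a Queue, no more than two
        # connections per server (pool.max_size) should ever be created, no
        # matter how many concurrent clients are waiting.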
        with patch('swift.common.memcached.socket') as mock_module:
            def mock_getaddrinfo(host, port, family=socket.AF_INET,
                                 socktype=socket.SOCK_STREAM, proto=0,
                                 flags=0):
                return [(family, socktype, proto, '', (host, port))]

            mock_module.getaddrinfo = mock_getaddrinfo
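            # The stubbed getaddrinfo just echoes host/port back, so no real
            # DNS lookup happens and the connect targets stay deterministic.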

            # patch socket, stub socket.socket, mock sock
            mock_sock = mock_module.socket.return_value

            # track clients waiting for connections
            connected = []
            connections = Queue()
            errors = []

            def wait_connect(addr):
                connected.append(addr)
                sleep(0.1)  # yield
                val = connections.get()
                if val is not None:
                    errors.append(val)

            mock_sock.connect = wait_connect

            memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
                                                     connect_timeout=10)
            # sanity
            self.assertEqual(1, len(memcache_client._client_cache))
            for server, pool in memcache_client._client_cache.items():
                self.assertEqual(2, pool.max_size)

            # make 10 requests "at the same time"
            p = GreenPool()
            for i in range(10):
                p.spawn(memcache_client.set, 'key', 'value')
            for i in range(3):
                sleep(0.1)
            self.assertEqual(2, len(connected))

            # give out a connection
            connections.put(None)

            # at this point, only one connection should have actually been
            # created, the other is in the creation step, and the rest of the
            # clients are not attempting to connect. we let this play out a
            # bit to verify.
            for i in range(3):
                sleep(0.1)
            self.assertEqual(2, len(connected))

            # finish up, this allows the final connection to be created, so
            # that all the other clients can use the two existing connections
            # and no others will be created.
            connections.put(None)
            connections.put('nono')
            self.assertEqual(2, len(connected))
            p.waitall()
            self.assertEqual(2, len(connected))
            self.assertEqual(0, len(errors),
                             "A client was allowed a third connection")
            connections.get_nowait()
            self.assertTrue(connections.empty())
def test_connection_pool_timeout(self):
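        # Two slow pooled connections on 1.2.3.5 mean only two of the ten
        # clients get served there; the rest should give up after
        # pool_timeout, log a timeout error, and be served by 1.2.3.4.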
        connections = defaultdict(Queue)
        pending = defaultdict(int)
        served = defaultdict(int)

        class MockConnectionPool(memcached.MemcacheConnPool):
            def get(self):
                pending[self.host] += 1
                conn = connections[self.host].get()
                pending[self.host] -= 1
                return conn

            def put(self, *args, **kwargs):
                connections[self.host].put(*args, **kwargs)
                served[self.host] += 1

        with mock.patch.object(memcached, 'MemcacheConnPool',
                               MockConnectionPool):
            memcache_client = memcached.MemcacheRing(['1.2.3.4:11211',
                                                      '1.2.3.5:11211'],
                                                     io_timeout=0.5,
                                                     pool_timeout=0.1)

        # Hand out a couple slow connections to 1.2.3.5, leaving 1.2.3.4
        # fast. All ten clients should try to talk to .5 first, and then
        # move on to .4, and we'll assert all of that below.
        mock_conn = MagicMock(), MagicMock()
        mock_conn[1].sendall = lambda x: sleep(0.2)
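        # The 0.2s sendall keeps both 1.2.3.5 connections busy long enough
        # that the other waiters exceed pool_timeout (0.1s) while queued for
        # this pool (a reading of the numbers asserted below).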
        connections['1.2.3.5'].put(mock_conn)
        connections['1.2.3.5'].put(mock_conn)

        mock_conn = MagicMock(), MagicMock()
        connections['1.2.3.4'].put(mock_conn)
        connections['1.2.3.4'].put(mock_conn)

        p = GreenPool()
        for i in range(10):
            p.spawn(memcache_client.set, 'key', 'value')

        # Wait for the dust to settle.
        p.waitall()

        self.assertEqual(pending['1.2.3.5'], 8)
        self.assertEqual(len(memcache_client._errors['1.2.3.5:11211']), 8)
        self.assertEqual(
            self.logger.get_lines_for_level('error'),
            ['Timeout getting a connection to memcached: 1.2.3.5:11211'] * 8)
        self.assertEqual(served['1.2.3.5'], 2)
        self.assertEqual(pending['1.2.3.4'], 0)
        self.assertEqual(len(memcache_client._errors['1.2.3.4:11211']), 0)
        self.assertEqual(served['1.2.3.4'], 8)

        # and we never got more put in than we gave out
        self.assertEqual(connections['1.2.3.5'].qsize(), 2)
        self.assertEqual(connections['1.2.3.4'].qsize(), 2)
if __name__ == '__main__':
    unittest.main()