Address some late comments for memcache clients

This change addresses some late comments from the following review:
https://review.openstack.org/#/c/119452/31

Change-Id: I031620f9085ff914aa9b99c21387f953b6ada171
Related-Bug: #1360446
Author: Lance Bragstad, 2014-09-26 15:31:24 +00:00 (committed by David Stanek)
Parent: 24f101737c
Commit: bdc0f68210
5 changed files with 56 additions and 37 deletions


@@ -316,28 +316,27 @@ configuration option.
 The drivers Keystone provides are:
 
-* ``keystone.token.persistence.backends.sql.Token`` - The SQL-based (default)
-  token persistence engine. This backend stores all token data in the same SQL
-  store that is used for Identity/Assignment/etc.
-* ``keystone.token.persistence.backends.memcache.Token`` - The memcached based
-  token persistence backend. This backend relies on ``dogpile.cache`` and stores
-  the token data in a set of memcached servers. The servers urls are specified
-  in the ``[memcache]\servers`` configuration option in the Keystone config.
 * ``keystone.token.persistence.backends.memcache_pool.Token`` - The pooled memcached
   token persistence engine. This backend supports the concept of pooled memcache
   client object (allowing for the re-use of the client objects). This backend has
   a number of extra tunable options in the ``[memcache]`` section of the config.
+* ``keystone.token.persistence.backends.sql.Token`` - The SQL-based (default)
+  token persistence engine.
+* ``keystone.token.persistence.backends.memcache.Token`` - The memcached based
+  token persistence backend. This backend relies on ``dogpile.cache`` and stores
+  the token data in a set of memcached servers. The servers URLs are specified
+  in the ``[memcache]\servers`` configuration option in the Keystone config.
 
 .. WARNING::
     It is recommended you use the ``keystone.token.persistence.backend.memcache_pool.Token``
     backend instead of ``keystone.token.persistence.backend.memcache.Token`` as the token
     persistence driver if you are deploying Keystone under eventlet instead of
-    Apache + mod_wsgi. This recommendation are due to known issues with the use of
-    ``thread.local`` under eventlet that can allow the leaking of memcache client objects
-    and consumption of extra sockets.
+    Apache + mod_wsgi. This recommendation is due to known issues with the
+    use of ``thread.local`` under eventlet that can allow the leaking of
+    memcache client objects and consumption of extra sockets.
 
 Token Provider
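The reorder and rewording above leave the guidance intact: prefer the pooled backend under eventlet, because a client stashed in ``thread.local`` is created once per thread (and once per greenthread under eventlet), so clients and their sockets accumulate until garbage collection, whereas a bounded pool reuses a fixed set. A minimal stdlib-only sketch of that difference follows; the class and factory names are illustrative, not Keystone or dogpile code.

import queue
import threading


class ThreadLocalClient(threading.local):
    # threading.local re-runs __init__ in every thread that touches the
    # instance, so each thread (or greenthread) ends up with its own client
    # and socket, which linger until garbage collection.
    def __init__(self, factory):
        self.client = factory()


class BoundedClientPool(object):
    # At most `maxsize` clients ever exist; callers borrow and return them,
    # so sockets are reused instead of accumulating.
    def __init__(self, factory, maxsize=10):
        self._pool = queue.Queue()
        for _ in range(maxsize):
            self._pool.put(factory())

    def acquire(self, timeout=10):
        return self._pool.get(timeout=timeout)

    def release(self, client):
        self._pool.put(client)


local_client = ThreadLocalClient(dict)    # a dict stands in for a memcache client
pooled = BoundedClientPool(dict, maxsize=2)
conn = pooled.acquire()
pooled.release(conn)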


@@ -536,27 +536,27 @@
 # Memcache servers in the format of "host:port".
 # (dogpile.cache.memcache and keystone.cache.memcache_pool
-# backends only) (list value)
+# backends only). (list value)
 #memcache_servers=localhost:11211
 
 # Number of seconds memcached server is considered dead before
 # it is tried again. (dogpile.cache.memcache and
-# keystone.cache.memcache_pool backends only) (integer value)
+# keystone.cache.memcache_pool backends only). (integer value)
 #memcache_dead_retry=300
 
 # Timeout in seconds for every call to a server.
 # (dogpile.cache.memcache and keystone.cache.memcache_pool
-# backends only) (integer value)
+# backends only). (integer value)
 #memcache_socket_timeout=3
 
 # Max total number of open connections to every memcached
-# server. (keystone.cache.memcache_pool backend only) (integer
-# value)
+# server. (keystone.cache.memcache_pool backend only).
+# (integer value)
 #memcache_pool_maxsize=10
 
 # Number of seconds a connection to memcached is held unused
 # in the pool before it is closed.
-# (keystone.cache.memcache_pool backend only) (integer value)
+# (keystone.cache.memcache_pool backend only). (integer value)
 #memcache_pool_unused_timeout=60
 
 # Number of seconds that an operation will wait to get a


@@ -35,11 +35,6 @@ from keystone.openstack.common import log
 
 LOG = log.getLogger(__name__)
 
-# NOTE(morganfainberg): This is used as the maximum number of seconds a get
-# of a new connection will wait for before raising an exception indicating
-# a serious / most likely non-recoverable delay has occurred.
-CONNECTION_GET_TIMEOUT = 120
-
 # This 'class' is taken from http://stackoverflow.com/a/22520633/238308
 # Don't inherit client from threading.local so that we can reuse clients in
 # different threads
@@ -78,9 +73,25 @@ class ConnectionPool(queue.Queue):
         self._acquired = 0
 
     def _create_connection(self):
+        """Returns a connection instance.
+
+        This is called when the pool needs another instance created.
+
+        :returns: a new connection instance
+
+        """
         raise NotImplementedError
 
     def _destroy_connection(self, conn):
+        """Destroy and cleanup a connection instance.
+
+        This is called when the pool wishes to get rid of an existing
+        connection. This is the opportunity for a subclass to free up
+        resources and clean up after itself.
+
+        :param conn: the connection object to destroy
+
+        """
         raise NotImplementedError
 
     def _debug_logger(self, msg, *args, **kwargs):
@@ -110,6 +121,9 @@ class ConnectionPool(queue.Queue):
     def _qsize(self):
         return self.maxsize - self._acquired
 
+    # NOTE(dstanek): stdlib and eventlet Queue implementations
+    # have different names for the qsize method. This ensures
+    # that we override both of them.
     if not hasattr(queue.Queue, '_qsize'):
         qsize = _qsize
@@ -121,18 +135,24 @@ class ConnectionPool(queue.Queue):
         self._acquired += 1
         return conn
 
+    def _drop_expired_connections(self, conn):
+        """Drop all expired connections from the right end of the queue.
+
+        :param conn: connection object
+
+        """
+        now = time.time()
+        while self.queue and self.queue[0].ttl < now:
+            conn = self.queue.popleft().connection
+            self._debug_logger('Reaping connection %s', id(conn))
+            self._destroy_connection(conn)
+
     def _put(self, conn):
         self.queue.append(_PoolItem(
             ttl=time.time() + self._unused_timeout,
             connection=conn,
         ))
         self._acquired -= 1
-        # Drop all expired connections from the right end of the queue
-        now = time.time()
-        while self.queue and self.queue[0].ttl < now:
-            conn = self.queue.popleft().connection
-            self._debug_logger('Reaping connection %s', id(conn))
-            self._destroy_connection(conn)
+        self._drop_expired_connections(conn)
 
 
 class MemcacheClientPool(ConnectionPool):
@@ -173,9 +193,8 @@ class MemcacheClientPool(ConnectionPool):
             # If this client found that one of the hosts is dead, mark it as
             # such in our internal list
             now = time.time()
-            for i, deaduntil, host in zip(itertools.count(),
-                                          self._hosts_deaduntil,
-                                          conn.servers):
+            for i, host in zip(itertools.count(), conn.servers):
+                deaduntil = self._hosts_deaduntil[i]
                 # Do nothing if we already know this host is dead
                 if deaduntil <= now:
                     if host.deaduntil > now:
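Taken together, the hunks above describe a ``Queue`` subclass that creates connections on demand, tracks how many are checked out, and reaps any that have sat unused past a TTL when a connection is returned. Below is a condensed, standalone approximation of that pattern; ``SimplePool`` and its attributes are simplified stand-ins, not the actual Keystone ``ConnectionPool``/``MemcacheClientPool`` classes.

import collections
import queue
import time

_PoolItem = collections.namedtuple('_PoolItem', ['ttl', 'connection'])


class SimplePool(queue.Queue):
    """Bounded pool that reaps connections left unused past a TTL."""

    def __init__(self, factory, maxsize=4, unused_timeout=60):
        queue.Queue.__init__(self, maxsize)
        self._factory = factory              # how to build a new connection
        self._unused_timeout = unused_timeout
        self._acquired = 0                   # connections currently checked out

    def _create_connection(self):
        return self._factory()

    def _destroy_connection(self, conn):
        pass                                 # a real pool would close sockets here

    def _qsize(self):
        # Report remaining capacity so Queue.get() only blocks once every
        # allowed connection is checked out.
        return self.maxsize - self._acquired

    def _get(self):
        # Reuse the most recently returned connection, or create a new one.
        conn = self.queue.pop().connection if self.queue else self._create_connection()
        self._acquired += 1
        return conn

    def _put(self, conn):
        self.queue.append(_PoolItem(ttl=time.time() + self._unused_timeout,
                                    connection=conn))
        self._acquired -= 1
        # Reap connections whose TTL has expired (oldest sit on the left).
        now = time.time()
        while self.queue and self.queue[0].ttl < now:
            self._destroy_connection(self.queue.popleft().connection)


pool = SimplePool(factory=dict, maxsize=2, unused_timeout=60)
conn = pool.get(timeout=1)   # created on demand and checked out
pool.put(conn)               # returned with a fresh TTL; may be reaped later

Returning connections to the right end of the deque and reaping from the left keeps recently used connections warm while idle ones age out, which is the behavior the extracted ``_drop_expired_connections`` method formalizes.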


@@ -336,27 +336,27 @@ FILE_OPTIONS = {
         cfg.ListOpt('memcache_servers', default=['localhost:11211'],
                     help='Memcache servers in the format of "host:port".'
                          ' (dogpile.cache.memcache and keystone.cache.memcache_pool'
-                         ' backends only)'),
+                         ' backends only).'),
         cfg.IntOpt('memcache_dead_retry',
                    default=5 * 60,
                    help='Number of seconds memcached server is considered dead'
                         ' before it is tried again. (dogpile.cache.memcache and'
-                        ' keystone.cache.memcache_pool backends only)'),
+                        ' keystone.cache.memcache_pool backends only).'),
         cfg.IntOpt('memcache_socket_timeout',
                    default=3,
                    help='Timeout in seconds for every call to a server.'
                         ' (dogpile.cache.memcache and keystone.cache.memcache_pool'
-                        ' backends only)'),
+                        ' backends only).'),
         cfg.IntOpt('memcache_pool_maxsize',
                    default=10,
                    help='Max total number of open connections to every'
                         ' memcached server. (keystone.cache.memcache_pool backend'
-                        ' only)'),
+                        ' only).'),
         cfg.IntOpt('memcache_pool_unused_timeout',
                    default=60,
                    help='Number of seconds a connection to memcached is held'
                         ' unused in the pool before it is closed.'
-                        ' (keystone.cache.memcache_pool backend only)'),
+                        ' (keystone.cache.memcache_pool backend only).'),
         cfg.IntOpt('memcache_pool_connection_get_timeout',
                    default=10,
                    help='Number of seconds that an operation will wait to get '
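These ``cfg`` option definitions are what the sample configuration file shown earlier is generated from. As a reminder of how such definitions are consumed, here is a small standalone oslo.config sketch; it uses a throwaway ``ConfigOpts`` instance and an assumed ``example`` group rather than Keystone's real ``CONF`` object.

from oslo_config import cfg  # the 2014-era code imported this as 'from oslo.config import cfg'

CONF = cfg.ConfigOpts()
CONF.register_opts(
    [
        cfg.ListOpt('memcache_servers', default=['localhost:11211'],
                    help='Memcache servers in the format of "host:port".'),
        cfg.IntOpt('memcache_pool_maxsize', default=10,
                   help='Max open connections to every memcached server.'),
        cfg.IntOpt('memcache_pool_unused_timeout', default=60,
                   help='Seconds an unused pooled connection is kept open.'),
    ],
    group='example')

CONF([])  # parse no CLI args or config files; the defaults above apply
print(CONF.example.memcache_servers, CONF.example.memcache_pool_maxsize)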


@@ -40,6 +40,7 @@ def set_default_for_default_log_levels():
     extra_log_level_defaults = [
         'dogpile=INFO',
         'routes=INFO',
+        'keystone.common._memcache_pool=INFO',
    ]
 
     def find_default_log_levels_opt():
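Raising ``keystone.common._memcache_pool`` to INFO by default keeps the pool's debug chatter out of normal logs. The effect of such ``logger=LEVEL`` defaults is roughly the following stdlib-only sketch; it is illustrative only, since oslo's logging setup performs this wiring itself.

import logging

extra_log_level_defaults = [
    'dogpile=INFO',
    'routes=INFO',
    'keystone.common._memcache_pool=INFO',
]

# Each entry names a logger and the minimum level it should emit by default.
for entry in extra_log_level_defaults:
    name, _, level = entry.partition('=')
    logging.getLogger(name).setLevel(getattr(logging, level))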