LBaaS: update status of members according to health statistics

Added member health stats reporting to the haproxy driver.
During pool stats update, the db plugin now checks for member
stats and updates member statuses if any are reported.
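
A minimal sketch of the stats structure the haproxy driver reports
and the db plugin consumes; the member id and values below are
illustrative (borrowed from the driver unit test):

stats = {
    'bytes_in': '7764',
    'bytes_out': '2365',
    'active_connections': '1',
    'total_connections': '10',
    'members': {
        '32a6c2a3-420a-44c3-955d-86bd2fc6871e': {
            'status': 'ACTIVE',
            'health': 'L7OK',
            'failed_checks': '0',
        },
    },
}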

Fixes bug 1160125

Change-Id: I77bf13615607fcf91bf877c228811ea8008b2457
Oleg Bondarev 2013-08-14 16:11:24 +04:00 committed by Gerrit Code Review
parent 795155c743
commit 957533f685
6 changed files with 139 additions and 54 deletions

View File

@@ -32,6 +32,7 @@ from neutron.openstack.common.db import exception
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
from neutron.services.loadbalancer import constants as lb_const
LOG = logging.getLogger(__name__)
@@ -178,6 +179,7 @@ class LoadBalancerPluginDb(LoadBalancerPluginBase,
status_description=None):
with context.session.begin(subtransactions=True):
v_db = self._get_resource(context, model, id)
if v_db.status != status:
v_db.status = status
# update status_description in two cases:
# - new value is passed
@@ -468,11 +470,17 @@ class LoadBalancerPluginDb(LoadBalancerPluginBase,
def update_pool_stats(self, context, pool_id, data=None):
"""Update a pool with new stats structure."""
data = data or {}
with context.session.begin(subtransactions=True):
pool_db = self._get_resource(context, Pool, pool_id)
self.assert_modification_allowed(pool_db)
pool_db.stats = self._create_pool_stats(context, pool_id, data)
for member, stats in data.get('members', {}).items():
stats_status = stats.get(lb_const.STATS_STATUS)
if stats_status:
self.update_status(context, Member, member, stats_status)
def _create_pool_stats(self, context, pool_id, data=None):
# This is an internal method to add pool statistics. It won't
# be exposed to the API
@@ -480,10 +488,10 @@ class LoadBalancerPluginDb(LoadBalancerPluginBase,
data = {}
stats_db = PoolStatistics(
pool_id=pool_id,
bytes_in=data.get("bytes_in", 0),
bytes_out=data.get("bytes_out", 0),
active_connections=data.get("active_connections", 0),
total_connections=data.get("total_connections", 0)
bytes_in=data.get(lb_const.STATS_IN_BYTES, 0),
bytes_out=data.get(lb_const.STATS_OUT_BYTES, 0),
active_connections=data.get(lb_const.STATS_ACTIVE_CONNECTIONS, 0),
total_connections=data.get(lb_const.STATS_TOTAL_CONNECTIONS, 0)
)
return stats_db
@@ -555,10 +563,10 @@ class LoadBalancerPluginDb(LoadBalancerPluginBase,
pool = self._get_resource(context, Pool, pool_id)
stats = pool['stats']
res = {'bytes_in': stats['bytes_in'],
'bytes_out': stats['bytes_out'],
'active_connections': stats['active_connections'],
'total_connections': stats['total_connections']}
res = {lb_const.STATS_IN_BYTES: stats['bytes_in'],
lb_const.STATS_OUT_BYTES: stats['bytes_out'],
lb_const.STATS_ACTIVE_CONNECTIONS: stats['active_connections'],
lb_const.STATS_TOTAL_CONNECTIONS: stats['total_connections']}
return {'stats': res}
def create_pool_health_monitor(self, context, health_monitor, pool_id):

View File

@@ -32,12 +32,16 @@ SESSION_PERSISTENCE_SOURCE_IP = 'SOURCE_IP'
SESSION_PERSISTENCE_HTTP_COOKIE = 'HTTP_COOKIE'
SESSION_PERSISTENCE_APP_COOKIE = 'APP_COOKIE'
STATS_CURRENT_CONNECTIONS = 'CURRENT_CONNECTIONS'
STATS_MAX_CONNECTIONS = 'MAX_CONNECTIONS'
STATS_CURRENT_SESSIONS = 'CURRENT_SESSIONS'
STATS_MAX_SESSIONS = 'MAX_SESSIONS'
STATS_TOTAL_SESSIONS = 'TOTAL_SESSIONS'
STATS_IN_BYTES = 'IN_BYTES'
STATS_OUT_BYTES = 'OUT_BYTES'
STATS_CONNECTION_ERRORS = 'CONNECTION_ERRORS'
STATS_RESPONSE_ERRORS = 'RESPONSE_ERRORS'
STATS_ACTIVE_CONNECTIONS = 'active_connections'
STATS_MAX_CONNECTIONS = 'max_connections'
STATS_TOTAL_CONNECTIONS = 'total_connections'
STATS_CURRENT_SESSIONS = 'current_sessions'
STATS_MAX_SESSIONS = 'max_sessions'
STATS_TOTAL_SESSIONS = 'total_sessions'
STATS_IN_BYTES = 'bytes_in'
STATS_OUT_BYTES = 'bytes_out'
STATS_CONNECTION_ERRORS = 'connection_errors'
STATS_RESPONSE_ERRORS = 'response_errors'
STATS_STATUS = 'status'
STATS_HEALTH = 'health'
STATS_FAILED_CHECKS = 'failed_checks'

View File

@@ -38,7 +38,7 @@ BALANCE_MAP = {
}
STATS_MAP = {
constants.STATS_CURRENT_CONNECTIONS: 'qcur',
constants.STATS_ACTIVE_CONNECTIONS: 'qcur',
constants.STATS_MAX_CONNECTIONS: 'qmax',
constants.STATS_CURRENT_SESSIONS: 'scur',
constants.STATS_MAX_SESSIONS: 'smax',

View File

@@ -25,6 +25,8 @@ from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import exceptions
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.services.loadbalancer import constants as lb_const
from neutron.services.loadbalancer.drivers.haproxy import cfg as hacfg
LOG = logging.getLogger(__name__)
@@ -105,11 +107,48 @@ class HaproxyNSDriver(object):
def get_stats(self, pool_id):
socket_path = self._get_state_file_path(pool_id, 'sock')
TYPE_BACKEND_REQUEST = 2
TYPE_SERVER_REQUEST = 4
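# haproxy's 'show stat <iid> <type> <sid>' treats <type> as a bitmask
# (1 = frontend, 2 = backend, 4 = server), so requesting 2 | 4 returns
# both the backend summary row and one row per member server.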
if os.path.exists(socket_path):
parsed_stats = self._get_stats_from_socket(
socket_path,
entity_type=TYPE_BACKEND_REQUEST | TYPE_SERVER_REQUEST)
pool_stats = self._get_backend_stats(parsed_stats)
pool_stats['members'] = self._get_servers_stats(parsed_stats)
return pool_stats
else:
LOG.warn(_('Stats socket not found for pool %s') % pool_id)
return {}
def _get_backend_stats(self, parsed_stats):
TYPE_BACKEND_RESPONSE = '1'
for stats in parsed_stats:
if stats['type'] == TYPE_BACKEND_RESPONSE:
unified_stats = dict((k, stats.get(v, ''))
for k, v in hacfg.STATS_MAP.items())
return unified_stats
return {}
def _get_servers_stats(self, parsed_stats):
TYPE_SERVER_RESPONSE = '2'
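# Server rows are keyed by svname; the haproxy config generated by
# this driver names each server after its member id, so the result
# maps member ids to their status, health and failed checks.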
res = {}
for stats in parsed_stats:
if stats['type'] == TYPE_SERVER_RESPONSE:
res[stats['svname']] = {
lb_const.STATS_STATUS: (constants.INACTIVE
if stats['status'] == 'DOWN'
else constants.ACTIVE),
lb_const.STATS_HEALTH: stats['check_status'],
lb_const.STATS_FAILED_CHECKS: stats['chkfail']
}
return res
def _get_stats_from_socket(self, socket_path, entity_type):
try:
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(socket_path)
s.send('show stat -1 2 -1\n')
s.send('show stat -1 %s -1\n' % entity_type)
raw_stats = ''
chunk_size = 1024
while True:
@@ -120,24 +159,20 @@ class HaproxyNSDriver(object):
return self._parse_stats(raw_stats)
except socket.error as e:
LOG.warn(_('Error while connecting to stats socket: %s') % e)
return {}
else:
LOG.warn(_('Stats socket not found for pool %s') % pool_id)
LOG.warn(_('Error while connecting to stats socket: %s'), e)
return {}
def _parse_stats(self, raw_stats):
stat_lines = raw_stats.splitlines()
if len(stat_lines) < 2:
return {}
stat_names = [line.strip('# ') for line in stat_lines[0].split(',')]
stat_values = [line.strip() for line in stat_lines[1].split(',')]
stats = dict(zip(stat_names, stat_values))
unified_stats = {}
for stat in hacfg.STATS_MAP:
unified_stats[stat] = stats.get(hacfg.STATS_MAP[stat], '')
return []
stat_names = [name.strip('# ') for name in stat_lines[0].split(',')]
res_stats = []
for raw_values in stat_lines[1:]:
stat_values = [value.strip() for value in raw_values.split(',')]
res_stats.append(dict(zip(stat_names, stat_values)))
return unified_stats
return res_stats
def remove_orphans(self, known_pool_ids):
raise NotImplementedError()

View File

@@ -987,6 +987,23 @@ class TestLoadBalancer(LoadBalancerPluginDbTestCase):
for k, v in stats_data.items():
self.assertEqual(pool_obj.stats.__dict__[k], v)
def test_update_pool_stats_members_statuses(self):
with self.pool() as pool:
pool_id = pool['pool']['id']
with self.member(pool_id=pool_id) as member:
member_id = member['member']['id']
stats_data = {'members': {
member_id: {
'status': 'INACTIVE'
}
}}
ctx = context.get_admin_context()
member = self.plugin.get_member(ctx, member_id)
self.assertEqual('PENDING_CREATE', member['status'])
self.plugin.update_pool_stats(ctx, pool_id, stats_data)
member = self.plugin.get_member(ctx, member_id)
self.assertEqual('INACTIVE', member['status'])
def test_get_pool_stats(self):
keys = [("bytes_in", 0),
("bytes_out", 0),

View File

@@ -141,7 +141,15 @@ class TestHaproxyNSDriver(base.BaseTestCase):
'req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,\n'
'8e271901-69ed-403e-a59b-f53cf77ef208,BACKEND,1,2,3,4,0,'
'10,7764,2365,0,0,,0,0,0,0,UP,1,1,0,,0,103780,0,,1,2,0,,0'
',,1,0,,0,,,,0,0,0,0,0,0,,,,,0,0,\n\n')
',,1,0,,0,,,,0,0,0,0,0,0,,,,,0,0,\n'
'a557019b-dc07-4688-9af4-f5cf02bb6d4b,'
'32a6c2a3-420a-44c3-955d-86bd2fc6871e,0,0,0,1,,7,1120,'
'224,,0,,0,0,0,0,UP,1,1,0,0,1,2623,303,,1,2,1,,7,,2,0,,'
'1,L7OK,200,98,0,7,0,0,0,0,0,,,,0,0,\n'
'a557019b-dc07-4688-9af4-f5cf02bb6d4b,'
'd9aea044-8867-4e80-9875-16fb808fa0f9,0,0,0,2,,12,0,0,,'
'0,,0,0,8,4,DOWN,1,1,0,9,2,308,675,,1,2,2,,4,,2,0,,2,'
'L4CON,,2999,0,0,0,0,0,0,0,,,,0,0,\n')
raw_stats_empty = ('# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,'
'bout,dreq,dresp,ereq,econ,eresp,wretr,wredis,'
'status,weight,act,bck,chkfail,chkdown,lastchg,'
@@ -161,20 +169,33 @@ class TestHaproxyNSDriver(base.BaseTestCase):
socket.return_value = socket
socket.recv.return_value = raw_stats
exp_stats = {'CONNECTION_ERRORS': '0',
'CURRENT_CONNECTIONS': '1',
'CURRENT_SESSIONS': '3',
'IN_BYTES': '7764',
'MAX_CONNECTIONS': '2',
'MAX_SESSIONS': '4',
'OUT_BYTES': '2365',
'RESPONSE_ERRORS': '0',
'TOTAL_SESSIONS': '10'}
exp_stats = {'connection_errors': '0',
'active_connections': '1',
'current_sessions': '3',
'bytes_in': '7764',
'max_connections': '2',
'max_sessions': '4',
'bytes_out': '2365',
'response_errors': '0',
'total_sessions': '10',
'members': {
'32a6c2a3-420a-44c3-955d-86bd2fc6871e': {
'status': 'ACTIVE',
'health': 'L7OK',
'failed_checks': '0'
},
'd9aea044-8867-4e80-9875-16fb808fa0f9': {
'status': 'INACTIVE',
'health': 'L4CON',
'failed_checks': '9'
}
}
}
stats = self.driver.get_stats('pool_id')
self.assertEqual(exp_stats, stats)
socket.recv.return_value = raw_stats_empty
self.assertEqual({}, self.driver.get_stats('pool_id'))
self.assertEqual({'members': {}}, self.driver.get_stats('pool_id'))
path_exists.return_value = False
socket.reset_mock()