Merge "Fix way of calculate LB status after HM event"

Zuul 2022-05-31 16:28:23 +00:00 committed by Gerrit Code Review
commit 48c55f94ad
4 changed files with 320 additions and 141 deletions


@@ -30,6 +30,7 @@ OVN_FIP_EXT_ID_KEY = 'neutron:fip_id'
OVN_FIP_PORT_EXT_ID_KEY = 'neutron:fip_port_id'
OVN_GW_PORT_EXT_ID_KEY = 'neutron:gw_port_id'
OVN_PORT_CIDR_EXT_ID_KEY = 'neutron:cidrs'
OVN_MEMBER_STATUS_KEY = 'neutron:member_status'
LB_EXT_IDS_LS_REFS_KEY = 'ls_refs'
LB_EXT_IDS_LR_REF_KEY = 'lr_ref'


@@ -1637,6 +1637,56 @@ class OvnProviderHelper():
return status
def _find_member_status(self, ovn_lb, member_id):
# NOTE (froyo): search lb.external_ids under the key
# neutron:member_status; if the member is not found we will
# return NO_MONITOR
try:
existing_members = ovn_lb.external_ids.get(
ovn_const.OVN_MEMBER_STATUS_KEY)
existing_members = jsonutils.loads(existing_members)
return existing_members[member_id]
except TypeError:
LOG.debug("no member status on external_ids: %s",
str(existing_members))
except KeyError:
LOG.debug("Error member_id %s not found on member_status",
str(member_id))
return constants.NO_MONITOR
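For illustration, the member statuses live in the load balancer's external_ids as a JSON-encoded map under the new neutron:member_status key. A minimal standalone sketch of the lookup, using the stdlib json module in place of oslo's jsonutils (a thin wrapper around it) and a hypothetical external_ids snapshot:

import json

NO_MONITOR = 'NO_MONITOR'  # stands in for the octavia_lib constant
OVN_MEMBER_STATUS_KEY = 'neutron:member_status'

# hypothetical snapshot of an OVN Load_Balancer row's external_ids
external_ids = {
    'pool_pool-uuid': 'member_member-uuid_10.0.0.4:80_subnet-uuid',
    OVN_MEMBER_STATUS_KEY: json.dumps({'member-uuid': 'ONLINE'}),
}

def find_member_status(external_ids, member_id):
    try:
        statuses = json.loads(external_ids.get(OVN_MEMBER_STATUS_KEY))
        return statuses[member_id]
    except (TypeError, KeyError):
        # TypeError: the key is absent (loads(None)); KeyError: the
        # member has no recorded status. Either way, fall back.
        return NO_MONITOR

assert find_member_status(external_ids, 'member-uuid') == 'ONLINE'
assert find_member_status(external_ids, 'unknown-uuid') == NO_MONITOR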
def _update_member_status(self, ovn_lb, member, status=None, delete=False):
existing_members = ovn_lb.external_ids.get(
ovn_const.OVN_MEMBER_STATUS_KEY)
try:
existing_members = jsonutils.loads(existing_members)
except TypeError:
LOG.debug("no member status on external_ids: %s",
str(existing_members))
existing_members = {}
if delete:
if member in existing_members:
del existing_members[member]
else:
existing_members[member] = status
try:
if existing_members:
member_status = {
ovn_const.OVN_MEMBER_STATUS_KEY:
jsonutils.dumps(existing_members)}
self.ovn_nbdb_api.db_set(
'Load_Balancer', ovn_lb.uuid,
('external_ids', member_status)).execute()
else:
self.ovn_nbdb_api.db_remove(
'Load_Balancer', ovn_lb.uuid, 'external_ids',
(ovn_const.OVN_MEMBER_STATUS_KEY)).execute()
except Exception:
LOG.exception("Error storing member status on external_ids member:"
" %s delete: %s status: %s", str(member),
str(delete), str(status))
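As a companion sketch (reusing the json import and key constant assumed above), the write path can be modeled as a pure function returning the new JSON value for the key, or None when the last entry was deleted and the key should be dropped, mirroring the db_set/db_remove split:

def updated_member_statuses(external_ids, member_id, status=None,
                            delete=False):
    try:
        statuses = json.loads(external_ids.get(OVN_MEMBER_STATUS_KEY))
    except TypeError:
        statuses = {}
    if delete:
        statuses.pop(member_id, None)
    else:
        statuses[member_id] = status
    # non-empty map -> db_set the serialized value;
    # empty map -> db_remove the whole key
    return json.dumps(statuses) if statuses else None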
def _add_member(self, member, ovn_lb, pool_key):
external_ids = copy.deepcopy(ovn_lb.external_ids)
existing_members = external_ids[pool_key]
@@ -1738,11 +1788,18 @@ class OvnProviderHelper():
constants.PROVISIONING_STATUS: constants.ACTIVE})
status[constants.LISTENERS] = listener_status
operating_status = constants.NO_MONITOR
if new_member and ovn_lb.health_check:
operating_status = constants.ONLINE
if not self._update_hm_members(ovn_lb, pool_key):
operating_status = constants.ERROR
member_status[constants.OPERATING_STATUS] = operating_status
self._update_member_status(
ovn_lb,
member[constants.ID],
operating_status)
return status
def _remove_member(self, member, ovn_lb, pool_key):
@@ -1808,6 +1865,9 @@ class OvnProviderHelper():
{constants.ID: ovn_lb.name,
constants.PROVISIONING_STATUS: constants.ACTIVE}]}
self._update_member_status(
ovn_lb, member[constants.ID], None, delete=True)
listener_status = []
for listener in pool_listeners:
listener_status.append(
@@ -1855,17 +1915,14 @@ class OvnProviderHelper():
self._update_member(member, ovn_lb, pool_key)
if constants.ADMIN_STATE_UP in member:
if member[constants.ADMIN_STATE_UP]:
old_admin_state_up = member.get('old_admin_state_up')
if old_admin_state_up is None:
exist_member = self._octavia_driver_lib.get_member(
member[constants.ID])
if exist_member:
old_admin_state_up = exist_member.admin_state_up
if old_admin_state_up:
member_status[constants.OPERATING_STATUS] = (
constants.ONLINE)
# if an HM exists, trust neutron:member_status
# as the last valid status for the member
if ovn_lb.health_check:
# search for the member's last known status
last_status = self._find_member_status(
ovn_lb, member[constants.ID])
member_status[constants.OPERATING_STATUS] = last_status
else:
# going from down to up should reflect NO_MONITOR state
member_status[constants.OPERATING_STATUS] = (
constants.NO_MONITOR)
else:
@@ -1884,6 +1941,12 @@ class OvnProviderHelper():
{constants.ID: ovn_lb.name,
constants.PROVISIONING_STATUS: constants.ACTIVE}]}
if constants.OPERATING_STATUS in member_status:
self._update_member_status(
ovn_lb,
member[constants.ID],
member_status[constants.OPERATING_STATUS])
listener_status = []
for listener in pool_listeners:
listener_status.append(
@@ -2306,6 +2369,13 @@ class OvnProviderHelper():
constants.ID: mem_info.split('_')[1],
constants.PROVISIONING_STATUS: constants.ACTIVE,
constants.OPERATING_STATUS: constants.ONLINE})
# NOTE (froyo): sync the local info with the initial HM status for
# previously created members; if the HM detects any change we
# will be notified about it through an event.
self._update_member_status(
ovn_lb,
mem_info.split('_')[1],
constants.ONLINE)
status[constants.MEMBERS] = member_status
# MONITOR_PRT = 80
@@ -2479,86 +2549,122 @@ class OvnProviderHelper():
self.add_request({'type': ovn_const.REQ_TYPE_HM_UPDATE_EVENT,
'info': request_info})
def _get_new_operating_statuses(self, ovn_lb, pool_id, member_id,
member_status):
# When a member's operating status changes, we have to determine
# the correct operating_status to report back to Octavia.
# For example:
#
# LB with Pool and 2 members
#
# member-1 goes offline
# member-1 operating_status is ERROR
# if Pool operating_status is ONLINE
# Pool operating_status is DEGRADED
# if LB operating_status is ONLINE
# LB operating_status is DEGRADED
#
# member-2 then goes offline
# member-2 operating_status is ERROR
# Pool operating_status is ERROR
# LB operating_status is ERROR
#
# The opposite would also have to happen.
#
# If there is only one member, the Pool and LB will reflect
# the same status
operating_status = member_status
def _get_current_operating_statuses(self, ovn_lb):
# NOTE (froyo): base all the logic on the external_ids field
# 'neutron:member_status', which should include the status of every
# LB member so that the global LB status (listeners, pools and
# members included) can be calculated
status = {
constants.LOADBALANCERS: [],
constants.LISTENERS: [],
constants.POOLS: [],
constants.MEMBERS: []
}
# Assume the best
pool_status = constants.ONLINE
lb_status = constants.ONLINE
listeners = {}
pools = {}
member_statuses = ovn_lb.external_ids.get(
ovn_const.OVN_MEMBER_STATUS_KEY)
pool = self._octavia_driver_lib.get_pool(pool_id)
if pool:
pool_status = pool.operating_status
lb = self._octavia_driver_lib.get_loadbalancer(ovn_lb.name)
if lb:
lb_status = lb.operating_status
try:
member_statuses = jsonutils.loads(member_statuses)
except TypeError:
LOG.debug("no member status on external_ids: %s",
str(member_statuses))
member_statuses = {}
for k, v in ovn_lb.external_ids.items():
if ovn_const.LB_EXT_IDS_POOL_PREFIX not in k:
if ovn_const.LB_EXT_IDS_LISTENER_PREFIX in k:
listeners[k.split('_')[1]] = [
x.split('_')[1] for x in v.split(',')]
continue
lb_pool_id = k.split('_')[1]
if lb_pool_id != pool_id:
if ovn_const.LB_EXT_IDS_POOL_PREFIX in k:
pools[k.split('_')[1]] = [
x.split('_')[1] for x in v.split(',')]
continue
existing_members = v.split(",")
for mem in existing_members:
# Ignore the passed member ID, we already know its status
mem_id = mem.split('_')[1]
if mem_id != member_id:
member = self._octavia_driver_lib.get_member(mem_id)
# If the statuses are different it is degraded
if member and member.operating_status != member_status:
operating_status = constants.DEGRADED
break
# operating_status will either be ONLINE, ERROR or DEGRADED
if operating_status == constants.ONLINE:
if pool_status != constants.ONLINE:
pool_status = constants.ONLINE
if lb_status != constants.ONLINE:
lb_status = constants.ONLINE
elif operating_status == constants.ERROR:
if pool_status == constants.ONLINE:
pool_status = constants.ERROR
if lb_status == constants.ONLINE:
lb_status = constants.ERROR
else:
if pool_status == constants.ONLINE:
pool_status = constants.DEGRADED
if lb_status == constants.ONLINE:
lb_status = constants.DEGRADED
for member_id, member_status in member_statuses.items():
status[constants.MEMBERS].append({
constants.ID: member_id,
constants.PROVISIONING_STATUS: constants.ACTIVE,
constants.OPERATING_STATUS: member_status})
return lb_status, pool_status
# get pool statuses
for pool_id, members in pools.items():
for i, member in enumerate(members):
if member in member_statuses:
members[i] = member_statuses[member]
else:
# if we have no local info, assume the best
members[i] = constants.ONLINE
_pool = self._octavia_driver_lib.get_pool(pool_id)
if not _pool.admin_state_up:
pools[pool_id] = constants.OFFLINE
elif all(constants.ERROR == member_status
for member_status in pools[pool_id]):
pools[pool_id] = constants.ERROR
elif any(constants.ERROR == member_status
for member_status in pools[pool_id]):
pools[pool_id] = constants.DEGRADED
else:
pools[pool_id] = constants.ONLINE
status[constants.POOLS].append(
{constants.ID: pool_id,
constants.PROVISIONING_STATUS: constants.ACTIVE,
constants.OPERATING_STATUS: pools[pool_id]})
# get listener statuses
for listener_id, listener_pools in listeners.items():
for i, pool in enumerate(listener_pools):
if pool in pools:
listener_pools[i] = pools[pool]
else:
# if we have no local info, assume the best
listener_pools[i] = constants.ONLINE
_listener = self._octavia_driver_lib.get_listener(listener_id)
if not _listener.admin_state_up:
listeners[listener_id] = constants.OFFLINE
elif any(constants.ERROR == pool_status
for pool_status in listeners[listener_id]):
listeners[listener_id] = constants.ERROR
elif any(constants.DEGRADED == pool_status
for pool_status in listeners[listener_id]):
listeners[listener_id] = constants.DEGRADED
else:
listeners[listener_id] = constants.ONLINE
status[constants.LISTENERS].append(
{constants.ID: listener_id,
constants.PROVISIONING_STATUS: constants.ACTIVE,
constants.OPERATING_STATUS: listeners[listener_id]})
# get LB status
lb_status = constants.ONLINE
_lb = self._octavia_driver_lib.get_loadbalancer(ovn_lb.name)
if not _lb.admin_state_up:
lb_status = constants.OFFLINE
elif any(constants.ERROR == status
for status in listeners.values()):
lb_status = constants.ERROR
elif any(constants.DEGRADED == status
for status in listeners.values()):
lb_status = constants.DEGRADED
status[constants.LOADBALANCERS].append({
constants.ID: ovn_lb.name,
constants.PROVISIONING_STATUS: constants.ACTIVE,
constants.OPERATING_STATUS: lb_status})
return status
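The roll-up rules above differ by level: a pool is ERROR only when all of its members are ERROR and DEGRADED when any is, while a listener (and, in turn, the LB) goes ERROR as soon as any pool below it does. A standalone sketch of just that aggregation, with plain strings standing in for the octavia-lib constants:

def pool_status(member_statuses, admin_state_up=True):
    if not admin_state_up:
        return 'OFFLINE'
    if all(s == 'ERROR' for s in member_statuses):
        return 'ERROR'
    if any(s == 'ERROR' for s in member_statuses):
        return 'DEGRADED'
    return 'ONLINE'

def listener_status(pool_statuses, admin_state_up=True):
    # the same rule applies one level up for the LB itself
    if not admin_state_up:
        return 'OFFLINE'
    if any(s == 'ERROR' for s in pool_statuses):
        return 'ERROR'
    if any(s == 'DEGRADED' for s in pool_statuses):
        return 'DEGRADED'
    return 'ONLINE'

# one member down out of two: DEGRADED propagates up
assert pool_status(['ONLINE', 'ERROR']) == 'DEGRADED'
assert listener_status(['DEGRADED']) == 'DEGRADED'
# every member down: ERROR propagates up
assert pool_status(['ERROR', 'ERROR']) == 'ERROR'
assert listener_status(['ERROR']) == 'ERROR'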
def hm_update_event(self, info):
ovn_lb = info['ovn_lb']
# Lookup pool and member
pool_id = None
# Lookup member
member_id = None
for k, v in ovn_lb.external_ids.items():
if ovn_const.LB_EXT_IDS_POOL_PREFIX not in k:
continue
@@ -2568,8 +2674,9 @@ class OvnProviderHelper():
if info['port'] != member_port:
continue
# match
pool_id = k.split('_')[1]
member_id = v.split('_')[1]
member_id = [mb.split('_')[1] for mb in v.split(',')
if member_ip in mb and member_port in mb][0]
break
# found it in inner loop
@@ -2583,20 +2690,7 @@ class OvnProviderHelper():
member_status = constants.ONLINE
if info['status'] == ['offline']:
member_status = constants.ERROR
lb_status, pool_status = self._get_new_operating_statuses(
ovn_lb, pool_id, member_id, member_status)
status = {
constants.POOLS: [
{constants.ID: pool_id,
constants.PROVISIONING_STATUS: constants.ACTIVE,
constants.OPERATING_STATUS: pool_status}],
constants.MEMBERS: [
{constants.ID: member_id,
constants.PROVISIONING_STATUS: constants.ACTIVE,
constants.OPERATING_STATUS: member_status}],
constants.LOADBALANCERS: [
{constants.ID: ovn_lb.name,
constants.PROVISIONING_STATUS: constants.ACTIVE,
constants.OPERATING_STATUS: lb_status}]}
self._update_member_status(ovn_lb, member_id, member_status)
status = self._get_current_operating_statuses(ovn_lb)
return status
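The member lookup in hm_update_event relies on the encoding of pool entries in external_ids: a comma-separated list of member_<member-id>_<address>:<port>_<subnet-id>. A standalone sketch of picking the member hit by the event, with hypothetical values:

# hypothetical pool entry with two members
pool_value = ','.join([
    'member_uuid-1_10.0.0.4:8080_subnet-1',
    'member_uuid-2_10.0.0.5:8081_subnet-1',
])

def find_member_id(pool_value, ip, port):
    # mirrors the list comprehension above: match on both address and
    # port, then take the member id (the second '_'-separated field)
    hits = [mb.split('_')[1] for mb in pool_value.split(',')
            if ip in mb and port in mb]
    return hits[0] if hits else None

assert find_member_id(pool_value, '10.0.0.5', '8081') == 'uuid-2'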


@@ -180,6 +180,11 @@ class TestOvnOctaviaBase(base.TestOVNFunctionalBase,
external_ids[
ovn_const.LB_EXT_IDS_LS_REFS_KEY] = jsonutils.loads(
ls_refs)
member_status = external_ids.get(ovn_const.OVN_MEMBER_STATUS_KEY)
if member_status:
external_ids[
ovn_const.OVN_MEMBER_STATUS_KEY] = jsonutils.loads(
member_status)
lb_dict = {'name': lb.name, 'protocol': lb.protocol,
'vips': lb.vips, 'external_ids': external_ids}
try:
@@ -574,6 +579,7 @@ class TestOvnOctaviaBase(base.TestOVNFunctionalBase,
pool_info = {}
for p in lb_data.get('pools', []):
member_status = {}
external_ids = _get_lb_field_by_protocol(
p.protocol.lower(),
field='external_ids')
@@ -600,11 +606,14 @@ class TestOvnOctaviaBase(base.TestOVNFunctionalBase,
'neutron-%s' % port['network_id'], 0)
ex['neutron-%s' % port['network_id']] = act + 1
break
member_status[m.member_id] = o_constants.NO_MONITOR
pool_key = 'pool_' + p.pool_id
if not p.admin_state_up:
pool_key += ':D'
external_ids[pool_key] = p_members
pool_info[p.pool_id] = p_members
if member_status:
external_ids[ovn_const.OVN_MEMBER_STATUS_KEY] = member_status
for listener in lb_data['listeners']:
expected_vips = _get_lb_field_by_protocol(
@@ -833,7 +842,8 @@ class TestOvnOctaviaBase(base.TestOVNFunctionalBase,
'listeners': []
}
if getattr(member, 'admin_state_up', None):
expected_status['members'][0]['operating_status'] = "ONLINE"
expected_status['members'][0][
'operating_status'] = o_constants.NO_MONITOR
else:
expected_status['members'][0]['operating_status'] = "OFFLINE"
self._wait_for_status_and_validate(lb_data, [expected_status])


@@ -19,6 +19,7 @@ from neutronclient.common import exceptions as n_exc
from octavia_lib.api.drivers import data_models
from octavia_lib.api.drivers import exceptions
from octavia_lib.common import constants
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from ovsdbapp.backend.ovs_idl import idlutils
@@ -114,14 +115,17 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: 'foo_port',
'enabled': True,
'pool_%s' % self.pool_id: self.member_line,
'listener_%s' % self.listener_id: '80:pool_%s' % self.pool_id}
'listener_%s' % self.listener_id: '80:pool_%s' % self.pool_id,
ovn_const.OVN_MEMBER_STATUS_KEY: '{"%s": "%s"}'
% (self.member_id, constants.NO_MONITOR)}
self.ovn_hm_lb.external_ids = {
ovn_const.LB_EXT_IDS_VIP_KEY: '10.22.33.99',
ovn_const.LB_EXT_IDS_VIP_FIP_KEY: '123.123.123.99',
ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: 'foo_hm_port',
'enabled': True,
'pool_%s' % self.pool_id: [],
'listener_%s' % self.listener_id: '80:pool_%s' % self.pool_id}
'listener_%s' % self.listener_id: '80:pool_%s' % self.pool_id,
ovn_const.OVN_MEMBER_STATUS_KEY: '{}'}
self.helper.ovn_nbdb_api.db_find.return_value.\
execute.return_value = [self.ovn_lb]
self.helper.ovn_nbdb_api.db_list_rows.return_value.\
@@ -216,6 +220,42 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
(self.helper.ovn_nbdb_api.ls_get.return_value.
execute.return_value) = self.network
def test__update_member_status(self):
self.helper._update_member_status(
self.ovn_lb, self.member_id, constants.NO_MONITOR)
member_status = {
ovn_const.OVN_MEMBER_STATUS_KEY: '{"%s": "%s"}'
% (self.member_id, constants.NO_MONITOR)}
self.helper.ovn_nbdb_api.db_set.assert_called_once_with(
'Load_Balancer', self.ovn_lb.uuid, ('external_ids', member_status))
def test__update_member_status_delete(self):
self.helper._update_member_status(
self.ovn_lb, self.member_id, None, True)
self.helper.ovn_nbdb_api.db_remove.assert_called_once_with(
'Load_Balancer', self.ovn_lb.uuid, 'external_ids',
ovn_const.OVN_MEMBER_STATUS_KEY)
def test__update_member_status_delete_not_found(self):
self.helper._update_member_status(
self.ovn_lb, 'fool', None, True)
member_status = {
ovn_const.OVN_MEMBER_STATUS_KEY: '{"%s": "%s"}'
% (self.member_id, constants.NO_MONITOR)}
self.helper.ovn_nbdb_api.db_set.assert_called_once_with(
'Load_Balancer', self.ovn_lb.uuid, ('external_ids', member_status))
def test__find_member_status(self):
status = self.helper._find_member_status(self.ovn_lb, self.member_id)
self.assertEqual(status, constants.NO_MONITOR)
status = self.helper._find_member_status(
self.ovn_hm_lb, self.member_id)
self.assertEqual(status, constants.NO_MONITOR)
def test__find_member_status_exception(self):
status = self.helper._find_member_status(self.ovn_hm_lb, 'foo')
self.assertEqual(status, constants.NO_MONITOR)
@mock.patch('ovn_octavia_provider.helper.OvnProviderHelper.'
'_find_ovn_lbs')
def test__clean_lb_if_empty(self, lb):
@@ -1373,7 +1413,9 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
ovn_const.LB_EXT_IDS_VIP_FIP_KEY: '123.123.123.123',
ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: 'foo_port',
'enabled': True,
'listener_%s' % self.listener_id: '80:'}))]
'listener_%s' % self.listener_id: '80:',
ovn_const.OVN_MEMBER_STATUS_KEY: '{"%s": "%s"}'
% (self.member_id, constants.NO_MONITOR)}))]
self.assertEqual(self.helper.ovn_nbdb_api.db_set.call_count,
len(expected_calls))
self.helper.ovn_nbdb_api.db_set.assert_has_calls(
@@ -1409,7 +1451,9 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
'listener_%s' % self.listener_id: '80:',
ovn_const.LB_EXT_IDS_VIP_KEY: '10.22.33.4',
ovn_const.LB_EXT_IDS_VIP_FIP_KEY: '123.123.123.123',
ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: 'foo_port'}))
ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: 'foo_port',
ovn_const.OVN_MEMBER_STATUS_KEY: '{"%s": "%s"}'
% (self.member_id, constants.NO_MONITOR)}))
def test_pool_delete_pool_disabled(self):
disabled_p_key = self.helper._get_pool_key(self.pool_id,
@@ -1571,7 +1615,13 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
def test_member_create_already_exists(self):
self.helper.member_create(self.member)
self.helper.ovn_nbdb_api.db_set.assert_not_called()
member_status = {
ovn_const.OVN_MEMBER_STATUS_KEY: '{"%s": "%s"}'
% (self.member_id, constants.NO_MONITOR)}
self.helper.ovn_nbdb_api.db_set.assert_called_once_with(
'Load_Balancer',
self.ovn_lb.uuid,
('external_ids', member_status))
def test_member_create_first_member_in_pool(self):
self.ovn_lb.external_ids.update({
@@ -1629,7 +1679,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
self.assertEqual(status['members'][0]['provisioning_status'],
constants.ACTIVE)
self.assertEqual(status['members'][0]['operating_status'],
constants.ONLINE)
constants.NO_MONITOR)
self.member['admin_state_up'] = False
status = self.helper.member_update(self.member)
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
@@ -1674,7 +1724,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
self.assertEqual(status['members'][0]['provisioning_status'],
constants.ACTIVE)
self.assertEqual(status['members'][0]['operating_status'],
constants.ONLINE)
constants.NO_MONITOR)
def test_member_update_disabled_lb(self):
self.helper._find_ovn_lb_with_pool_key.side_effect = [
@@ -3715,18 +3765,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
def test_hm_update_event_member_port_not_found(self):
self._test_hm_update_no_member(False, True)
def _test_hm_update_status(self, ip, port, member_status,
lb_status=constants.ONLINE,
pool_status=constants.ONLINE):
fake_lb = fakes.FakeLB(
uuid=uuidutils.generate_uuid(),
admin_state_up=True,
name='fake_lb',
ext_ids={})
fake_pool = fakes.FakePool(
uuid=uuidutils.generate_uuid(),
admin_state_up=True,
name='fake_pool')
def _test_hm_update_status(self, member_id, ip, port, member_status):
info = {
'ovn_lb': self.ovn_hm_lb,
'ip': ip,
@@ -3735,14 +3774,26 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
'port': port,
'protocol': self.ovn_hm_lb.protocol,
'status': [member_status]}
fake_lb.operating_status = lb_status
fake_pool.operating_status = pool_status
self.octavia_driver_lib.get_pool.return_value = fake_pool
self.octavia_driver_lib.get_loadbalancer.return_value = fake_lb
self._update_member_status(self.ovn_hm_lb, member_id, member_status)
status = self.helper.hm_update_event(info)
return status
def _update_member_status(self, lb, member_id, member_status):
status = constants.ONLINE
if member_status == 'offline':
status = constants.ERROR
try:
existing_member_status = lb.external_ids[
ovn_const.OVN_MEMBER_STATUS_KEY]
member_statuses = jsonutils.loads(existing_member_status)
except Exception:
member_statuses = {}
member_statuses[member_id] = status
lb.external_ids[
ovn_const.OVN_MEMBER_STATUS_KEY] = jsonutils.dumps(
member_statuses)
def _add_member(self, subnet, port):
fake_port = fakes.FakePort.create_one_port(
attrs={'allowed_address_pairs': ''})
@@ -3761,18 +3812,34 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
pool_key = 'pool_%s' % self.pool_id
existing_members = self.ovn_hm_lb.external_ids[pool_key]
existing_member_status = self.ovn_hm_lb.external_ids[
ovn_const.OVN_MEMBER_STATUS_KEY]
try:
member_statuses = jsonutils.loads(existing_member_status)
except Exception:
member_statuses = {}
if existing_members:
existing_members = ','.join([existing_members, member_line])
self.ovn_hm_lb.external_ids[pool_key] = existing_members
member_statuses[member['id']] = constants.ONLINE
self.ovn_hm_lb.external_ids[
ovn_const.OVN_MEMBER_STATUS_KEY] = jsonutils.dumps(
member_statuses)
else:
self.ovn_hm_lb.external_ids[pool_key] = member_line
member_status = '{"%s": "%s"}' % (member['id'],
constants.ONLINE)
self.ovn_hm_lb.external_ids[
ovn_const.OVN_MEMBER_STATUS_KEY] = member_status
return member
def test_hm_update_status_offline(self):
fake_subnet = fakes.FakeSubnet.create_one_subnet()
member = self._add_member(fake_subnet, 8080)
status = self._test_hm_update_status(member['address'], '8080',
'offline')
status = self._test_hm_update_status(
member['id'], member['address'], '8080', 'offline')
self.assertEqual(status['pools'][0]['provisioning_status'],
constants.ACTIVE)
@@ -3790,15 +3857,13 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
def test_hm_update_status_offline_lb_pool_offline(self):
fake_subnet = fakes.FakeSubnet.create_one_subnet()
member = self._add_member(fake_subnet, 8080)
status = self._test_hm_update_status(member['address'], '8080',
'offline',
lb_status=constants.OFFLINE,
pool_status=constants.OFFLINE)
status = self._test_hm_update_status(
member['id'], member['address'], '8080', 'offline')
self.assertEqual(status['pools'][0]['provisioning_status'],
constants.ACTIVE)
self.assertEqual(status['pools'][0]['operating_status'],
constants.OFFLINE)
constants.ERROR)
self.assertEqual(status['members'][0]['provisioning_status'],
constants.ACTIVE)
self.assertEqual(status['members'][0]['operating_status'],
@@ -3806,13 +3871,13 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
constants.ACTIVE)
self.assertEqual(status['loadbalancers'][0]['operating_status'],
constants.OFFLINE)
constants.ERROR)
def test_hm_update_status_online(self):
fake_subnet = fakes.FakeSubnet.create_one_subnet()
member = self._add_member(fake_subnet, 8080)
status = self._test_hm_update_status(member['address'], '8080',
'online')
status = self._test_hm_update_status(
member['id'], member['address'], '8080', 'online')
self.assertEqual(status['pools'][0]['provisioning_status'],
constants.ACTIVE)
@@ -3830,10 +3895,8 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
def test_hm_update_status_online_lb_pool_offline(self):
fake_subnet = fakes.FakeSubnet.create_one_subnet()
member = self._add_member(fake_subnet, 8080)
status = self._test_hm_update_status(member['address'], '8080',
'online',
lb_status=constants.OFFLINE,
pool_status=constants.OFFLINE)
status = self._test_hm_update_status(
member['id'], member['address'], '8080', 'online')
self.assertEqual(status['pools'][0]['provisioning_status'],
constants.ACTIVE)
@@ -3868,7 +3931,9 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
fake_member.operating_status = constants.ONLINE
self.octavia_driver_lib.get_member.return_value = fake_member
status = self._test_hm_update_status(ip_1, '8081', 'offline')
status = self._test_hm_update_status(
member_1['id'], ip_1, '8080', 'offline')
self.assertEqual(status['members'][0]['operating_status'],
constants.ERROR)
self.assertEqual(status['pools'][0]['operating_status'],
@@ -3880,7 +3945,8 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
# for Pool and Loadbalancer
fake_member.operating_status = constants.ERROR
self.octavia_driver_lib.get_member.return_value = fake_member
status = self._test_hm_update_status(ip_1, '8081', 'offline')
status = self._test_hm_update_status(
member_2['id'], ip_2, '8081', 'offline')
self.assertEqual(status['members'][0]['operating_status'],
constants.ERROR)
self.assertEqual(status['pools'][0]['operating_status'],
@@ -3891,7 +3957,6 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
def test_hm_update_status_online_two_members(self):
fake_subnet = fakes.FakeSubnet.create_one_subnet()
member_1 = self._add_member(fake_subnet, 8080)
ip_1 = member_1['address']
member_2 = self._add_member(fake_subnet, 8081)
ip_2 = member_2['address']
# This is the Octavia API version
@@ -3905,10 +3970,18 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
# Second member ERROR, operating_status should be DEGRADED
# for Pool and Loadbalancer
fake_member.operating_status = constants.ERROR
self.octavia_driver_lib.get_member.return_value = fake_member
status = self._test_hm_update_status(ip_1, '8081', 'online')
status = self._test_hm_update_status(
member_2['id'], ip_2, '8081', 'offline')
member_status = {
ovn_const.OVN_MEMBER_STATUS_KEY: '{"%s": "%s", "%s": "%s"}'
% (member_1['id'],
constants.ONLINE,
member_2['id'],
constants.ERROR,)}
self.helper.ovn_nbdb_api.db_set.assert_called_once_with(
'Load_Balancer',
self.ovn_hm_lb.uuid,
('external_ids', member_status))
self.assertEqual(status['members'][0]['operating_status'],
constants.ONLINE)
self.assertEqual(status['pools'][0]['operating_status'],
@@ -3920,7 +3993,8 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
# for Pool and Loadbalancer
fake_member.operating_status = constants.ONLINE
self.octavia_driver_lib.get_member.return_value = fake_member
status = self._test_hm_update_status(ip_1, '8081', 'online')
status = self._test_hm_update_status(
member_2['id'], ip_2, '8081', 'online')
self.assertEqual(status['members'][0]['operating_status'],
constants.ONLINE)
self.assertEqual(status['pools'][0]['operating_status'],