Merge "Cover the use case of a member non existing" into stable/xena

Zuul 2023-09-26 17:27:00 +00:00 committed by Gerrit Code Review
commit 2db264fc58
3 changed files with 165 additions and 218 deletions


@@ -1926,41 +1926,14 @@ class OvnProviderHelper():
def member_create(self, member):
new_member = None
pool_listeners = []
try:
pool_key, ovn_lb = self._find_ovn_lb_by_pool_id(
member[constants.POOL_ID])
pool_listeners = self._get_pool_listeners(ovn_lb, pool_key)
new_member = self._add_member(member, ovn_lb, pool_key)
pool = {constants.ID: member[constants.POOL_ID],
constants.PROVISIONING_STATUS: constants.ACTIVE,
constants.OPERATING_STATUS: constants.ONLINE}
status = {
constants.POOLS: [pool],
constants.LOADBALANCERS: [
{constants.ID: ovn_lb.name,
constants.PROVISIONING_STATUS: constants.ACTIVE}]}
operating_status = constants.NO_MONITOR
except Exception:
LOG.exception(ovn_const.EXCEPTION_MSG, "creation of member")
status = {
constants.POOLS: [
{constants.ID: member[constants.POOL_ID],
constants.PROVISIONING_STATUS: constants.ERROR}],
constants.MEMBERS: [
{constants.ID: member[constants.ID],
constants.PROVISIONING_STATUS: constants.ACTIVE}],
constants.LOADBALANCERS: [
{constants.ID: ovn_lb.name,
constants.PROVISIONING_STATUS: constants.ACTIVE}]}
listener_status = []
for listener in pool_listeners:
listener_status.append(
{constants.ID: listener,
constants.PROVISIONING_STATUS: constants.ACTIVE})
status[constants.LISTENERS] = listener_status
operating_status = constants.NO_MONITOR
operating_status = constants.ERROR
if not member[constants.ADMIN_STATE_UP]:
operating_status = constants.OFFLINE
elif (new_member and operating_status == constants.NO_MONITOR and
@@ -1968,18 +1941,18 @@ class OvnProviderHelper():
operating_status = constants.ONLINE
mb_ip, mb_port, mb_subnet, mb_id = self._extract_member_info(
new_member)[0]
if not self._update_hm_member(ovn_lb, pool_key, mb_ip):
operating_status = constants.ERROR
member_status = {constants.ID: member[constants.ID],
constants.PROVISIONING_STATUS: constants.ACTIVE,
constants.OPERATING_STATUS: operating_status}
status[constants.MEMBERS] = [member_status]
mb_status = self._update_hm_member(ovn_lb, pool_key, mb_ip)
operating_status = (
constants.ERROR
if mb_status != constants.ONLINE else mb_status
)
self._update_external_ids_member_status(
ovn_lb,
member[constants.ID],
operating_status)
status = self._get_current_operating_statuses(ovn_lb)
return status
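With this change member_create no longer hand-builds a status dict per
branch; everything except the member's own operating status comes from
_get_current_operating_statuses. A minimal sketch of the returned shape,
with hypothetical placeholder UUIDs (keys follow octavia_lib constants):

status = {
    'loadbalancers': [{'id': 'lb-uuid', 'provisioning_status': 'ACTIVE',
                       'operating_status': 'ONLINE'}],
    'listeners': [{'id': 'listener-uuid', 'provisioning_status': 'ACTIVE',
                   'operating_status': 'ONLINE'}],
    'pools': [{'id': 'pool-uuid', 'provisioning_status': 'ACTIVE',
               'operating_status': 'ONLINE'}],
    # The member operating_status is NO_MONITOR, OFFLINE, ONLINE or ERROR
    # depending on admin_state_up and the _update_hm_member result.
    'members': [{'id': 'member-uuid', 'provisioning_status': 'ACTIVE',
                 'operating_status': 'NO_MONITOR'}],
}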
def _remove_member(self, member, ovn_lb, pool_key):
@@ -2020,71 +1993,39 @@ class OvnProviderHelper():
operator_fault_string=msg)
def member_delete(self, member):
pool_listeners = []
error_deleting_member = False
try:
pool_key, ovn_lb = self._find_ovn_lb_by_pool_id(
member[constants.POOL_ID])
pool_listeners = self._get_pool_listeners(ovn_lb, pool_key)
pool_status = self._remove_member(member, ovn_lb, pool_key)
pool = {constants.ID: member[constants.POOL_ID],
constants.PROVISIONING_STATUS: constants.ACTIVE,
constants.OPERATING_STATUS: pool_status}
if ovn_lb.health_check and pool_status == constants.OFFLINE:
# NOTE(froyo): if the pool status is OFFLINE there are no more
# members, so we should ensure the hm-port is deleted if no
# more LBs are using it. This call must happen after the
# ip_port_mappings for the OVN LB have been cleaned.
self._clean_up_hm_port(member[constants.SUBNET_ID])
status = {
constants.POOLS: [pool],
constants.MEMBERS: [
{constants.ID: member[constants.ID],
constants.PROVISIONING_STATUS: constants.DELETED}],
constants.LOADBALANCERS: [
{constants.ID: ovn_lb.name,
constants.PROVISIONING_STATUS: constants.ACTIVE}]}
except Exception:
LOG.exception(ovn_const.EXCEPTION_MSG, "deletion of member")
status = {
constants.POOLS: [
{constants.ID: member[constants.POOL_ID],
constants.PROVISIONING_STATUS: constants.ACTIVE}],
constants.MEMBERS: [
{constants.ID: member[constants.ID],
constants.PROVISIONING_STATUS: constants.ERROR}],
constants.LOADBALANCERS: [
{constants.ID: ovn_lb.name,
constants.PROVISIONING_STATUS: constants.ACTIVE}]}
error_deleting_member = True
self._update_external_ids_member_status(
ovn_lb, member[constants.ID], None, delete=True)
listener_status = []
for listener in pool_listeners:
listener_status.append(
{constants.ID: listener,
constants.PROVISIONING_STATUS: constants.ACTIVE})
status[constants.LISTENERS] = listener_status
status = self._get_current_operating_statuses(ovn_lb)
status[constants.MEMBERS] = [
{constants.ID: member[constants.ID],
constants.PROVISIONING_STATUS: constants.DELETED}]
if error_deleting_member:
status[constants.MEMBERS][0][constants.PROVISIONING_STATUS] = (
constants.ERROR)
return status
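The NOTE above pins an ordering constraint: _remove_member cleans this
LB's ip_port_mappings first, so the shared health-monitor port only
looks unused once the pool is genuinely empty. A condensed sketch, using
the helper names from this diff:

pool_status = self._remove_member(member, ovn_lb, pool_key)
if ovn_lb.health_check and pool_status == constants.OFFLINE:
    # OFFLINE means the pool is now empty; drop the HM port on the
    # member's subnet unless another load balancer still uses it.
    self._clean_up_hm_port(member[constants.SUBNET_ID])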
def member_update(self, member):
pool_listeners = []
try:
error_updating_member = False
pool_key, ovn_lb = self._find_ovn_lb_by_pool_id(
member[constants.POOL_ID])
member_status = {constants.ID: member[constants.ID],
constants.PROVISIONING_STATUS: constants.ACTIVE}
status = {
constants.POOLS: [
{constants.ID: member[constants.POOL_ID],
constants.PROVISIONING_STATUS: constants.ACTIVE}],
constants.MEMBERS: [member_status],
constants.LOADBALANCERS: [
{constants.ID: ovn_lb.name,
constants.PROVISIONING_STATUS: constants.ACTIVE}]}
pool_listeners = self._get_pool_listeners(ovn_lb, pool_key)
member_operating_status = constants.NO_MONITOR
last_status = self._find_member_status(
ovn_lb, member[constants.ID])
if constants.ADMIN_STATE_UP in member:
@@ -2093,30 +2034,25 @@ class OvnProviderHelper():
# as the last status valid for the member
if ovn_lb.health_check:
# search status of member_uuid
member_status[constants.OPERATING_STATUS] = last_status
member_operating_status = last_status
else:
member_status[constants.OPERATING_STATUS] = (
constants.NO_MONITOR)
member_operating_status = constants.NO_MONITOR
else:
member_status[constants.OPERATING_STATUS] = (
constants.OFFLINE)
member_operating_status = constants.OFFLINE
if constants.OPERATING_STATUS in member_status:
self._update_external_ids_member_status(
ovn_lb,
member[constants.ID],
member_status[constants.OPERATING_STATUS])
self._update_external_ids_member_status(
ovn_lb,
member[constants.ID],
member_operating_status)
# NOTE(froyo): If we are toggling from/to OFFLINE due to an
# admin_state_up change, we should update the vips
if (
last_status != constants.OFFLINE and
member_status[constants.OPERATING_STATUS] ==
constants.OFFLINE
member_operating_status == constants.OFFLINE
) or (
last_status == constants.OFFLINE and
member_status[constants.OPERATING_STATUS] !=
constants.OFFLINE
member_operating_status != constants.OFFLINE
):
commands = []
commands.extend(self._refresh_lb_vips(ovn_lb,
@@ -2125,23 +2061,16 @@ class OvnProviderHelper():
except Exception:
LOG.exception(ovn_const.EXCEPTION_MSG, "update of member")
status = {
constants.POOLS: [
{constants.ID: member[constants.POOL_ID],
constants.PROVISIONING_STATUS: constants.ACTIVE}],
constants.MEMBERS: [
{constants.ID: member[constants.ID],
constants.PROVISIONING_STATUS: constants.ERROR}],
constants.LOADBALANCERS: [
{constants.ID: ovn_lb.name,
constants.PROVISIONING_STATUS: constants.ACTIVE}]}
error_updating_member = True
listener_status = []
for listener in pool_listeners:
listener_status.append(
{constants.ID: listener,
constants.PROVISIONING_STATUS: constants.ACTIVE})
status[constants.LISTENERS] = listener_status
status = self._get_current_operating_statuses(ovn_lb)
status[constants.MEMBERS] = [
{constants.ID: member[constants.ID],
constants.PROVISIONING_STATUS: constants.ACTIVE,
constants.OPERATING_STATUS: member_operating_status}]
if error_updating_member:
status[constants.MEMBERS][0][constants.PROVISIONING_STATUS] = (
constants.ERROR)
return status
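The admin_state_up handling above reduces to a boundary check: the vips
map excludes OFFLINE members, so a refresh is needed exactly when the
member enters or leaves OFFLINE. A minimal sketch, assuming the
constants module the provider imports from octavia_lib:

from octavia_lib.common import constants

def needs_vip_refresh(last_status, new_status):
    # True iff exactly one of the two statuses is OFFLINE, i.e. the
    # member is crossing the OFFLINE boundary in either direction.
    return (last_status == constants.OFFLINE) != (
        new_status == constants.OFFLINE)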
def _get_existing_pool_members(self, pool_id):
@@ -2574,18 +2503,17 @@ class OvnProviderHelper():
# Update just the backend_ip member
for mb_ip, mb_port, mb_subnet, mb_id in self._extract_member_info(
ovn_lb.external_ids[pool_key]):
member_lsp = self._get_member_lsp(mb_ip, mb_subnet)
if mb_ip == backend_ip:
member_lsp = self._get_member_lsp(mb_ip, mb_subnet)
if not member_lsp:
# NOTE(froyo): In order to continue evaluating the rest of
# the members, we just warn about the member issue,
# assuming that it will be in OFFLINE status as soon as the
# HM does the first evaluation.
# No port found for the member backend IP; we can assume
# the port doesn't exist or there was a typo on creation of
# the member, so put the member immediately into ERROR
LOG.error("Member %(member)s Logical_Switch_Port not "
"found, when creating a Health Monitor for "
"pool %(pool)s.",
{'member': mb_ip, 'pool': pool_key})
break
return constants.ERROR
network_id = member_lsp.external_ids.get(
ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY).split('neutron-')[1]
@@ -2598,7 +2526,7 @@ class OvnProviderHelper():
"health monitoring. Cannot find a Health "
"Monitor for pool %(pool)s.",
{'network': network_id, 'pool': pool_key})
return False
return None
hm_source_ip = None
for fixed_ip in hm_port['fixed_ips']:
if fixed_ip['subnet_id'] == mb_subnet:
@@ -2611,15 +2539,14 @@ class OvnProviderHelper():
{'subnet': mb_subnet,
'member': mb_ip,
'pool': pool_key})
return False
return None
self._update_ip_port_mappings(ovn_lb, backend_ip,
member_lsp.name, hm_source_ip,
delete)
return True
return constants.ONLINE
# NOTE(froyo): If the backend is not located, or there is just
# one member but its lsp was not found
return True
# NOTE(froyo): If the backend is not located
return constants.ERROR
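Taken together, these hunks turn _update_hm_member from a boolean into a
tri-state result. A hedged summary of the contract as the rest of this
diff consumes it (derived from the hunks, not authoritative):

from octavia_lib.common import constants

# constants.ONLINE -> ip_port_mappings updated for the backend
# constants.ERROR  -> the member's Logical_Switch_Port was not found
# None             -> no HM port, or no HM source IP on the member subnet
def member_status_after_hm_update(mb_status):
    # Callers such as member_create collapse anything but ONLINE to ERROR.
    return (constants.ONLINE if mb_status == constants.ONLINE
            else constants.ERROR)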
def _lookup_lbhcs_by_hm_id(self, hm_id):
lbhc_rows = self.ovn_nbdb_api.db_list_rows(
@@ -2707,11 +2634,16 @@ class OvnProviderHelper():
if hm_status[constants.PROVISIONING_STATUS] == constants.ACTIVE:
for mb_ip, mb_port, mb_subnet, mb_id in self._extract_member_info(
ovn_lb.external_ids[pool_key]):
if not self._update_hm_member(ovn_lb, pool_key, mb_ip):
mb_status = self._update_hm_member(ovn_lb, pool_key, mb_ip)
if not mb_status:
hm_status[constants.PROVISIONING_STATUS] = constants.ERROR
hm_status[constants.OPERATING_STATUS] = constants.ERROR
self._clean_ip_port_mappings(ovn_lb, pool_key)
break
self._update_external_ids_member_status(
ovn_lb, mb_id, mb_status)
else:
status = self._get_current_operating_statuses(ovn_lb)
status[constants.HEALTHMONITORS] = [hm_status]
return status
@@ -2958,11 +2890,12 @@ class OvnProviderHelper():
for k, v in ovn_lb.external_ids.items():
if ovn_const.LB_EXT_IDS_LISTENER_PREFIX in k:
listeners[k.split('_')[1]] = [
x.split('_')[1] for x in v.split(',')]
x.split('_')[1] for x in v.split(',')
if ovn_const.LB_EXT_IDS_POOL_PREFIX in x]
continue
if ovn_const.LB_EXT_IDS_POOL_PREFIX in k:
pools[k.split('_')[1]] = [
x.split('_')[1] for x in v.split(',')]
x.split('_')[1] for x in v.split(',') if x]
continue
for member_id, member_status in member_statuses.items():
@@ -2981,13 +2914,13 @@ class OvnProviderHelper():
members[i] = constants.ONLINE
_pool = self._octavia_driver_lib.get_pool(pool_id)
if not _pool.admin_state_up:
if not _pool.admin_state_up or not member_statuses:
pools[pool_id] = constants.OFFLINE
elif all(constants.ERROR == member_status
for member_status in pools[pool_id]):
elif pools[pool_id] and all(constants.ERROR == member_status
for member_status in pools[pool_id]):
pools[pool_id] = constants.ERROR
elif any(constants.ERROR == member_status
for member_status in pools[pool_id]):
elif pools[pool_id] and any(constants.ERROR == member_status
for member_status in pools[pool_id]):
pools[pool_id] = constants.DEGRADED
else:
pools[pool_id] = constants.ONLINE
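A minimal sketch of the resulting pool aggregation rule, assuming a
per-pool list of member operating statuses and the pool's admin state:

from octavia_lib.common import constants

def aggregate_pool_status(member_statuses, admin_state_up):
    # Empty pools now read OFFLINE; previously all() over an empty list
    # returned True, misreporting a memberless pool as ERROR.
    if not admin_state_up or not member_statuses:
        return constants.OFFLINE
    if all(s == constants.ERROR for s in member_statuses):
        return constants.ERROR
    if any(s == constants.ERROR for s in member_statuses):
        return constants.DEGRADED
    return constants.ONLINE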


@@ -336,11 +336,12 @@ class TestOvnOctaviaBase(base.TestOVNFunctionalBase,
# Within this status update, check whether all values of
# the expected keys match.
for k, v in expected_status.items():
val_check.append(
sorted(expected_status[k],
key=lambda x: x['id']) ==
sorted(updated_status[k],
key=lambda x: x['id']))
ex = sorted(expected_status[k],
key=lambda x: x['id'])
ox = sorted(updated_status[k],
key=lambda x: x['id'])
val_check.append(all(item in ox for item in ex))
if False in val_check:
# At least one value doesn't match.
continue
@@ -787,7 +788,8 @@ class TestOvnOctaviaBase(base.TestOVNFunctionalBase,
"provisioning_status": o_constants.DELETED}],
'loadbalancers': [{
"id": p.loadbalancer_id,
"provisioning_status": o_constants.ACTIVE}],
"provisioning_status": o_constants.ACTIVE,
'operating_status': o_constants.ONLINE}],
'listeners': []})
self._update_ls_refs(
lb_data, self._local_net_cache[m.subnet_id], add_ref=False)
@@ -855,7 +857,8 @@ class TestOvnOctaviaBase(base.TestOVNFunctionalBase,
self._update_ls_refs(lb_data, network_id)
pool_listeners = self._get_pool_listeners(lb_data, pool_id)
expected_listener_status = [
{'id': listener.listener_id, 'provisioning_status': 'ACTIVE'}
{'id': listener.listener_id, 'provisioning_status': 'ACTIVE',
'operating_status': o_constants.ONLINE}
for listener in pool_listeners]
expected_status = {
@@ -864,7 +867,8 @@ class TestOvnOctaviaBase(base.TestOVNFunctionalBase,
"provisioning_status": "ACTIVE",
"operating_status": o_constants.NO_MONITOR}],
'loadbalancers': [{'id': pool.loadbalancer_id,
'provisioning_status': 'ACTIVE'}],
'provisioning_status': 'ACTIVE',
'operating_status': o_constants.ONLINE}],
'listeners': expected_listener_status
}
self._wait_for_status_and_validate(lb_data, [expected_status])
@@ -892,11 +896,13 @@ class TestOvnOctaviaBase(base.TestOVNFunctionalBase,
self.ovn_driver.member_update(old_member, member)
expected_status = {
'pools': [{'id': pool.pool_id,
'provisioning_status': 'ACTIVE'}],
'provisioning_status': 'ACTIVE',
'operating_status': o_constants.ONLINE}],
'members': [{"id": member.member_id,
'provisioning_status': 'ACTIVE'}],
'loadbalancers': [{'id': pool.loadbalancer_id,
'provisioning_status': 'ACTIVE'}],
'provisioning_status': 'ACTIVE',
'operating_status': o_constants.ONLINE}],
'listeners': []
}
if getattr(member, 'admin_state_up', None):
@@ -920,7 +926,8 @@ class TestOvnOctaviaBase(base.TestOVNFunctionalBase,
'provisioning_status': 'ACTIVE',
'operating_status': 'ONLINE'}],
'loadbalancers': [{'id': pool.loadbalancer_id,
'provisioning_status': 'ACTIVE'}],
'provisioning_status': 'ACTIVE',
'operating_status': o_constants.ONLINE}],
'listeners': []})
for m in pool.members:
found = False
@@ -968,7 +975,8 @@ class TestOvnOctaviaBase(base.TestOVNFunctionalBase,
'members': [{"id": member.member_id,
"provisioning_status": "DELETED"}],
'loadbalancers': [{"id": pool.loadbalancer_id,
"provisioning_status": "ACTIVE"}],
'provisioning_status': 'ACTIVE',
'operating_status': o_constants.ONLINE}],
'listeners': []}
self._update_ls_refs(lb_data, network_id, add_ref=False)
@@ -986,6 +994,10 @@ class TestOvnOctaviaBase(base.TestOVNFunctionalBase,
max_retries, hm_type)
pool.healthmonitor = m_hm
self.ovn_driver._ovn_helper._update_hm_member = mock.MagicMock()
self.ovn_driver._ovn_helper._update_hm_member.side_effect = [
o_constants.ONLINE, o_constants.ONLINE]
self.ovn_driver.health_monitor_create(m_hm)
pool_listeners = self._get_pool_listeners(lb_data, pool_id)
expected_listener_status = [
@@ -1005,7 +1017,8 @@ class TestOvnOctaviaBase(base.TestOVNFunctionalBase,
'pools': [pool_status],
'members': expected_member_status,
'loadbalancers': [{'id': pool.loadbalancer_id,
'provisioning_status': o_constants.ACTIVE}],
'provisioning_status': o_constants.ACTIVE,
'operating_status': o_constants.ONLINE}],
'listeners': expected_listener_status,
'healthmonitors': [expected_hm_status]
}


@@ -125,7 +125,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: 'foo_hm_port',
ovn_const.LB_EXT_IDS_HMS_KEY: '["%s"]' % (self.ovn_hm.uuid),
'enabled': True,
'pool_%s' % self.pool_id: [],
'pool_%s' % self.pool_id: '',
'listener_%s' % self.listener_id: '80:pool_%s' % self.pool_id,
ovn_const.OVN_MEMBER_STATUS_KEY: '{}'}
self.helper.ovn_nbdb_api.db_find.return_value.\
@@ -227,20 +227,16 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
def test__update_hm_member_no_members(self):
pool_key = 'pool_%s' % self.pool_id
self.ovn_lb.external_ids[pool_key] = ''
self.assertTrue(
self.helper._update_hm_member(self.ovn_lb,
pool_key,
'10.0.0.4'))
self.assertEqual(self.helper._update_hm_member(
self.ovn_lb, pool_key, '10.0.0.4'), constants.ERROR)
def test__update_hm_member_backend_ip_not_match(self):
pool_key = 'pool_%s' % self.pool_id
self.ovn_lb.external_ids[pool_key] = self.member_line
with mock.patch.object(ovn_helper.OvnProviderHelper,
'_get_member_lsp'):
self.assertTrue(
self.helper._update_hm_member(self.ovn_lb,
pool_key,
'10.0.0.4'))
self.assertEqual(self.helper._update_hm_member(
self.ovn_lb, pool_key, '10.0.0.4'), constants.ERROR)
@mock.patch.object(ovn_helper.OvnProviderHelper, '_ensure_hm_ovn_port')
def test__update_hm_member_hm_port_multiple_ip(self, ensure_hm_port):
@@ -254,10 +250,8 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
pool_key = 'pool_%s' % self.pool_id
with mock.patch.object(ovn_helper.OvnProviderHelper,
'_get_member_lsp'):
self.assertTrue(
self.helper._update_hm_member(self.ovn_lb,
pool_key,
self.member_address))
self.assertEqual(self.helper._update_hm_member(
self.ovn_lb, pool_key, self.member_address), constants.ONLINE)
@mock.patch.object(ovn_helper.OvnProviderHelper, '_ensure_hm_ovn_port')
def test__update_hm_member_hm_port_not_found(self, ensure_hm_port):
@@ -265,7 +259,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
pool_key = 'pool_%s' % self.pool_id
with mock.patch.object(ovn_helper.OvnProviderHelper,
'_get_member_lsp'):
self.assertFalse(
self.assertIsNone(
self.helper._update_hm_member(self.ovn_lb,
pool_key,
self.member_address))
@@ -1692,28 +1686,11 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
self.helper.ovn_nbdb_api.lb_del.assert_called_once_with(
self.ovn_lb.uuid)
@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
def test_member_create(self, net_cli):
net_cli.return_value.show_subnet.side_effect = [idlutils.RowNotFound]
self.ovn_lb.external_ids = mock.MagicMock()
status = self.helper.member_create(self.member)
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
constants.ACTIVE)
self.assertEqual(status['pools'][0]['provisioning_status'],
constants.ACTIVE)
self.assertEqual(status['members'][0]['provisioning_status'],
constants.ACTIVE)
self.assertEqual(status['members'][0]['operating_status'],
constants.NO_MONITOR)
calls = [
mock.call.db_clear('Load_Balancer', self.ovn_lb.uuid, 'vips'),
mock.call.db_set('Load_Balancer', self.ovn_lb.uuid, ('vips', {}))]
self.helper.ovn_nbdb_api.assert_has_calls(calls)
@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
def test_member_create_disabled(self, net_cli):
net_cli.return_value.show_subnet.side_effect = [idlutils.RowNotFound]
self.ovn_lb.external_ids = mock.MagicMock()
self._update_external_ids_member_status(self.ovn_lb, self.member['id'],
'offline')
self.member['admin_state_up'] = False
status = self.helper.member_create(self.member)
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
@@ -1763,8 +1740,12 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
@mock.patch.object(ovn_helper.OvnProviderHelper, '_add_member')
def test_member_create_exception(self, mock_add_member):
mock_add_member.side_effect = [RuntimeError]
self._update_external_ids_member_status(self.ovn_lb, self.member_id,
'error')
status = self.helper.member_create(self.member)
self.assertEqual(status['pools'][0]['provisioning_status'],
constants.ACTIVE)
self.assertEqual(status['pools'][0]['operating_status'],
constants.ERROR)
def test_member_create_lb_disabled(self):
@@ -1799,18 +1780,30 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
self.router)
@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
def test_member_create_listener(self, net_cli):
def test_member_create(self, net_cli):
net_cli.return_value.show_subnet.side_effect = [idlutils.RowNotFound]
self.ovn_lb.external_ids = mock.MagicMock()
self.helper._get_pool_listeners.return_value = ['listener1']
status = self.helper.member_create(self.member)
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
constants.ACTIVE)
self.assertEqual(status['loadbalancers'][0]['operating_status'],
constants.ONLINE)
self.assertEqual(status['listeners'][0]['provisioning_status'],
constants.ACTIVE)
self.assertEqual(status['listeners'][0]['id'],
'listener1')
self.assertEqual(status['listeners'][0]['operating_status'],
constants.ONLINE)
self.assertEqual(status['pools'][0]['provisioning_status'],
constants.ACTIVE)
self.assertEqual(status['pools'][0]['operating_status'],
constants.ONLINE)
self.assertEqual(status['members'][0]['id'],
self.member_id)
self.assertEqual(status['members'][0]['provisioning_status'],
constants.ACTIVE)
self.assertEqual(status['members'][0]['operating_status'],
constants.NO_MONITOR)
def test_member_create_already_exists(self):
self.helper.member_create(self.member)
status = self.helper.member_create(self.member)
member_status = {
ovn_const.OVN_MEMBER_STATUS_KEY: '{"%s": "%s"}'
% (self.member_id, constants.NO_MONITOR)}
@@ -1818,6 +1811,14 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
'Load_Balancer',
self.ovn_lb.uuid,
('external_ids', member_status))
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
constants.ACTIVE)
self.assertEqual(status['pools'][0]['provisioning_status'],
constants.ACTIVE)
self.assertEqual(status['members'][0]['provisioning_status'],
constants.ACTIVE)
self.assertEqual(status['members'][0]['operating_status'],
constants.NO_MONITOR)
def test_member_create_first_member_in_pool(self):
self.ovn_lb.external_ids.update({
@@ -1866,7 +1867,6 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
expected_calls)
def test_member_update(self):
self.ovn_lb.external_ids = mock.MagicMock()
status = self.helper.member_update(self.member)
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
constants.ACTIVE)
@@ -1877,6 +1877,8 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
self.assertEqual(status['members'][0]['operating_status'],
constants.NO_MONITOR)
self.member['admin_state_up'] = False
self._update_external_ids_member_status(self.ovn_lb, self.member_id,
'offline')
status = self.helper.member_update(self.member)
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
constants.ACTIVE)
@@ -1896,6 +1898,8 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
constants.ACTIVE)
self.member['old_admin_state_up'] = False
self.member['admin_state_up'] = True
self._update_external_ids_member_status(self.ovn_lb, self.member_id,
'online')
status = self.helper.member_update(self.member)
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
constants.ACTIVE)
@@ -1930,15 +1934,6 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
[mock.call('pool_%s' % self.pool_id),
mock.call('pool_%s%s' % (self.pool_id, ':D'))])
def test_member_update_pool_listeners(self):
self.ovn_lb.external_ids = mock.MagicMock()
self.helper._get_pool_listeners.return_value = ['listener1']
status = self.helper.member_update(self.member)
self.assertEqual(status['listeners'][0]['provisioning_status'],
constants.ACTIVE)
self.assertEqual(status['listeners'][0]['id'],
'listener1')
@mock.patch.object(ovn_helper.OvnProviderHelper, '_find_member_status')
def test_member_update_exception(self, mock_find_member_status):
mock_find_member_status.side_effect = [TypeError]
@@ -1957,7 +1952,11 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
self.assertEqual(status['members'][0]['provisioning_status'],
constants.DELETED)
def test_member_delete_one_left(self):
@mock.patch.object(ovn_helper.OvnProviderHelper, '_remove_member')
@mock.patch.object(ovn_helper.OvnProviderHelper,
'_update_external_ids_member_status')
def test_member_delete_one_left(self, update_external_ids_members,
rmmember):
member2_id = uuidutils.generate_uuid()
member2_port = '1010'
member2_address = '192.168.2.150'
@@ -1970,6 +1969,10 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
self.ovn_lb.external_ids.update({
'pool_' + self.pool_id: member_line})
status = self.helper.member_delete(self.member)
rmmember.assert_called_once_with(
self.member, self.ovn_lb, 'pool_' + self.pool_id)
update_external_ids_members.assert_called_once_with(
self.ovn_lb, self.member_id, None, delete=True)
self.assertEqual(status['members'][0]['provisioning_status'],
constants.DELETED)
self.assertEqual(status['pools'][0]['provisioning_status'],
@@ -1989,13 +1992,18 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
self.member_address,
delete=True)
def test_member_delete_none(self):
def test_member_delete_not_found_in_pool(self):
self.ovn_lb.external_ids.update({'pool_' + self.pool_id: ''})
self.ovn_lb.external_ids[ovn_const.OVN_MEMBER_STATUS_KEY] = '{}'
status = self.helper.member_delete(self.member)
self.assertEqual(status['members'][0]['provisioning_status'],
constants.ERROR)
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
constants.ACTIVE)
self.assertEqual(status['listeners'][0]['provisioning_status'],
constants.ACTIVE)
self.assertEqual(status['pools'][0]['provisioning_status'],
constants.ACTIVE)
self.assertEqual(status['members'][0]['provisioning_status'],
constants.ERROR)
@mock.patch.object(ovn_helper.OvnProviderHelper, '_remove_member')
def test_member_delete_exception(self, mock_remove_member):
@@ -2012,20 +2020,6 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
[mock.call('pool_%s' % self.pool_id),
mock.call('pool_%s%s' % (self.pool_id, ':D'))])
def test_member_delete_pool_listeners(self):
member_line = (
'member_%s_%s:%s_%s' %
(self.member_id, self.member_address, self.member_port,
self.member_subnet_id))
self.ovn_lb.external_ids.update({
'pool_' + self.pool_id: member_line})
self.helper._get_pool_listeners.return_value = ['listener1']
status = self.helper.member_delete(self.member)
self.assertEqual(status['listeners'][0]['provisioning_status'],
constants.ACTIVE)
self.assertEqual(status['listeners'][0]['id'],
'listener1')
@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
def test_logical_router_port_event_create(self, net_cli):
self.router_port_event = ovn_event.LogicalRouterPortEvent(
@@ -3765,10 +3759,12 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
pool_key = 'pool_%s' % self.pool_id
self.ovn_hm_lb.protocol = [protocol]
folbpi.return_value = (pool_key, self.ovn_hm_lb)
uhm.return_value = True
uhm.return_value = constants.ONLINE
net_cli.return_value.show_subnet.return_value = {'subnet': fake_subnet}
if not fip:
del self.ovn_hm_lb.external_ids[ovn_const.LB_EXT_IDS_VIP_FIP_KEY]
self._update_external_ids_member_status(self.ovn_hm_lb, self.member_id,
'online')
status = self.helper.hm_create(self.health_monitor)
self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
constants.ACTIVE)
@@ -3875,6 +3871,8 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
pool_key = 'pool_%s' % self.pool_id
folbpi.return_value = (pool_key, self.ovn_hm_lb)
self.health_monitor['admin_state_up'] = False
self._update_external_ids_member_status(self.ovn_hm_lb, self.member_id,
'online')
status = self.helper.hm_create(self.health_monitor)
self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
constants.ACTIVE)
@@ -4419,7 +4417,7 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
self._test_hm_update_no_member(False, True)
def _test_hm_update_status(self, ovn_lbs, member_id, ip, port,
member_status):
mb_status):
info = {
'ovn_lbs': ovn_lbs,
'ip': ip,
@@ -4427,15 +4425,18 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
'src_ip': '10.22.33.4',
'port': port,
'protocol': ovn_lbs[0].protocol,
'status': [member_status]}
'status': [mb_status]}
mb_status_ovn = 'error' if mb_status == 'offline' else mb_status
self._update_external_ids_member_status(self.ovn_hm_lb, member_id,
member_status)
mb_status_ovn)
status = self.helper.hm_update_event(info)
return status
def _update_external_ids_member_status(self, lb, member_id, member_status):
status = constants.ONLINE
if member_status == 'offline':
status = constants.OFFLINE
elif member_status == 'error':
status = constants.ERROR
try:
existing_member_status = lb.external_ids[
@@ -4684,9 +4685,9 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
'protocol': self.ovn_hm_lb.protocol,
'status': ovn_const.HM_EVENT_MEMBER_PORT_OFFLINE}
self._update_external_ids_member_status(self.ovn_hm_lb, member['id'],
'offline')
'error')
self._update_external_ids_member_status(ovn_hm_lb_2, member_2['id'],
'offline')
'error')
status = self.helper.hm_update_event(info)
self.assertEqual(status['pools'][0]['provisioning_status'],
@@ -4794,10 +4795,10 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
member_lb2 = self._add_member(ovn_hm_lb2, fake_subnet, 8081,
pool_id=pool_id_2, ip=ip_member)
# member lb2 OFFLINE, so lb2 operating_status should be ERROR
# member lb2 ERROR, so lb2 operating_status should be ERROR
# for Pool and Loadbalancer, but lb1 should keep ONLINE
self._update_external_ids_member_status(ovn_hm_lb2, member_lb2['id'],
'offline')
'error')
info = {
'ovn_lbs': [self.ovn_hm_lb, ovn_hm_lb2],