Prevent UDP LBs from using different IP protocol versions in amphora driver
The amphora doesn't support mixing IPv4 and IPv6 addresses between its members and its VIP when using UDP load balancers. This commit adds a validation step to the member_create and member_batch_update functions of the amphora driver to ensure that the IP protocol versions are the same in a UDP load balancer.

Story: 2005876
Task: 33689
Change-Id: If6fb3fde9b43ac82af46eaddc48ec7a3a5b95602
commit 56bb1e134d
parent de30dbb62f
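The check itself is simple: when the pool protocol is UDP, every member address must have the same IP version as the load balancer's VIP. A minimal standalone sketch of the same idea, using only the standard library's ipaddress module in place of Octavia's utils.is_ipv6 helper and a plain ValueError in place of UnsupportedOptionError:

import ipaddress

def is_ipv6(address):
    # True when the address parses as IPv6
    return ipaddress.ip_address(address).version == 6

def check_udp_members(vip_address, member_addresses):
    # Reject any member whose IP version differs from the VIP's version.
    vip_is_ipv6 = is_ipv6(vip_address)
    for address in member_addresses:
        if is_ipv6(address) != vip_is_ipv6:
            raise ValueError(
                "Mixing IPv4 and IPv6 addresses for the VIP and members of "
                "a UDP load balancer is not supported.")

check_udp_members("192.0.2.10", ["192.0.2.20"])   # passes
# check_udp_members("192.0.2.10", ["fe80::1"])    # would raise ValueError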
@@ -163,6 +163,11 @@ class AmphoraProviderDriver(driver_base.ProviderDriver):
 
     # Member
     def member_create(self, member):
+        pool_id = member.pool_id
+        db_pool = self.repositories.pool.get(db_apis.get_session(),
+                                             id=pool_id)
+        self._validate_members(db_pool, [member])
+
         payload = {consts.MEMBER_ID: member.member_id}
         self.client.cast({}, 'create_member', **payload)
 
@@ -186,6 +191,9 @@ class AmphoraProviderDriver(driver_base.ProviderDriver):
         pool_id = members[0].pool_id
         # The DB should not have updated yet, so we can still use the pool
         db_pool = self.repositories.pool.get(db_apis.get_session(), id=pool_id)
+
+        self._validate_members(db_pool, members)
+
         old_members = db_pool.members
 
         old_member_ids = [m.id for m in old_members]
@@ -218,6 +226,24 @@ class AmphoraProviderDriver(driver_base.ProviderDriver):
                    'updated_members': updated_members}
         self.client.cast({}, 'batch_update_members', **payload)
 
+    def _validate_members(self, db_pool, members):
+        if db_pool.protocol == consts.PROTOCOL_UDP:
+            # For UDP LBs, check that we are not mixing IPv4 and IPv6
+            for member in members:
+                member_is_ipv6 = utils.is_ipv6(member.address)
+
+                for listener in db_pool.listeners:
+                    lb = listener.load_balancer
+                    vip_is_ipv6 = utils.is_ipv6(lb.vip.ip_address)
+
+                    if member_is_ipv6 != vip_is_ipv6:
+                        msg = ("This provider doesn't support mixing IPv4 and "
+                               "IPv6 addresses for its VIP and members in UDP "
+                               "load balancers.")
+                        raise exceptions.UnsupportedOptionError(
+                            user_fault_string=msg,
+                            operator_fault_string=msg)
+
     # Health Monitor
     def health_monitor_create(self, healthmonitor):
         payload = {consts.HEALTH_MONITOR_ID: healthmonitor.healthmonitor_id}
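The new _validate_members helper walks from the pool to each listener's load balancer VIP and compares its address family against every member address. A rough illustration of that object walk, with hypothetical SimpleNamespace stand-ins for the DB models (the real objects come from PoolRepository.get) and the standard library in place of utils.is_ipv6:

from types import SimpleNamespace
import ipaddress

# Hypothetical stand-ins mirroring the attribute chain the driver reads:
# pool -> listeners -> load_balancer -> vip -> ip_address
vip = SimpleNamespace(ip_address="fe80::10")
load_balancer = SimpleNamespace(vip=vip)
listener = SimpleNamespace(load_balancer=load_balancer)
db_pool = SimpleNamespace(protocol="UDP", listeners=[listener])
member = SimpleNamespace(address="192.0.2.1")

member_is_ipv6 = ipaddress.ip_address(member.address).version == 6
vip_is_ipv6 = ipaddress.ip_address(
    db_pool.listeners[0].load_balancer.vip.ip_address).version == 6
print(member_is_ipv6 != vip_is_ipv6)  # True -> the driver would reject this member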
@@ -206,14 +206,60 @@ class TestAmphoraDriver(base.TestRpc):
         mock_cast.assert_called_with({}, 'update_pool', **payload)
 
     # Member
+    @mock.patch('octavia.db.api.get_session')
+    @mock.patch('octavia.db.repositories.PoolRepository.get')
     @mock.patch('oslo_messaging.RPCClient.cast')
-    def test_member_create(self, mock_cast):
+    def test_member_create(self, mock_cast, mock_pool_get, mock_session):
         provider_member = driver_dm.Member(
             member_id=self.sample_data.member1_id)
         self.amp_driver.member_create(provider_member)
         payload = {consts.MEMBER_ID: self.sample_data.member1_id}
         mock_cast.assert_called_with({}, 'create_member', **payload)
 
+    @mock.patch('octavia.db.api.get_session')
+    @mock.patch('octavia.db.repositories.PoolRepository.get')
+    @mock.patch('oslo_messaging.RPCClient.cast')
+    def test_member_create_udp_ipv4(self, mock_cast, mock_pool_get,
+                                    mock_session):
+        mock_lb = mock.MagicMock()
+        mock_lb.vip = mock.MagicMock()
+        mock_lb.vip.ip_address = "192.0.1.1"
+        mock_listener = mock.MagicMock()
+        mock_listener.load_balancer = mock_lb
+        mock_pool = mock.MagicMock()
+        mock_pool.protocol = consts.PROTOCOL_UDP
+        mock_pool.listeners = [mock_listener]
+        mock_pool_get.return_value = mock_pool
+
+        provider_member = driver_dm.Member(
+            member_id=self.sample_data.member1_id,
+            address="192.0.2.1")
+        self.amp_driver.member_create(provider_member)
+        payload = {consts.MEMBER_ID: self.sample_data.member1_id}
+        mock_cast.assert_called_with({}, 'create_member', **payload)
+
+    @mock.patch('octavia.db.api.get_session')
+    @mock.patch('octavia.db.repositories.PoolRepository.get')
+    @mock.patch('oslo_messaging.RPCClient.cast')
+    def test_member_create_udp_ipv4_ipv6(self, mock_cast, mock_pool_get,
+                                         mock_session):
+        mock_lb = mock.MagicMock()
+        mock_lb.vip = mock.MagicMock()
+        mock_lb.vip.ip_address = "fe80::1"
+        mock_listener = mock.MagicMock()
+        mock_listener.load_balancer = mock_lb
+        mock_pool = mock.MagicMock()
+        mock_pool.protocol = consts.PROTOCOL_UDP
+        mock_pool.listeners = [mock_listener]
+        mock_pool_get.return_value = mock_pool
+
+        provider_member = driver_dm.Member(
+            member_id=self.sample_data.member1_id,
+            address="192.0.2.1")
+        self.assertRaises(exceptions.UnsupportedOptionError,
+                          self.amp_driver.member_create,
+                          provider_member)
+
     @mock.patch('oslo_messaging.RPCClient.cast')
     def test_member_delete(self, mock_cast):
         provider_member = driver_dm.Member(
@@ -332,6 +378,83 @@ class TestAmphoraDriver(base.TestRpc):
         payload = {consts.HEALTH_MONITOR_ID: self.sample_data.hm1_id}
         mock_cast.assert_called_with({}, 'delete_health_monitor', **payload)
 
+    @mock.patch('octavia.db.api.get_session')
+    @mock.patch('octavia.db.repositories.PoolRepository.get')
+    @mock.patch('oslo_messaging.RPCClient.cast')
+    def test_member_batch_update_udp_ipv4(self, mock_cast, mock_pool_get,
+                                          mock_session):
+
+        mock_lb = mock.MagicMock()
+        mock_lb.vip = mock.MagicMock()
+        mock_lb.vip.ip_address = "192.0.1.1"
+        mock_listener = mock.MagicMock()
+        mock_listener.load_balancer = mock_lb
+        mock_pool = mock.MagicMock()
+        mock_pool.protocol = consts.PROTOCOL_UDP
+        mock_pool.listeners = [mock_listener]
+        mock_pool.members = self.sample_data.db_pool1_members
+        mock_pool_get.return_value = mock_pool
+
+        prov_mem_update = driver_dm.Member(
+            member_id=self.sample_data.member2_id,
+            pool_id=self.sample_data.pool1_id, admin_state_up=False,
+            address='192.0.2.17', monitor_address='192.0.2.77',
+            protocol_port=80, name='updated-member2')
+        prov_new_member = driver_dm.Member(
+            member_id=self.sample_data.member3_id,
+            pool_id=self.sample_data.pool1_id,
+            address='192.0.2.18', monitor_address='192.0.2.28',
+            protocol_port=80, name='member3')
+        prov_members = [prov_mem_update, prov_new_member]
+
+        update_mem_dict = {'ip_address': '192.0.2.17',
+                           'name': 'updated-member2',
+                           'monitor_address': '192.0.2.77',
+                           'id': self.sample_data.member2_id,
+                           'enabled': False,
+                           'protocol_port': 80,
+                           'pool_id': self.sample_data.pool1_id}
+
+        self.amp_driver.member_batch_update(prov_members)
+
+        payload = {'old_member_ids': [self.sample_data.member1_id],
+                   'new_member_ids': [self.sample_data.member3_id],
+                   'updated_members': [update_mem_dict]}
+        mock_cast.assert_called_with({}, 'batch_update_members', **payload)
+
+    @mock.patch('octavia.db.api.get_session')
+    @mock.patch('octavia.db.repositories.PoolRepository.get')
+    @mock.patch('oslo_messaging.RPCClient.cast')
+    def test_member_batch_update_udp_ipv4_ipv6(self, mock_cast, mock_pool_get,
+                                               mock_session):
+
+        mock_lb = mock.MagicMock()
+        mock_lb.vip = mock.MagicMock()
+        mock_lb.vip.ip_address = "192.0.1.1"
+        mock_listener = mock.MagicMock()
+        mock_listener.load_balancer = mock_lb
+        mock_pool = mock.MagicMock()
+        mock_pool.protocol = consts.PROTOCOL_UDP
+        mock_pool.listeners = [mock_listener]
+        mock_pool.members = self.sample_data.db_pool1_members
+        mock_pool_get.return_value = mock_pool
+
+        prov_mem_update = driver_dm.Member(
+            member_id=self.sample_data.member2_id,
+            pool_id=self.sample_data.pool1_id, admin_state_up=False,
+            address='fe80::1', monitor_address='fe80::2',
+            protocol_port=80, name='updated-member2')
+        prov_new_member = driver_dm.Member(
+            member_id=self.sample_data.member3_id,
+            pool_id=self.sample_data.pool1_id,
+            address='192.0.2.18', monitor_address='192.0.2.28',
+            protocol_port=80, name='member3')
+        prov_members = [prov_mem_update, prov_new_member]
+
+        self.assertRaises(exceptions.UnsupportedOptionError,
+                          self.amp_driver.member_batch_update,
+                          prov_members)
+
     @mock.patch('oslo_messaging.RPCClient.cast')
     def test_health_monitor_update(self, mock_cast):
         old_provider_hm = driver_dm.HealthMonitor(
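The new unit tests all follow the same pattern: the database lookup is patched out so the driver receives a fabricated pool whose protocol, listeners and VIP address are controlled by the test, which makes the IPv4/IPv6 mismatch reproducible without a real database. A minimal, self-contained sketch of that patching pattern, using hypothetical Repo and pool_protocol stand-ins rather than the real Octavia classes:

import unittest
from unittest import mock


class Repo(object):
    """Hypothetical stand-in for octavia.db.repositories.PoolRepository."""
    def get(self, session, id):
        raise RuntimeError("would hit the database")


def pool_protocol(repo, pool_id):
    # Mirrors how the driver asks the repository for the pool object.
    return repo.get(None, id=pool_id).protocol


class TestPattern(unittest.TestCase):
    @mock.patch.object(Repo, 'get')
    def test_fake_pool_is_returned(self, mock_get):
        # The decorator injects the mock; return_value is the fabricated pool.
        mock_get.return_value = mock.MagicMock(protocol='UDP')
        self.assertEqual('UDP', pool_protocol(Repo(), 'pool-id'))


if __name__ == '__main__':
    unittest.main()

The real tests stack three such decorators; the mocks are injected bottom-up, which is why the new signature is (self, mock_cast, mock_pool_get, mock_session).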
@@ -0,0 +1,7 @@
+---
+fixes:
+  - |
+    Adding a member with a different IP protocol version than the VIP IP
+    protocol version in a UDP load balancer caused a crash in the amphora.
+    A validation step in the amphora driver now prevents mixing IP protocol
+    versions in UDP load balancers.