Assign peer_port on listener creation
The introduction of shared_pools broke one of the flows for assigning the listener peer_port. This went unnoticed for a while because peer_port is currently only used in active-standby topologies, and we have no scenario tests that exercise active-standby regularly. In looking to fix the flow, I realized there is no reason we can't assign the listener peer_port when the listener object is created in the database. Doing so eliminates the need for a couple of controller worker database tasks and simplifies the listener creation flow. This patch therefore updates the repository code to assign the peer_port on listener creation and removes the now-redundant controller worker database tasks.

Change-Id: I0c15dfa154c7cd57f1626945bb76c0ac0b9de071
Closes-Bug: 1547233
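For readers skimming the diff below: the allocation rule itself is unchanged, it simply moves out of a taskflow task and into ListenerRepository.create(). As a minimal, self-contained sketch of the rule, assuming HAPROXY_BASE_PEER_PORT is 1025 (the value the updated tests assert):

    def next_peer_port(existing_peer_ports, base_port=1025):
        """Illustrative restatement of the peer_port allocation rule."""
        # Ignore listeners whose peer_port was never assigned.
        assigned = [p for p in existing_peer_ports if p is not None]
        if not assigned:
            # The first listener on a load balancer gets the base port.
            return base_port
        # Each subsequent listener gets one past the current maximum.
        return max(assigned) + 1

    # Matches the expectations in test_create_no_peer_port_increments below:
    assert next_peer_port([]) == 1025
    assert next_peer_port([1025]) == 1026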
parent 24ef5c9d96
commit 1b992d1e12

Changed paths: octavia/controller/worker, octavia/db, octavia/tests/functional/db, octavia/tests/unit/controller/worker
octavia/controller/worker/controller_worker.py

@@ -188,10 +188,8 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
         create_listener_tf = self._taskflow_load(self._listener_flows.
                                                  get_create_listener_flow(),
-                                                 store={constants.LISTENER:
-                                                        listener,
-                                                        constants.LOADBALANCER:
-                                                        load_balancer,
-                                                        constants.LISTENERS:
-                                                        [listener]})
+                                                 store={constants.LOADBALANCER:
+                                                        load_balancer,
+                                                        constants.LISTENERS:
+                                                        [listener]})
         with tf_logging.DynamicLoggingListener(create_listener_tf,
octavia/controller/worker/flows/listener_flows.py

@@ -30,12 +30,6 @@ class ListenerFlows(object):
         :returns: The flow for creating a listener
         """
         create_listener_flow = linear_flow.Flow(constants.CREATE_LISTENER_FLOW)
-        create_listener_flow.add(database_tasks.AllocateListenerPeerPort(
-            requires='listener',
-            provides='listener'))
-        create_listener_flow.add(database_tasks.ReloadListener(
-            requires='listener',
-            provides='listener'))
         create_listener_flow.add(amphora_driver_tasks.ListenersUpdate(
             requires=[constants.LOADBALANCER, constants.LISTENERS]))
         create_listener_flow.add(network_tasks.UpdateVIP(
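With AllocateListenerPeerPort and ReloadListener gone, the create-listener flow provides nothing and requires only the load balancer and its listener list, which is why the store built in create_listener() above shrinks to LOADBALANCER and LISTENERS (the flow test below asserts exactly two requires and zero provides).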
octavia/controller/worker/tasks/database_tasks.py

@@ -268,14 +268,6 @@ class ReloadLoadBalancer(BaseDatabaseTask):
                                           id=loadbalancer_id)
 
 
-class ReloadListener(BaseDatabaseTask):
-    """Reload listener data from database."""
-
-    def execute(self, listener):
-        """Reloads listener in DB."""
-        return self.listener_repo.get(db_apis.get_session(), id=listener.id)
-
-
 class UpdateVIPAfterAllocation(BaseDatabaseTask):
 
     def execute(self, loadbalancer_id, vip):

@@ -1011,39 +1003,6 @@ class CreateVRRPGroupForLB(BaseDatabaseTask):
         return loadbalancer
 
 
-class AllocateListenerPeerPort(BaseDatabaseTask):
-    """Get a new peer port number for a listener."""
-
-    def execute(self, listener):
-        """Allocate a peer port (TCP port)
-
-        :param listener: The listener to be marked deleted
-        :returns: None
-        """
-        max_peer_port = 0
-        for listener in listener.load_balancer.listeners:
-            if listener.peer_port > max_peer_port:
-                max_peer_port = listener.peer_port
-
-        if max_peer_port == 0:
-            port = constants.HAPROXY_BASE_PEER_PORT
-        else:
-            port = max_peer_port + 1
-
-        self.listener_repo.update(db_apis.get_session(), listener.id,
-                                  peer_port=port)
-
-        return self.listener_repo.get(db_apis.get_session(), id=listener.id)
-
-    def revert(self, listener, *args, **kwargs):
-        """nulls the peer port
-
-        :returns: None
-        """
-        self.listener_repo.update(db_apis.get_session(), listener.id,
-                                  peer_port=None)
-
-
 class DisableAmphoraHealthMonitoring(BaseDatabaseTask):
     """Disable amphora health monitoring.
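Note that no replacement revert logic is needed: because the repository now sets peer_port inside the same create() call that inserts the row, a failed creation persists nothing, so the task's revert() that nulled the port on rollback becomes unnecessary.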
octavia/db/repositories.py

@@ -321,6 +321,20 @@ class MemberRepository(BaseRepository):
 class ListenerRepository(BaseRepository):
     model_class = models.Listener
 
+    def _find_next_peer_port(self, session, lb_id):
+        """Finds the next available peer port on the load balancer."""
+        max_peer_port = 0
+        load_balancer = session.query(models.LoadBalancer).filter_by(
+            id=lb_id).first()
+        for listener in load_balancer.listeners:
+            if (listener.peer_port is not None and
+                    listener.peer_port > max_peer_port):
+                max_peer_port = listener.peer_port
+        if max_peer_port == 0:
+            return constants.HAPROXY_BASE_PEER_PORT
+        else:
+            return max_peer_port + 1
+
     def _pool_check(self, session, pool_id, listener_id=None,
                     lb_id=None):
         """Sanity checks for default_pool_id if specified."""

@@ -368,6 +382,9 @@ class ListenerRepository(BaseRepository):
         if default_pool_id:
             self._pool_check(session, default_pool_id,
                              lb_id=model_kwargs.get('load_balancer_id'))
+        if model_kwargs.get('peer_port') is None:
+            model_kwargs.update(peer_port=self._find_next_peer_port(
+                session, model_kwargs.get('load_balancer_id')))
         model = self.model_class(**model_kwargs)
         session.add(model)
         return model.to_data_model()
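Functionally, _find_next_peer_port reduces to a short scan; an illustrative restatement (not repository code):

    ports = [l.peer_port for l in load_balancer.listeners
             if l.peer_port is not None]
    next_port = max(ports) + 1 if ports else constants.HAPROXY_BASE_PEER_PORT

Note the explicit is-not-None check, which the removed AllocateListenerPeerPort task lacked; the helper simply skips listeners whose peer_port has not been assigned rather than comparing None against an integer.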
octavia/tests/functional/db/test_repositories.py

@@ -552,9 +552,18 @@ class ListenerRepositoryTest(BaseRepositoryTest):
             protocol=constants.PROTOCOL_HTTP, protocol_port=port,
             connection_limit=1, load_balancer_id=self.load_balancer.id,
             default_pool_id=default_pool_id, operating_status=constants.ONLINE,
-            provisioning_status=constants.ACTIVE, enabled=True)
+            provisioning_status=constants.ACTIVE, enabled=True, peer_port=1025)
         return listener
 
+    def create_loadbalancer(self, lb_id):
+        lb = self.lb_repo.create(self.session, id=lb_id,
+                                 project_id=self.FAKE_UUID_2, name="lb_name",
+                                 description="lb_description",
+                                 provisioning_status=constants.ACTIVE,
+                                 operating_status=constants.ONLINE,
+                                 enabled=True)
+        return lb
+
     def test_get(self):
         listener = self.create_listener(self.FAKE_UUID_1, 80)
         new_listener = self.listener_repo.get(self.session, id=listener.id)

@@ -584,8 +593,38 @@ class ListenerRepositoryTest(BaseRepositoryTest):
         self.assertEqual(self.load_balancer.id, new_listener.load_balancer_id)
         self.assertEqual(constants.ACTIVE, new_listener.provisioning_status)
         self.assertEqual(constants.ONLINE, new_listener.operating_status)
+        self.assertEqual(1025, new_listener.peer_port)
         self.assertTrue(new_listener.enabled)
 
+    def test_create_no_peer_port(self):
+        lb = self.create_loadbalancer(uuidutils.generate_uuid())
+        listener = self.listener_repo.create(
+            self.session, id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2,
+            load_balancer_id=lb.id, protocol=constants.PROTOCOL_HTTP,
+            protocol_port=80, provisioning_status=constants.ACTIVE,
+            operating_status=constants.ONLINE, enabled=True)
+        new_listener = self.listener_repo.get(self.session, id=listener.id)
+        self.assertEqual(1025, new_listener.peer_port)
+
+    def test_create_no_peer_port_increments(self):
+        lb = self.create_loadbalancer(uuidutils.generate_uuid())
+        listener_a = self.listener_repo.create(
+            self.session, id=uuidutils.generate_uuid(),
+            project_id=self.FAKE_UUID_2,
+            load_balancer_id=lb.id, protocol=constants.PROTOCOL_HTTP,
+            protocol_port=80, provisioning_status=constants.ACTIVE,
+            operating_status=constants.ONLINE, enabled=True)
+        listener_b = self.listener_repo.create(
+            self.session, id=uuidutils.generate_uuid(),
+            project_id=self.FAKE_UUID_2,
+            load_balancer_id=lb.id, protocol=constants.PROTOCOL_HTTP,
+            protocol_port=81, provisioning_status=constants.ACTIVE,
+            operating_status=constants.ONLINE, enabled=True)
+        new_listener_a = self.listener_repo.get(self.session, id=listener_a.id)
+        new_listener_b = self.listener_repo.get(self.session, id=listener_b.id)
+        self.assertEqual(1025, new_listener_a.peer_port)
+        self.assertEqual(1026, new_listener_b.peer_port)
+
     def test_create_listener_on_different_lb_than_default_pool(self):
         load_balancer2 = self.lb_repo.create(
             self.session, id=self.FAKE_UUID_3, project_id=self.FAKE_UUID_2,

@@ -714,7 +753,7 @@ class ListenerStatisticsRepositoryTest(BaseRepositoryTest):
             name="listener_name", description="listener_description",
             protocol=constants.PROTOCOL_HTTP, protocol_port=80,
             connection_limit=1, provisioning_status=constants.ACTIVE,
-            operating_status=constants.ONLINE, enabled=True)
+            operating_status=constants.ONLINE, enabled=True, peer_port=1025)
 
     def create_listener_stats(self, listener_id):
         stats = self.listener_stats_repo.create(

@@ -1119,7 +1158,7 @@ class SNIRepositoryTest(BaseRepositoryTest):
             name="listener_name", description="listener_description",
             protocol=constants.PROTOCOL_HTTP, protocol_port=80,
             connection_limit=1, provisioning_status=constants.ACTIVE,
-            operating_status=constants.ONLINE, enabled=True)
+            operating_status=constants.ONLINE, enabled=True, peer_port=1025)
 
     def create_sni(self, listener_id):
         sni = self.sni_repo.create(self.session,
octavia/tests/unit/controller/worker/flows/test_listener_flows.py

@@ -33,12 +33,11 @@ class TestListenerFlows(base.TestCase):
 
         self.assertIsInstance(listener_flow, flow.Flow)
 
-        self.assertIn(constants.LISTENER, listener_flow.requires)
         self.assertIn(constants.LOADBALANCER, listener_flow.requires)
         self.assertIn(constants.LISTENERS, listener_flow.requires)
 
-        self.assertEqual(3, len(listener_flow.requires))
-        self.assertEqual(1, len(listener_flow.provides))
+        self.assertEqual(2, len(listener_flow.requires))
+        self.assertEqual(0, len(listener_flow.provides))
 
     def test_get_delete_listener_flow(self):
octavia/tests/unit/controller/worker/tasks/test_database_tasks.py

@@ -322,27 +322,6 @@ class TestDatabaseTasks(base.TestCase):
 
         self.assertEqual(_loadbalancer_mock, lb)
 
-    @mock.patch('octavia.db.repositories.ListenerRepository.get',
-                return_value=_listener_mock)
-    def test_reload_listener(self,
-                             mock_listener_get,
-                             mock_generate_uuid,
-                             mock_LOG,
-                             mock_get_session,
-                             mock_loadbalancer_repo_update,
-                             mock_listener_repo_update,
-                             mock_amphora_repo_update,
-                             mock_amphora_repo_delete):
-
-        reload_listener = database_tasks.ReloadListener()
-        listener = reload_listener.execute(_listener_mock)
-
-        repo.ListenerRepository.get.assert_called_once_with(
-            'TEST',
-            id=LISTENER_ID)
-
-        self.assertEqual(_listener_mock, listener)
-
     @mock.patch('octavia.db.repositories.LoadBalancerRepository.get',
                 return_value=_loadbalancer_mock)
     @mock.patch('octavia.db.repositories.VipRepository.update')

@@ -1166,29 +1145,6 @@ class TestDatabaseTasks(base.TestCase):
             advert_int=1)
         create_vrrp_group.execute(_loadbalancer_mock)
 
-    @mock.patch('octavia.db.repositories.ListenerRepository.get')
-    def test_allocate_listener_peer_port(self,
-                                         mock_listener_repo_get,
-                                         mock_generate_uuid,
-                                         mock_LOG,
-                                         mock_get_session,
-                                         mock_loadbalancer_repo_update,
-                                         mock_listener_repo_update,
-                                         mock_amphora_repo_update,
-                                         mock_amphora_repo_delete):
-        allocate_listener_peer_port = database_tasks.AllocateListenerPeerPort()
-        allocate_listener_peer_port.execute(_listener_mock)
-        mock_listener_repo_update.assert_called_once_with(
-            'TEST', _listener_mock.id,
-            peer_port=constants.HAPROXY_BASE_PEER_PORT)
-
-        mock_listener_repo_update.reset_mock()
-
-        allocate_listener_peer_port.revert(_listener_mock)
-        mock_listener_repo_update.assert_called_once_with(
-            'TEST', _listener_mock.id,
-            peer_port=None)
-
     @mock.patch('octavia.db.repositories.AmphoraHealthRepository.delete')
     def test_disable_amphora_health_monitoring(self,
                                                mock_amp_health_repo_delete,
octavia/tests/unit/controller/worker/test_controller_worker.py

@@ -272,9 +272,7 @@ class TestControllerWorker(base.TestCase):
 
         (base_taskflow.BaseTaskFlowEngine._taskflow_load.
             assert_called_once_with(_flow_mock,
-                                    store={constants.LISTENER:
-                                           _listener_mock,
-                                           constants.LOADBALANCER:
-                                           _load_balancer_mock,
-                                           constants.LISTENERS:
-                                           [_listener_mock]}))
+                                    store={constants.LOADBALANCER:
+                                           _load_balancer_mock,
+                                           constants.LISTENERS:
+                                           [_listener_mock]}))