Fix parallel plug vip
A recent patch[1] introduced parallel network configuration on load balancer boot. However, that patch has a race condition between the amphorae booting in parallel. This patch corrects the race by making the get-amphora-network-configurations task operate on a single amphora when one is provided to the task. Change-Id: Ideb050a215b0b0335ea94163650959994f987008 Story: 2005080 Task: 29659
This commit is contained in:
parent
6e0bed1c54
commit
1f709e3355
octavia
common
controller/worker
network
tests/unit/controller/worker/flows
@ -235,6 +235,7 @@ OBJECT = 'object'
|
||||
SERVER_PEM = 'server_pem'
|
||||
UPDATE_DICT = 'update_dict'
|
||||
VIP_NETWORK = 'vip_network'
|
||||
AMPHORA_NETWORK_CONFIG = 'amphora_network_config'
|
||||
AMPHORAE_NETWORK_CONFIG = 'amphorae_network_config'
|
||||
ADDED_PORTS = 'added_ports'
|
||||
PORTS = 'ports'
|
||||
@ -357,6 +358,7 @@ UPDATE_MEMBER_INDB = 'octavia-update-member-indb'
|
||||
DELETE_MEMBER_INDB = 'octavia-delete-member-indb'
|
||||
|
||||
# Task Names
|
||||
RELOAD_AMP_AFTER_PLUG_VIP = 'reload-amp-after-plug-vip'
|
||||
RELOAD_LB_AFTER_AMP_ASSOC = 'reload-lb-after-amp-assoc'
|
||||
RELOAD_LB_AFTER_AMP_ASSOC_FULL_GRAPH = 'reload-lb-after-amp-assoc-full-graph'
|
||||
RELOAD_LB_AFTER_PLUG_VIP = 'reload-lb-after-plug-vip'
|
||||
|
@ -303,16 +303,22 @@ class AmphoraFlows(object):
|
||||
flows.append(database_tasks.UpdateAmphoraVIPData(
|
||||
name=sf_name + '-' + constants.UPDATE_AMPHORA_VIP_DATA,
|
||||
requires=constants.AMP_DATA))
|
||||
flows.append(database_tasks.ReloadAmphora(
|
||||
name=sf_name + '-' + constants.RELOAD_AMP_AFTER_PLUG_VIP,
|
||||
requires=constants.AMPHORA_ID,
|
||||
provides=constants.AMPHORA))
|
||||
flows.append(database_tasks.ReloadLoadBalancer(
|
||||
name=sf_name + '-' + constants.RELOAD_LB_AFTER_PLUG_VIP,
|
||||
requires=constants.LOADBALANCER_ID,
|
||||
provides=constants.LOADBALANCER))
|
||||
flows.append(network_tasks.GetAmphoraeNetworkConfigs(
|
||||
flows.append(network_tasks.GetAmphoraNetworkConfigs(
|
||||
name=sf_name + '-' + constants.GET_AMP_NETWORK_CONFIG,
|
||||
requires=constants.LOADBALANCER,
|
||||
provides=constants.AMPHORAE_NETWORK_CONFIG))
|
||||
flows.append(amphora_driver_tasks.AmphoraePostVIPPlug(
|
||||
requires=(constants.LOADBALANCER, constants.AMPHORA),
|
||||
provides=constants.AMPHORA_NETWORK_CONFIG))
|
||||
flows.append(amphora_driver_tasks.AmphoraPostVIPPlug(
|
||||
name=sf_name + '-' + constants.AMP_POST_VIP_PLUG,
|
||||
rebind={constants.AMPHORAE_NETWORK_CONFIG:
|
||||
constants.AMPHORA_NETWORK_CONFIG},
|
||||
requires=(constants.LOADBALANCER,
|
||||
constants.AMPHORAE_NETWORK_CONFIG)))
|
||||
return flows
|
||||
@ -520,6 +526,10 @@ class AmphoraFlows(object):
|
||||
def get_vrrp_subflow(self, prefix):
|
||||
sf_name = prefix + '-' + constants.GET_VRRP_SUBFLOW
|
||||
vrrp_subflow = linear_flow.Flow(sf_name)
|
||||
vrrp_subflow.add(network_tasks.GetAmphoraeNetworkConfigs(
|
||||
name=sf_name + '-' + constants.GET_AMP_NETWORK_CONFIG,
|
||||
requires=constants.LOADBALANCER,
|
||||
provides=constants.AMPHORAE_NETWORK_CONFIG))
|
||||
vrrp_subflow.add(amphora_driver_tasks.AmphoraUpdateVRRPInterface(
|
||||
name=sf_name + '-' + constants.AMP_UPDATE_VRRP_INTF,
|
||||
requires=constants.LOADBALANCER,
|
||||
|
@ -485,6 +485,15 @@ class UpdateVIPForDelete(BaseNetworkTask):
|
||||
self.network_driver.update_vip(loadbalancer, for_delete=True)
|
||||
|
||||
|
||||
class GetAmphoraNetworkConfigs(BaseNetworkTask):
|
||||
"""Task to retrieve amphora network details."""
|
||||
|
||||
def execute(self, loadbalancer, amphora=None):
|
||||
LOG.debug("Retrieving vip network details.")
|
||||
return self.network_driver.get_network_configs(loadbalancer,
|
||||
amphora=amphora)
|
||||
|
||||
|
||||
class GetAmphoraeNetworkConfigs(BaseNetworkTask):
|
||||
"""Task to retrieve amphorae network details."""
|
||||
|
||||
|
@ -284,7 +284,7 @@ class AbstractNetworkDriver(object):
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
def get_network_configs(self, load_balancer):
|
||||
def get_network_configs(self, load_balancer, amphora=None):
|
||||
"""Retrieve network configurations
|
||||
|
||||
This method assumes that a dictionary of AmphoraNetworkConfigs keyed
|
||||
@ -298,6 +298,7 @@ class AbstractNetworkDriver(object):
|
||||
Example return: {<amphora.id>: <AmphoraNetworkConfig>}
|
||||
|
||||
:param load_balancer: The load_balancer configuration
|
||||
:param amphora: Optional amphora to only query.
|
||||
:return: dict of octavia.network.data_models.AmphoraNetworkConfig
|
||||
keyed off of the amphora id the config is associated with.
|
||||
:raises: NotFound, NetworkNotFound, SubnetNotFound, PortNotFound
|
||||
|
@ -591,30 +591,38 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
|
||||
|
||||
return plugged_interface
|
||||
|
||||
def get_network_configs(self, loadbalancer):
|
||||
def _get_amp_net_configs(self, amp, amp_configs, vip_subnet, vip_port):
|
||||
if amp.status != constants.DELETED:
|
||||
LOG.debug("Retrieving network details for amphora %s", amp.id)
|
||||
vrrp_port = self.get_port(amp.vrrp_port_id)
|
||||
vrrp_subnet = self.get_subnet(
|
||||
vrrp_port.get_subnet_id(amp.vrrp_ip))
|
||||
vrrp_port.network = self.get_network(vrrp_port.network_id)
|
||||
ha_port = self.get_port(amp.ha_port_id)
|
||||
ha_subnet = self.get_subnet(
|
||||
ha_port.get_subnet_id(amp.ha_ip))
|
||||
|
||||
amp_configs[amp.id] = n_data_models.AmphoraNetworkConfig(
|
||||
amphora=amp,
|
||||
vip_subnet=vip_subnet,
|
||||
vip_port=vip_port,
|
||||
vrrp_subnet=vrrp_subnet,
|
||||
vrrp_port=vrrp_port,
|
||||
ha_subnet=ha_subnet,
|
||||
ha_port=ha_port
|
||||
)
|
||||
|
||||
def get_network_configs(self, loadbalancer, amphora=None):
|
||||
vip_subnet = self.get_subnet(loadbalancer.vip.subnet_id)
|
||||
vip_port = self.get_port(loadbalancer.vip.port_id)
|
||||
amp_configs = {}
|
||||
for amp in loadbalancer.amphorae:
|
||||
if amp.status != constants.DELETED:
|
||||
LOG.debug("Retrieving network details for amphora %s", amp.id)
|
||||
vrrp_port = self.get_port(amp.vrrp_port_id)
|
||||
vrrp_subnet = self.get_subnet(
|
||||
vrrp_port.get_subnet_id(amp.vrrp_ip))
|
||||
vrrp_port.network = self.get_network(vrrp_port.network_id)
|
||||
ha_port = self.get_port(amp.ha_port_id)
|
||||
ha_subnet = self.get_subnet(
|
||||
ha_port.get_subnet_id(amp.ha_ip))
|
||||
|
||||
amp_configs[amp.id] = n_data_models.AmphoraNetworkConfig(
|
||||
amphora=amp,
|
||||
vip_subnet=vip_subnet,
|
||||
vip_port=vip_port,
|
||||
vrrp_subnet=vrrp_subnet,
|
||||
vrrp_port=vrrp_port,
|
||||
ha_subnet=ha_subnet,
|
||||
ha_port=ha_port
|
||||
)
|
||||
if amphora:
|
||||
self._get_amp_net_configs(amphora, amp_configs,
|
||||
vip_subnet, vip_port)
|
||||
else:
|
||||
for amp in loadbalancer.amphorae:
|
||||
self._get_amp_net_configs(amp, amp_configs,
|
||||
vip_subnet, vip_port)
|
||||
return amp_configs
|
||||
|
||||
def wait_for_port_detach(self, amphora):
|
||||
|
@ -210,26 +210,37 @@ class NoopManager(object):
|
||||
self.networkconfigconfig[(amphora.id, port.id)] = (
|
||||
amphora, port, 'plug_port')
|
||||
|
||||
def get_network_configs(self, loadbalancer):
|
||||
LOG.debug("Network %s no-op, get_network_configs loadbalancer id %s ",
|
||||
self.__class__.__name__, loadbalancer.id)
|
||||
def _get_amp_net_configs(self, amp, amp_configs, vip_subnet, vip_port):
|
||||
vrrp_port = self.get_port(amp.vrrp_port_id)
|
||||
ha_port = self.get_port(amp.ha_port_id)
|
||||
amp_configs[amp.id] = network_models.AmphoraNetworkConfig(
|
||||
amphora=amp,
|
||||
vrrp_subnet=self.get_subnet(
|
||||
vrrp_port.get_subnet_id(amp.vrrp_ip)),
|
||||
vrrp_port=vrrp_port,
|
||||
ha_subnet=self.get_subnet(
|
||||
ha_port.get_subnet_id(amp.ha_ip)),
|
||||
ha_port=ha_port)
|
||||
|
||||
def get_network_configs(self, loadbalancer, amphora=None):
|
||||
amphora_id = amphora.id if amphora else None
|
||||
LOG.debug("Network %s no-op, get_network_configs loadbalancer id "
|
||||
"%s amphora id: %s", self.__class__.__name__,
|
||||
loadbalancer.id, amphora_id)
|
||||
self.networkconfigconfig[(loadbalancer.id)] = (
|
||||
loadbalancer, 'get_network_configs')
|
||||
vip_subnet = self.get_subnet(loadbalancer.vip.subnet_id)
|
||||
vip_port = self.get_port(loadbalancer.vip.port_id)
|
||||
|
||||
amp_configs = {}
|
||||
for amp in loadbalancer.amphorae:
|
||||
vrrp_port = self.get_port(amp.vrrp_port_id)
|
||||
ha_port = self.get_port(amp.ha_port_id)
|
||||
amp_configs[amp.id] = network_models.AmphoraNetworkConfig(
|
||||
amphora=amp,
|
||||
vip_subnet=self.get_subnet(loadbalancer.vip.subnet_id),
|
||||
vip_port=self.get_port(loadbalancer.vip.port_id),
|
||||
vrrp_subnet=self.get_subnet(
|
||||
vrrp_port.get_subnet_id(amp.vrrp_ip)),
|
||||
vrrp_port=vrrp_port,
|
||||
ha_subnet=self.get_subnet(
|
||||
ha_port.get_subnet_id(amp.ha_ip)),
|
||||
ha_port=ha_port)
|
||||
if amphora:
|
||||
self._get_amp_net_configs(amphora, amp_configs,
|
||||
vip_subnet, vip_port)
|
||||
else:
|
||||
for amp in loadbalancer.amphorae:
|
||||
self._get_amp_net_configs(amp, amp_configs,
|
||||
vip_subnet, vip_port)
|
||||
|
||||
return amp_configs
|
||||
|
||||
def wait_for_port_detach(self, amphora):
|
||||
@ -310,8 +321,8 @@ class NoopNetworkDriver(driver_base.AbstractNetworkDriver):
|
||||
def plug_port(self, amphora, port):
|
||||
return self.driver.plug_port(amphora, port)
|
||||
|
||||
def get_network_configs(self, loadbalancer):
|
||||
return self.driver.get_network_configs(loadbalancer)
|
||||
def get_network_configs(self, loadbalancer, amphora=None):
|
||||
return self.driver.get_network_configs(loadbalancer, amphora)
|
||||
|
||||
def wait_for_port_detach(self, amphora):
|
||||
self.driver.wait_for_port_detach(amphora)
|
||||
|
@ -354,8 +354,8 @@ class TestAmphoraFlows(base.TestCase):
|
||||
|
||||
self.assertIn(constants.LOADBALANCER, vrrp_subflow.requires)
|
||||
|
||||
self.assertEqual(1, len(vrrp_subflow.provides))
|
||||
self.assertEqual(2, len(vrrp_subflow.requires))
|
||||
self.assertEqual(2, len(vrrp_subflow.provides))
|
||||
self.assertEqual(1, len(vrrp_subflow.requires))
|
||||
|
||||
def test_get_post_map_lb_subflow(self, mock_get_net_driver):
|
||||
|
||||
|
@ -156,8 +156,8 @@ class TestLoadBalancerFlows(base.TestCase):
|
||||
self.assertIn(constants.UPDATE_DICT, amp_flow.requires)
|
||||
self.assertIn(constants.LOADBALANCER, amp_flow.provides)
|
||||
|
||||
self.assertEqual(1, len(amp_flow.provides))
|
||||
self.assertEqual(3, len(amp_flow.requires))
|
||||
self.assertEqual(2, len(amp_flow.provides))
|
||||
self.assertEqual(2, len(amp_flow.requires))
|
||||
|
||||
# Test mark_active=False
|
||||
amp_flow = self.LBFlow.get_post_lb_amp_association_flow(
|
||||
@ -169,8 +169,8 @@ class TestLoadBalancerFlows(base.TestCase):
|
||||
self.assertIn(constants.UPDATE_DICT, amp_flow.requires)
|
||||
self.assertIn(constants.LOADBALANCER, amp_flow.provides)
|
||||
|
||||
self.assertEqual(1, len(amp_flow.provides))
|
||||
self.assertEqual(3, len(amp_flow.requires))
|
||||
self.assertEqual(2, len(amp_flow.provides))
|
||||
self.assertEqual(2, len(amp_flow.requires))
|
||||
|
||||
def test_get_create_load_balancer_flows_single_listeners(
|
||||
self, mock_get_net_driver):
|
||||
@ -193,8 +193,7 @@ class TestLoadBalancerFlows(base.TestCase):
|
||||
self.assertIn(constants.ADDED_PORTS, create_flow.provides)
|
||||
self.assertIn(constants.VIP, create_flow.provides)
|
||||
self.assertIn(constants.AMP_DATA, create_flow.provides)
|
||||
self.assertIn(constants.AMPHORAE_NETWORK_CONFIG,
|
||||
create_flow.provides)
|
||||
self.assertIn(constants.AMPHORA_NETWORK_CONFIG, create_flow.provides)
|
||||
|
||||
self.assertEqual(4, len(create_flow.requires))
|
||||
self.assertEqual(13, len(create_flow.provides),
|
||||
@ -225,5 +224,5 @@ class TestLoadBalancerFlows(base.TestCase):
|
||||
create_flow.provides)
|
||||
|
||||
self.assertEqual(4, len(create_flow.requires))
|
||||
self.assertEqual(13, len(create_flow.provides),
|
||||
self.assertEqual(14, len(create_flow.provides),
|
||||
create_flow.provides)
|
||||
|
Loading…
x
Reference in New Issue
Block a user