Refactor the plugging of the VIP
This aims to make plugging the VIP based on the amphora instead of the LB for the LB create flows.

Change-Id: I0eaae7b60333d2e720acc9570a0d398a06be46e5
parent fae5b05980
commit 06baf6d542
Changed paths:
  octavia/common
  octavia/controller/worker
  octavia/network
  octavia/tests/unit/controller/worker
  octavia/tests/unit/network/drivers
  releasenotes/notes
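Before the per-file hunks, a minimal sketch of the flow shape the commit message describes: each amphora gets its own linear plug-VIP subflow, and the subflows sit in an unordered flow so a parallel engine can run them side by side. This is illustrative only — the PlugVIP stub, task names, and store keys are stand-ins, not the Octavia tasks changed below.

    # Hedged sketch using only taskflow primitives; PlugVIP is a stand-in,
    # not Octavia's PlugVIPAmpphora task.
    from taskflow import engines, task
    from taskflow.patterns import linear_flow, unordered_flow

    class PlugVIP(task.Task):
        def execute(self, amphora_id):
            print('plugging VIP on amphora %s' % amphora_id)

    amps_flow = unordered_flow.Flow('plug-amps')
    for role in ('MASTER', 'BACKUP'):
        sub = linear_flow.Flow(role + '-plug-net-subflow')
        sub.add(PlugVIP(name=role + '-plug-vip',
                        rebind={'amphora_id': role.lower() + '_id'}))
        amps_flow.add(sub)

    # The unordered flow lets a parallel engine plug both amphorae at once.
    engines.run(amps_flow, engine='parallel',
                store={'master_id': 'amp-1', 'backup_id': 'amp-2'})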
@@ -258,6 +258,8 @@ REQ_CONN_TIMEOUT = 'req_conn_timeout'
 REQ_READ_TIMEOUT = 'req_read_timeout'
 CONN_MAX_RETRIES = 'conn_max_retries'
 CONN_RETRY_INTERVAL = 'conn_retry_interval'
+SUBNET = 'subnet'
+AMP_DATA = 'amp_data'
 ACTIVE_CONNECTIONS = 'active_connections'
 BYTES_IN = 'bytes_in'
 BYTES_OUT = 'bytes_out'
@@ -300,6 +302,7 @@ UPDATE_AMPS_SUBFLOW = 'octavia-update-amps-subflow'
 
 POST_MAP_AMP_TO_LB_SUBFLOW = 'octavia-post-map-amp-to-lb-subflow'
 CREATE_AMP_FOR_LB_SUBFLOW = 'octavia-create-amp-for-lb-subflow'
+AMP_PLUG_NET_SUBFLOW = 'octavia-plug-net-subflow'
 GET_AMPHORA_FOR_LB_SUBFLOW = 'octavia-get-amphora-for-lb-subflow'
 POST_LB_AMP_ASSOCIATION_SUBFLOW = (
     'octavia-post-loadbalancer-amp_association-subflow')
@@ -332,7 +335,11 @@ CREATE_VRRP_GROUP_FOR_LB = 'octavia-create-vrrp-group-for-lb'
 CREATE_VRRP_SECURITY_RULES = 'octavia-create-vrrp-security-rules'
 AMP_COMPUTE_CONNECTIVITY_WAIT = 'octavia-amp-compute-connectivity-wait'
 AMP_LISTENER_UPDATE = 'octavia-amp-listeners-update'
+PLUG_VIP_AMPHORA = 'octavia-amp-plug-vip'
+APPLY_QOS_AMP = 'octavia-amp-apply-qos'
+UPDATE_AMPHORA_VIP_DATA = 'octavia-amp-update-vip-data'
+GET_AMP_NETWORK_CONFIG = 'octavia-amp-get-network-config'
+AMP_POST_VIP_PLUG = 'octavia-amp-post-vip-plug'
 GENERATE_SERVER_PEM_TASK = 'GenerateServerPEMTask'
 AMPHORA_CONFIG_UPDATE_TASK = 'AmphoraConfigUpdateTask'
 
@@ -352,6 +359,7 @@ DELETE_MEMBER_INDB = 'octavia-delete-member-indb'
 RELOAD_LB_AFTER_AMP_ASSOC = 'reload-lb-after-amp-assoc'
 RELOAD_LB_AFTER_AMP_ASSOC_FULL_GRAPH = 'reload-lb-after-amp-assoc-full-graph'
 RELOAD_LB_AFTER_PLUG_VIP = 'reload-lb-after-plug-vip'
+RELOAD_LB_BEFOR_ALLOCATE_VIP = "reload-lb-before-allocate-vip"
 
 NOVA_1 = '1.1'
 NOVA_21 = '2.1'
@@ -277,8 +277,46 @@ class AmphoraFlows(object):
             decider=self._create_new_amp_for_lb_decider,
             decider_depth='flow')
 
+        # Plug the network
+        # todo(xgerman): Rework failover flow
+        if prefix != constants.FAILOVER_AMPHORA_FLOW:
+            sf_name = prefix + '-' + constants.AMP_PLUG_NET_SUBFLOW
+            amp_for_lb_net_flow = linear_flow.Flow(sf_name)
+            amp_for_lb_net_flow.add(amp_for_lb_flow)
+            amp_for_lb_net_flow.add(*self._get_amp_net_subflow(sf_name))
+            return amp_for_lb_net_flow
+
         return amp_for_lb_flow
 
+    def _get_amp_net_subflow(self, sf_name):
+        flows = []
+        flows.append(network_tasks.PlugVIPAmpphora(
+            name=sf_name + '-' + constants.PLUG_VIP_AMPHORA,
+            requires=(constants.LOADBALANCER, constants.AMPHORA,
+                      constants.SUBNET),
+            provides=constants.AMP_DATA))
+
+        flows.append(network_tasks.ApplyQosAmphora(
+            name=sf_name + '-' + constants.APPLY_QOS_AMP,
+            requires=(constants.LOADBALANCER, constants.AMP_DATA,
+                      constants.UPDATE_DICT)))
+        flows.append(database_tasks.UpdateAmphoraVIPData(
+            name=sf_name + '-' + constants.UPDATE_AMPHORA_VIP_DATA,
+            requires=constants.AMP_DATA))
+        flows.append(database_tasks.ReloadLoadBalancer(
+            name=sf_name + '-' + constants.RELOAD_LB_AFTER_PLUG_VIP,
+            requires=constants.LOADBALANCER_ID,
+            provides=constants.LOADBALANCER))
+        flows.append(network_tasks.GetAmphoraeNetworkConfigs(
+            name=sf_name + '-' + constants.GET_AMP_NETWORK_CONFIG,
+            requires=constants.LOADBALANCER,
+            provides=constants.AMPHORAE_NETWORK_CONFIG))
+        flows.append(amphora_driver_tasks.AmphoraePostVIPPlug(
+            name=sf_name + '-' + constants.AMP_POST_VIP_PLUG,
+            requires=(constants.LOADBALANCER,
+                      constants.AMPHORAE_NETWORK_CONFIG)))
+        return flows
+
     def get_delete_amphora_flow(self):
         """Creates a flow to delete an amphora.
 
@@ -55,6 +55,24 @@ class LoadBalancerFlows(object):
         lb_create_flow.add(lifecycle_tasks.LoadBalancerIDToErrorOnRevertTask(
             requires=constants.LOADBALANCER_ID))
 
+        # allocate VIP
+        lb_create_flow.add(database_tasks.ReloadLoadBalancer(
+            name=constants.RELOAD_LB_BEFOR_ALLOCATE_VIP,
+            requires=constants.LOADBALANCER_ID,
+            provides=constants.LOADBALANCER
+        ))
+        lb_create_flow.add(network_tasks.AllocateVIP(
+            requires=constants.LOADBALANCER,
+            provides=constants.VIP))
+        lb_create_flow.add(database_tasks.UpdateVIPAfterAllocation(
+            requires=(constants.LOADBALANCER_ID, constants.VIP),
+            provides=constants.LOADBALANCER))
+        lb_create_flow.add(network_tasks.UpdateVIPSecurityGroup(
+            requires=constants.LOADBALANCER))
+        lb_create_flow.add(network_tasks.GetSubnetFromVIP(
+            requires=constants.LOADBALANCER,
+            provides=constants.SUBNET))
+
         if topology == constants.TOPOLOGY_ACTIVE_STANDBY:
             lb_create_flow.add(*self._create_active_standby_topology())
         elif topology == constants.TOPOLOGY_SINGLE:
@@ -107,7 +125,8 @@ class LoadBalancerFlows(object):
         f_name = constants.CREATE_LOADBALANCER_FLOW
         amps_flow = unordered_flow.Flow(f_name)
         master_amp_sf = self.amp_flows.get_amphora_for_lb_subflow(
-            prefix=constants.ROLE_MASTER, role=constants.ROLE_MASTER)
+            prefix=constants.ROLE_MASTER, role=constants.ROLE_MASTER
+        )
 
         backup_amp_sf = self.amp_flows.get_amphora_for_lb_subflow(
             prefix=constants.ROLE_BACKUP, role=constants.ROLE_BACKUP)
@@ -172,9 +191,6 @@ class LoadBalancerFlows(object):
             requires=constants.LOADBALANCER_ID,
             provides=constants.LOADBALANCER))
 
-        new_LB_net_subflow = self.get_new_LB_networking_subflow()
-        post_create_LB_flow.add(new_LB_net_subflow)
-
         if topology == constants.TOPOLOGY_ACTIVE_STANDBY:
             vrrp_subflow = self.amp_flows.get_vrrp_subflow(prefix)
             post_create_LB_flow.add(vrrp_subflow)
@@ -313,7 +329,7 @@ class LoadBalancerFlows(object):
         new_LB_net_subflow.add(network_tasks.ApplyQos(
             requires=(constants.LOADBALANCER, constants.AMPS_DATA,
                       constants.UPDATE_DICT)))
-        new_LB_net_subflow.add(database_tasks.UpdateAmphoraVIPData(
+        new_LB_net_subflow.add(database_tasks.UpdateAmphoraeVIPData(
             requires=constants.AMPS_DATA))
         new_LB_net_subflow.add(database_tasks.ReloadLoadBalancer(
             name=constants.RELOAD_LB_AFTER_PLUG_VIP,
@@ -409,7 +409,7 @@ class UpdateVIPAfterAllocation(BaseDatabaseTask):
             id=loadbalancer_id)
 
 
-class UpdateAmphoraVIPData(BaseDatabaseTask):
+class UpdateAmphoraeVIPData(BaseDatabaseTask):
     """Update amphorae VIP data."""
 
     def execute(self, amps_data):
@@ -427,6 +427,23 @@ class UpdateAmphoraVIPData(BaseDatabaseTask):
             vrrp_id=1)
 
 
+class UpdateAmphoraVIPData(BaseDatabaseTask):
+    """Update a single amphora's VIP data."""
+
+    def execute(self, amp_data):
+        """Update the amphora's VIP data.
+
+        :param amp_data: Amphora update dict.
+        :returns: None
+        """
+        self.repos.amphora.update(db_apis.get_session(), amp_data.id,
+                                  vrrp_ip=amp_data.vrrp_ip,
+                                  ha_ip=amp_data.ha_ip,
+                                  vrrp_port_id=amp_data.vrrp_port_id,
+                                  ha_port_id=amp_data.ha_port_id,
+                                  vrrp_id=1)
+
+
 class UpdateAmpFailoverDetails(BaseDatabaseTask):
     """Update amphora failover details in the database."""
 
@@ -351,6 +351,60 @@ class PlugVIP(BaseNetworkTask):
                       {'vip': loadbalancer.vip.ip_address, 'except': e})
 
 
+class UpdateVIPSecurityGroup(BaseNetworkTask):
+    """Task to set up the security group for a load balancer's VIP."""
+
+    def execute(self, loadbalancer):
+        """Set up the security group for the LB's VIP."""
+
+        LOG.debug("Setup SG for loadbalancer id: %s", loadbalancer.id)
+
+        self.network_driver.update_vip_sg(loadbalancer, loadbalancer.vip)
+
+
+class GetSubnetFromVIP(BaseNetworkTask):
+    """Task to look up the subnet the VIP lives on."""
+
+    def execute(self, loadbalancer):
+        """Get the subnet for the VIP."""
+
+        LOG.debug("Getting subnet for LB: %s", loadbalancer.id)
+
+        return self.network_driver.get_subnet(loadbalancer.vip.subnet_id)
+
+
+class PlugVIPAmpphora(BaseNetworkTask):
+    """Task to plumb the VIP on a single amphora."""
+
+    def execute(self, loadbalancer, amphora, subnet):
+        """Plumb a vip to an amphora."""
+
+        LOG.debug("Plumbing VIP for amphora id: %s", amphora.id)
+
+        amp_data = self.network_driver.plug_aap_port(
+            loadbalancer, loadbalancer.vip, amphora, subnet)
+        return amp_data
+
+    def revert(self, result, loadbalancer, amphora, subnet, *args, **kwargs):
+        """Handle a failure to plumb a vip."""
+
+        if isinstance(result, failure.Failure):
+            return
+        LOG.warning("Unable to plug VIP for amphora id %s "
+                    "load balancer id %s",
+                    amphora.id, loadbalancer.id)
+
+        try:
+            amphora.vrrp_port_id = result.vrrp_port_id
+            amphora.ha_port_id = result.ha_port_id
+
+            self.network_driver.unplug_aap_port(loadbalancer.vip,
+                                                amphora, subnet)
+        except Exception as e:
+            LOG.error('Failed to unplug AAP port. Resources may still be in '
+                      'use for VIP: %s due to error: %s', loadbalancer.vip, e)
+
+
 class UnplugVIP(BaseNetworkTask):
     """Task to unplug the vip."""
 
@@ -522,20 +576,11 @@ class ApplyQos(BaseNetworkTask):
         """Call network driver to apply QoS Policy on the vrrp ports."""
         if not amps_data:
             amps_data = loadbalancer.amphorae
-        vrrp_port_ids = [amp.vrrp_port_id for amp in amps_data]
-        for port_id in vrrp_port_ids:
-            try:
-                self.network_driver.apply_qos_on_port(qos_policy_id, port_id)
-            except Exception:
-                if not is_revert:
-                    raise
-                else:
-                    LOG.warning('Failed to undo qos policy %(qos_id)s '
-                                'on vrrp port: %(port)s from '
-                                'amphorae: %(amp)s',
-                                {'qos_id': request_qos_id,
-                                 'port': vrrp_port_ids,
-                                 'amp': [amp.id for amp in amps_data]})
+
+        apply_qos = ApplyQosAmphora()
+        for amp_data in amps_data:
+            apply_qos._apply_qos_on_vrrp_port(loadbalancer, amp_data,
+                                              qos_policy_id)
 
     def execute(self, loadbalancer, amps_data=None, update_dict=None):
         """Apply qos policy on the vrrp ports which are related with vip."""
@@ -559,3 +604,50 @@ class ApplyQos(BaseNetworkTask):
                                               is_revert=True,
                                               request_qos_id=request_qos_id)
             return
+
+
+class ApplyQosAmphora(BaseNetworkTask):
+    """Apply Quality of Service to the VIP port of one amphora."""
+
+    def _apply_qos_on_vrrp_port(self, loadbalancer, amp_data, qos_policy_id,
+                                is_revert=False, request_qos_id=None):
+        """Call network driver to apply QoS Policy on the vrrp port."""
+        try:
+            self.network_driver.apply_qos_on_port(qos_policy_id,
+                                                  amp_data.vrrp_port_id)
+        except Exception:
+            if not is_revert:
+                raise
+            else:
+                LOG.warning('Failed to undo qos policy %(qos_id)s '
+                            'on vrrp port: %(port)s from '
+                            'amphora: %(amp)s',
+                            {'qos_id': request_qos_id,
+                             'port': amp_data.vrrp_port_id,
+                             'amp': amp_data.id})
+
+    def execute(self, loadbalancer, amp_data=None, update_dict=None):
+        """Apply qos policy on the vrrp port which is related with vip."""
+        qos_policy_id = loadbalancer.vip.qos_policy_id
+        if not qos_policy_id and (
+            update_dict and (
+                'vip' not in update_dict or
+                'qos_policy_id' not in update_dict['vip'])):
+            return
+        self._apply_qos_on_vrrp_port(loadbalancer, amp_data, qos_policy_id)
+
+    def revert(self, result, loadbalancer, amp_data=None, update_dict=None,
+               *args, **kwargs):
+        """Handle a failure to apply QoS to VIP"""
+        try:
+            request_qos_id = loadbalancer.vip.qos_policy_id
+            orig_lb = self.task_utils.get_current_loadbalancer_from_db(
+                loadbalancer.id)
+            orig_qos_id = orig_lb.vip.qos_policy_id
+            if request_qos_id != orig_qos_id:
+                self._apply_qos_on_vrrp_port(loadbalancer, amp_data,
+                                             orig_qos_id, is_revert=True,
+                                             request_qos_id=request_qos_id)
+        except Exception as e:
+            LOG.error('Failed to remove QoS policy: %s from port: %s due '
+                      'to error: %s', orig_qos_id, amp_data.vrrp_port_id, e)
@@ -318,3 +318,33 @@ class AbstractNetworkDriver(object):
         :raises PortNotFound: Port was not found by neutron.
         """
         pass
+
+    @abc.abstractmethod
+    def update_vip_sg(self, load_balancer, vip):
+        """Updates the security group for a VIP
+
+        :param load_balancer: Load Balancer to prepare the VIP for
+        :param vip: The VIP to plug
+        """
+        pass
+
+    @abc.abstractmethod
+    def plug_aap_port(self, load_balancer, vip, amphora, subnet):
+        """Plugs the AAP port to the amp
+
+        :param load_balancer: Load Balancer to prepare the VIP for
+        :param vip: The VIP to plug
+        :param amphora: The amphora to plug the VIP into
+        :param subnet: The subnet to plug the aap into
+        """
+        pass
+
+    @abc.abstractmethod
+    def unplug_aap_port(self, vip, amphora, subnet):
+        """Unplugs the AAP port from the amp
+
+        :param vip: The VIP to unplug
+        :param amphora: The amphora to unplug the VIP from
+        :param subnet: The subnet to unplug the aap from
+        """
+        pass
@@ -347,15 +347,11 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
             LOG.info("Port %s will not be deleted by Octavia as it was "
                      "not created by Octavia.", vip.port_id)
 
-    def plug_vip(self, load_balancer, vip):
+    def update_vip_sg(self, load_balancer, vip):
         if self.sec_grp_enabled:
             self._update_vip_security_group(load_balancer, vip)
-        plugged_amphorae = []
-        subnet = self.get_subnet(vip.subnet_id)
-        for amphora in six.moves.filter(
-            lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
-                load_balancer.amphorae):
+
+    def plug_aap_port(self, load_balancer, vip, amphora, subnet):
         interface = self._get_plugged_interface(
             amphora.compute_id, subnet.network_id, amphora.lb_network_ip)
         if not interface:
@@ -372,13 +368,24 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
             if is_correct_subnet and not is_management_ip:
                 vrrp_ip = fixed_ip.ip_address
                 break
-            plugged_amphorae.append(data_models.Amphora(
-                id=amphora.id,
-                compute_id=amphora.compute_id,
-                vrrp_ip=vrrp_ip,
-                ha_ip=vip.ip_address,
-                vrrp_port_id=interface.port_id,
-                ha_port_id=vip.port_id))
+        return data_models.Amphora(
+            id=amphora.id,
+            compute_id=amphora.compute_id,
+            vrrp_ip=vrrp_ip,
+            ha_ip=vip.ip_address,
+            vrrp_port_id=interface.port_id,
+            ha_port_id=vip.port_id)
+
+    # todo (xgerman): Delete later
+    def plug_vip(self, load_balancer, vip):
+        self.update_vip_sg(load_balancer, vip)
+        plugged_amphorae = []
+        subnet = self.get_subnet(vip.subnet_id)
+        for amphora in six.moves.filter(
+            lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
+                load_balancer.amphorae):
+            plugged_amphorae.append(self.plug_aap_port(load_balancer, vip,
+                                                       amphora, subnet))
         return plugged_amphorae
 
     def allocate_vip(self, load_balancer):
@@ -425,18 +432,7 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
         new_port = utils.convert_port_dict_to_model(new_port)
         return self._port_to_vip(new_port, load_balancer)
 
-    def unplug_vip(self, load_balancer, vip):
-        try:
-            subnet = self.get_subnet(vip.subnet_id)
-        except base.SubnetNotFound:
-            msg = ("Can't unplug vip because vip subnet {0} was not "
-                   "found").format(vip.subnet_id)
-            LOG.exception(msg)
-            raise base.PluggedVIPNotFound(msg)
-        for amphora in six.moves.filter(
-            lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
-                load_balancer.amphorae):
-
+    def unplug_aap_port(self, vip, amphora, subnet):
         interface = self._get_plugged_interface(
             amphora.compute_id, subnet.network_id, amphora.lb_network_ip)
         if not interface:
@@ -444,7 +440,7 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
             # then that wouldn't evaluate all amphorae, so just continue
             LOG.debug('Cannot get amphora %s interface, skipped',
                       amphora.compute_id)
-            continue
+            return
         try:
             self.unplug_network(amphora.compute_id, subnet.network_id)
         except Exception:
@@ -475,6 +471,19 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
                       'use for port: %(port)s due to error: %(except)s',
                       {'port': amphora.vrrp_port_id, 'except': e})
 
+    def unplug_vip(self, load_balancer, vip):
+        try:
+            subnet = self.get_subnet(vip.subnet_id)
+        except base.SubnetNotFound:
+            msg = ("Can't unplug vip because vip subnet {0} was not "
+                   "found").format(vip.subnet_id)
+            LOG.exception(msg)
+            raise base.PluggedVIPNotFound(msg)
+        for amphora in six.moves.filter(
+            lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
+                load_balancer.amphorae):
+            self.unplug_aap_port(vip, amphora, subnet)
+
     def plug_network(self, compute_id, network_id, ip_address=None):
         try:
             interface = self.compute.attach_network_or_port(
@@ -57,20 +57,40 @@ class NoopManager(object):
         LOG.debug("Network %s no-op, plug_vip loadbalancer %s, vip %s",
                   self.__class__.__name__,
                   loadbalancer.id, vip.ip_address)
+        self.update_vip_sg(loadbalancer, vip)
+        amps = []
+        for amphora in loadbalancer.amphorae:
+            amps.append(self.plug_aap_port(loadbalancer, vip, amphora, None))
         self.networkconfigconfig[(loadbalancer.id,
                                   vip.ip_address)] = (loadbalancer, vip,
                                                       'plug_vip')
-        amps = []
-        for amphora in loadbalancer.amphorae:
-            amps.append(data_models.Amphora(
-                id=amphora.id,
-                compute_id=amphora.compute_id,
-                vrrp_ip='198.51.100.1',
-                ha_ip='198.51.100.1',
-                vrrp_port_id=uuidutils.generate_uuid(),
-                ha_port_id=uuidutils.generate_uuid()
-            ))
-        return amps
+        return amps
+
+    def update_vip_sg(self, load_balancer, vip):
+        LOG.debug("Network %s no-op, update_vip_sg loadbalancer %s, vip %s",
+                  self.__class__.__name__,
+                  load_balancer.id, vip.ip_address)
+        self.networkconfigconfig[(load_balancer.id,
+                                  vip.ip_address)] = (load_balancer, vip,
+                                                      'update_vip_sg')
+
+    def plug_aap_port(self, load_balancer, vip, amphora, subnet):
+        LOG.debug("Network %s no-op, plug_aap_port loadbalancer %s, vip %s,"
+                  " amphora %s, subnet %s",
+                  self.__class__.__name__,
+                  load_balancer.id, vip.ip_address, amphora, subnet)
+        self.networkconfigconfig[(amphora.id,
+                                  vip.ip_address)] = (
+            load_balancer, vip, amphora, subnet,
+            'plug_aap_port')
+        return data_models.Amphora(
+            id=amphora.id,
+            compute_id=amphora.compute_id,
+            vrrp_ip='198.51.100.1',
+            ha_ip='198.51.100.1',
+            vrrp_port_id=uuidutils.generate_uuid(),
+            ha_port_id=uuidutils.generate_uuid()
+        )
 
     def unplug_vip(self, loadbalancer, vip):
         LOG.debug("Network %s no-op, unplug_vip loadbalancer %s, vip %s",
@@ -80,6 +100,15 @@ class NoopManager(object):
                                   vip.ip_address)] = (loadbalancer, vip,
                                                       'unplug_vip')
 
+    def unplug_aap_port(self, vip, amphora, subnet):
+        LOG.debug("Network %s no-op, unplug_aap_port vip %s amp: %s "
+                  "subnet: %s",
+                  self.__class__.__name__,
+                  vip.ip_address, amphora.id, subnet.id)
+        self.networkconfigconfig[(amphora.id,
+                                  vip.ip_address)] = (vip, amphora, subnet,
+                                                      'unplug_aap_port')
+
     def plug_network(self, compute_id, network_id, ip_address=None):
         LOG.debug("Network %s no-op, plug_network compute_id %s, network_id "
                   "%s, ip_address %s", self.__class__.__name__, compute_id,
@@ -289,3 +318,12 @@ class NoopNetworkDriver(driver_base.AbstractNetworkDriver):
 
     def apply_qos_on_port(self, qos_id, port_id):
         self.driver.apply_qos_on_port(qos_id, port_id)
+
+    def update_vip_sg(self, load_balancer, vip):
+        self.driver.update_vip_sg(load_balancer, vip)
+
+    def plug_aap_port(self, load_balancer, vip, amphora, subnet):
+        return self.driver.plug_aap_port(load_balancer, vip, amphora, subnet)
+
+    def unplug_aap_port(self, vip, amphora, subnet):
+        self.driver.unplug_aap_port(vip, amphora, subnet)
@@ -122,23 +122,6 @@ class TestLoadBalancerFlows(base.TestCase):
         self.assertEqual(1, len(lb_flow.provides))
         self.assertEqual(4, len(lb_flow.requires))
 
-    def test_get_new_LB_networking_subflow(self, mock_get_net_driver):
-
-        lb_flow = self.LBFlow.get_new_LB_networking_subflow()
-
-        self.assertIsInstance(lb_flow, flow.Flow)
-
-        self.assertIn(constants.VIP, lb_flow.provides)
-        self.assertIn(constants.AMPS_DATA, lb_flow.provides)
-        self.assertIn(constants.LOADBALANCER, lb_flow.provides)
-
-        self.assertIn(constants.UPDATE_DICT, lb_flow.requires)
-        self.assertIn(constants.LOADBALANCER, lb_flow.requires)
-        self.assertIn(constants.LOADBALANCER_ID, lb_flow.requires)
-
-        self.assertEqual(4, len(lb_flow.provides))
-        self.assertEqual(3, len(lb_flow.requires))
-
     def test_get_update_load_balancer_flow(self, mock_get_net_driver):
 
         lb_flow = self.LBFlow.get_update_load_balancer_flow()
@@ -160,7 +143,7 @@ class TestLoadBalancerFlows(base.TestCase):
         self.assertIn(constants.UPDATE_DICT, amp_flow.requires)
         self.assertIn(constants.LOADBALANCER, amp_flow.provides)
 
-        self.assertEqual(4, len(amp_flow.provides))
+        self.assertEqual(1, len(amp_flow.provides))
         self.assertEqual(2, len(amp_flow.requires))
 
         # Test Active/Standby path
@@ -173,8 +156,8 @@ class TestLoadBalancerFlows(base.TestCase):
         self.assertIn(constants.UPDATE_DICT, amp_flow.requires)
         self.assertIn(constants.LOADBALANCER, amp_flow.provides)
 
-        self.assertEqual(4, len(amp_flow.provides))
-        self.assertEqual(2, len(amp_flow.requires))
+        self.assertEqual(1, len(amp_flow.provides))
+        self.assertEqual(3, len(amp_flow.requires))
 
         # Test mark_active=False
         amp_flow = self.LBFlow.get_post_lb_amp_association_flow(
@@ -186,8 +169,8 @@ class TestLoadBalancerFlows(base.TestCase):
         self.assertIn(constants.UPDATE_DICT, amp_flow.requires)
         self.assertIn(constants.LOADBALANCER, amp_flow.provides)
 
-        self.assertEqual(4, len(amp_flow.provides))
-        self.assertEqual(2, len(amp_flow.requires))
+        self.assertEqual(1, len(amp_flow.provides))
+        self.assertEqual(3, len(amp_flow.requires))
 
     def test_get_create_load_balancer_flows_single_listeners(
             self, mock_get_net_driver):
@@ -209,12 +192,12 @@ class TestLoadBalancerFlows(base.TestCase):
         self.assertIn(constants.DELTAS, create_flow.provides)
         self.assertIn(constants.ADDED_PORTS, create_flow.provides)
         self.assertIn(constants.VIP, create_flow.provides)
-        self.assertIn(constants.AMPS_DATA, create_flow.provides)
+        self.assertIn(constants.AMP_DATA, create_flow.provides)
         self.assertIn(constants.AMPHORAE_NETWORK_CONFIG,
                       create_flow.provides)
 
         self.assertEqual(4, len(create_flow.requires))
-        self.assertEqual(12, len(create_flow.provides),
+        self.assertEqual(13, len(create_flow.provides),
                          create_flow.provides)
 
     def test_get_create_load_balancer_flows_active_standby_listeners(
@@ -237,10 +220,10 @@ class TestLoadBalancerFlows(base.TestCase):
         self.assertIn(constants.DELTAS, create_flow.provides)
         self.assertIn(constants.ADDED_PORTS, create_flow.provides)
         self.assertIn(constants.VIP, create_flow.provides)
-        self.assertIn(constants.AMPS_DATA, create_flow.provides)
+        self.assertIn(constants.AMP_DATA, create_flow.provides)
         self.assertIn(constants.AMPHORAE_NETWORK_CONFIG,
                       create_flow.provides)
 
         self.assertEqual(4, len(create_flow.requires))
-        self.assertEqual(12, len(create_flow.provides),
+        self.assertEqual(13, len(create_flow.provides),
                          create_flow.provides)
@@ -483,7 +483,7 @@ class TestDatabaseTasks(base.TestCase):
                                       mock_amphora_repo_update,
                                       mock_amphora_repo_delete):
 
-        update_amp_vip_data = database_tasks.UpdateAmphoraVIPData()
+        update_amp_vip_data = database_tasks.UpdateAmphoraeVIPData()
         update_amp_vip_data.execute(_amphorae)
 
         mock_amphora_repo_update.assert_called_once_with(
@@ -495,6 +495,26 @@ class TestDatabaseTasks(base.TestCase):
             ha_port_id=HA_PORT_ID,
             vrrp_id=1)
 
+    def test_update_amphora_vip_data2(self,
+                                      mock_generate_uuid,
+                                      mock_LOG,
+                                      mock_get_session,
+                                      mock_loadbalancer_repo_update,
+                                      mock_listener_repo_update,
+                                      mock_amphora_repo_update,
+                                      mock_amphora_repo_delete):
+        update_amp_vip_data2 = database_tasks.UpdateAmphoraVIPData()
+        update_amp_vip_data2.execute(_amphorae[0])
+
+        mock_amphora_repo_update.assert_called_once_with(
+            'TEST',
+            AMP_ID,
+            vrrp_ip=VRRP_IP,
+            ha_ip=HA_IP,
+            vrrp_port_id=VRRP_PORT_ID,
+            ha_port_id=HA_PORT_ID,
+            vrrp_id=1)
+
     def test_update_amp_failover_details(self,
                                          mock_generate_uuid,
                                          mock_LOG,
@@ -765,3 +765,37 @@ class TestNetworkTasks(base.TestCase):
         waitforportdetach.execute(amphora)
 
         mock_driver.wait_for_port_detach.assert_called_once_with(amphora)
+
+    def test_update_vip_sg(self, mock_get_net_driver):
+        mock_driver = mock.MagicMock()
+        mock_get_net_driver.return_value = mock_driver
+        net = network_tasks.UpdateVIPSecurityGroup()
+
+        net.execute(LB)
+        mock_driver.update_vip_sg.assert_called_once_with(LB, LB.vip)
+
+    def test_get_subnet_from_vip(self, mock_get_net_driver):
+        mock_driver = mock.MagicMock()
+        mock_get_net_driver.return_value = mock_driver
+        net = network_tasks.GetSubnetFromVIP()
+
+        net.execute(LB)
+        mock_driver.get_subnet.assert_called_once_with(LB.vip.subnet_id)
+
+    def test_plug_vip_amphora(self, mock_get_net_driver):
+        mock_driver = mock.MagicMock()
+        mock_get_net_driver.return_value = mock_driver
+        net = network_tasks.PlugVIPAmpphora()
+        mockSubnet = mock.MagicMock()
+        net.execute(LB, self.amphora_mock, mockSubnet)
+        mock_driver.plug_aap_port.assert_called_once_with(
+            LB, LB.vip, self.amphora_mock, mockSubnet)
+
+    def test_revert_plug_vip_amphora(self, mock_get_net_driver):
+        mock_driver = mock.MagicMock()
+        mock_get_net_driver.return_value = mock_driver
+        net = network_tasks.PlugVIPAmpphora()
+        mockSubnet = mock.MagicMock()
+        net.revert(AMPS_DATA[0], LB, self.amphora_mock, mockSubnet)
+        mock_driver.unplug_aap_port.assert_called_once_with(
+            LB.vip, self.amphora_mock, mockSubnet)
@@ -299,82 +299,75 @@ class TestAllowedAddressPairsDriver(base.TestCase):
         show_port.side_effect = neutron_exceptions.PortNotFoundClient
         self.driver.deallocate_vip(vip)
 
-    def test_plug_vip_errors_when_nova_cant_find_network_to_attach(self):
+    def test_plug_aap_errors_when_nova_cant_find_network_to_attach(self):
         lb = dmh.generate_load_balancer_tree()
-        show_subnet = self.driver.neutron_client.show_subnet
-        show_subnet.return_value = {
-            'subnet': {
-                'id': lb.vip.subnet_id
-            }
-        }
-        list_security_groups = self.driver.neutron_client.list_security_groups
-        lsc_side_effect = [
-            None, {
-                'security_groups': [
-                    {'id': 'lb-sec-grp1'}
-                ]
-            }
-        ]
-        list_security_groups.side_effect = lsc_side_effect
-
+        subnet = network_models.Subnet(id=t_constants.MOCK_VIP_SUBNET_ID,
+                                       network_id=t_constants.MOCK_VIP_NET_ID)
         network_attach = self.driver.compute.attach_network_or_port
         network_attach.side_effect = nova_exceptions.NotFound(404, "Network")
         self.assertRaises(network_base.PlugVIPException,
-                          self.driver.plug_vip, lb, lb.vip)
+                          self.driver.plug_aap_port, lb, lb.vip,
+                          lb.amphorae[0], subnet)
 
-    def test_plug_vip_errors_when_neutron_cant_find_port_to_update(self):
+    def test_plug_aap_errors_when_neutron_cant_find_port_to_update(self):
         lb = dmh.generate_load_balancer_tree()
-        show_subnet = self.driver.neutron_client.show_subnet
-        show_subnet.return_value = {
-            'subnet': {
-                'id': lb.vip.subnet_id
-            }
-        }
-        list_security_groups = self.driver.neutron_client.list_security_groups
-        lsc_side_effect = [
-            None, {
-                'security_groups': [
-                    {'id': 'lb-sec-grp1'}
-                ]
-            }
-        ]
-        list_security_groups.side_effect = lsc_side_effect
+        subnet = network_models.Subnet(id=t_constants.MOCK_VIP_SUBNET_ID,
+                                       network_id=t_constants.MOCK_VIP_NET_ID)
         network_attach = self.driver.compute.attach_network_or_port
         network_attach.return_value = t_constants.MOCK_NOVA_INTERFACE
 
         update_port = self.driver.neutron_client.update_port
         update_port.side_effect = neutron_exceptions.PortNotFoundClient
         self.assertRaises(network_base.PortNotFound,
-                          self.driver.plug_vip, lb, lb.vip)
+                          self.driver.plug_aap_port, lb, lb.vip,
+                          lb.amphorae[0], subnet)
 
-    def test_plug_vip(self):
+    @mock.patch('octavia.network.drivers.neutron.allowed_address_pairs.'
+                'AllowedAddressPairsDriver.update_vip_sg')
+    @mock.patch('octavia.network.drivers.neutron.allowed_address_pairs.'
+                'AllowedAddressPairsDriver.get_subnet')
+    @mock.patch('octavia.network.drivers.neutron.allowed_address_pairs.'
+                'AllowedAddressPairsDriver.plug_aap_port')
+    def test_plug_vip(self, mock_plug_aap, mock_get_subnet,
+                      mock_update_vip_sg):
+        lb = dmh.generate_load_balancer_tree()
+        subnet = mock.MagicMock()
+        mock_get_subnet.return_value = subnet
+        mock_plug_aap.side_effect = lb.amphorae
+        amps = self.driver.plug_vip(lb, lb.vip)
+
+        mock_update_vip_sg.assert_called_with(lb, lb.vip)
+        mock_get_subnet.assert_called_with(lb.vip.subnet_id)
+        for amp in amps:
+            mock_plug_aap.assert_any_call(lb, lb.vip, amp, subnet)
+
+    def test_update_vip_sg(self):
         lb = dmh.generate_load_balancer_tree()
-        show_subnet = self.driver.neutron_client.show_subnet
-        show_subnet.return_value = {
-            'subnet': {
-                'id': t_constants.MOCK_VIP_SUBNET_ID,
-                'network_id': t_constants.MOCK_VIP_NET_ID
-            }
-        }
-        list_ports = self.driver.neutron_client.list_ports
-        port1 = t_constants.MOCK_MANAGEMENT_PORT1['port']
-        port2 = t_constants.MOCK_MANAGEMENT_PORT2['port']
-        list_ports.side_effect = [{'ports': [port1]}, {'ports': [port2]}]
-        network_attach = self.driver.compute.attach_network_or_port
-        network_attach.side_effect = [t_constants.MOCK_VRRP_INTERFACE1,
-                                      t_constants.MOCK_VRRP_INTERFACE2]
         list_security_groups = self.driver.neutron_client.list_security_groups
         list_security_groups.return_value = {
             'security_groups': [
                 {'id': 'lb-sec-grp1'}
            ]
         }
+        self.driver.update_vip_sg(lb, lb.vip)
+
+    def test_plug_aap_port(self):
+        lb = dmh.generate_load_balancer_tree()
+
+        subnet = network_models.Subnet(id=t_constants.MOCK_VIP_SUBNET_ID,
+                                       network_id=t_constants.MOCK_VIP_NET_ID)
+
+        list_ports = self.driver.neutron_client.list_ports
+        port1 = t_constants.MOCK_MANAGEMENT_PORT1['port']
+        port2 = t_constants.MOCK_MANAGEMENT_PORT2['port']
+        list_ports.side_effect = [{'ports': [port1]}, {'ports': [port2]}]
+        network_attach = self.driver.compute.attach_network_or_port
+        network_attach.side_effect = [t_constants.MOCK_VRRP_INTERFACE1]
         update_port = self.driver.neutron_client.update_port
         expected_aap = {'port': {'allowed_address_pairs':
                                  [{'ip_address': lb.vip.ip_address}]}}
-        amps = self.driver.plug_vip(lb, lb.vip)
-        self.assertEqual(5, update_port.call_count)
-        for amp in amps:
+        amp = self.driver.plug_aap_port(lb, lb.vip, lb.amphorae[0], subnet)
         update_port.assert_any_call(amp.vrrp_port_id, expected_aap)
         self.assertIn(amp.vrrp_ip, [t_constants.MOCK_VRRP_IP1,
                                     t_constants.MOCK_VRRP_IP2])
@@ -390,16 +383,12 @@ class TestAllowedAddressPairsDriver(base.TestCase):
         self.addCleanup(setattr, obj, name, current)
         setattr(obj, name, value)
 
-    def test_plug_vip_on_mgmt_net(self):
+    def test_plug_aap_on_mgmt_net(self):
         lb = dmh.generate_load_balancer_tree()
         lb.vip.subnet_id = t_constants.MOCK_MANAGEMENT_SUBNET_ID
-        show_subnet = self.driver.neutron_client.show_subnet
-        show_subnet.return_value = {
-            'subnet': {
-                'id': t_constants.MOCK_MANAGEMENT_SUBNET_ID,
-                'network_id': t_constants.MOCK_MANAGEMENT_NET_ID
-            }
-        }
+        subnet = network_models.Subnet(
+            id=t_constants.MOCK_MANAGEMENT_SUBNET_ID,
+            network_id=t_constants.MOCK_MANAGEMENT_NET_ID)
         list_ports = self.driver.neutron_client.list_ports
         port1 = t_constants.MOCK_MANAGEMENT_PORT1['port']
         port2 = t_constants.MOCK_MANAGEMENT_PORT2['port']
@@ -417,20 +406,11 @@ class TestAllowedAddressPairsDriver(base.TestCase):
                          'net_id', t_constants.MOCK_MANAGEMENT_NET_ID)
         self._set_safely(t_constants.MOCK_VRRP_FIXED_IPS2[0],
                          'subnet_id', t_constants.MOCK_MANAGEMENT_SUBNET_ID)
-        network_attach.side_effect = [t_constants.MOCK_VRRP_INTERFACE1,
-                                      t_constants.MOCK_VRRP_INTERFACE2]
-        list_security_groups = self.driver.neutron_client.list_security_groups
-        list_security_groups.return_value = {
-            'security_groups': [
-                {'id': 'lb-sec-grp1'}
-            ]
-        }
+        network_attach.side_effect = [t_constants.MOCK_VRRP_INTERFACE1]
         update_port = self.driver.neutron_client.update_port
         expected_aap = {'port': {'allowed_address_pairs':
                                  [{'ip_address': lb.vip.ip_address}]}}
-        amps = self.driver.plug_vip(lb, lb.vip)
-        self.assertEqual(5, update_port.call_count)
-        for amp in amps:
+        amp = self.driver.plug_aap_port(lb, lb.vip, lb.amphorae[0], subnet)
         update_port.assert_any_call(amp.vrrp_port_id, expected_aap)
         self.assertIn(amp.vrrp_ip, [t_constants.MOCK_VRRP_IP1,
                                     t_constants.MOCK_VRRP_IP2])
@@ -533,38 +513,44 @@ class TestAllowedAddressPairsDriver(base.TestCase):
         self.assertEqual(t_constants.MOCK_PORT_ID, vip.port_id)
         self.assertEqual(fake_lb.id, vip.load_balancer_id)
 
-    def test_unplug_vip_errors_when_update_port_cant_find_port(self):
+    def test_unplug_aap_port_errors_when_update_port_cant_find_port(self):
         lb = dmh.generate_load_balancer_tree()
         list_ports = self.driver.neutron_client.list_ports
-        show_subnet = self.driver.neutron_client.show_subnet
-        show_subnet.return_value = t_constants.MOCK_SUBNET
         port1 = t_constants.MOCK_NEUTRON_PORT['port']
         port2 = {
             'id': '4', 'network_id': '3', 'fixed_ips':
            [{'ip_address': '10.0.0.2'}]
         }
+        subnet = network_models.Subnet(
+            id=t_constants.MOCK_MANAGEMENT_SUBNET_ID,
+            network_id='3')
         list_ports.return_value = {'ports': [port1, port2]}
         update_port = self.driver.neutron_client.update_port
         update_port.side_effect = neutron_exceptions.PortNotFoundClient
         self.assertRaises(network_base.UnplugVIPException,
-                          self.driver.unplug_vip, lb, lb.vip)
+                          self.driver.unplug_aap_port, lb.vip, lb.amphorae[0],
+                          subnet)
 
-    def test_unplug_vip_errors_when_update_port_fails(self):
+    def test_unplug_aap_errors_when_update_port_fails(self):
         lb = dmh.generate_load_balancer_tree()
-        show_subnet = self.driver.neutron_client.show_subnet
-        show_subnet.return_value = t_constants.MOCK_SUBNET
         port1 = t_constants.MOCK_NEUTRON_PORT['port']
         port2 = {
             'id': '4', 'network_id': '3', 'fixed_ips':
            [{'ip_address': '10.0.0.2'}]
         }
+
+        subnet = network_models.Subnet(
+            id=t_constants.MOCK_MANAGEMENT_SUBNET_ID,
+            network_id='3')
+
         list_ports = self.driver.neutron_client.list_ports
         list_ports.return_value = {'ports': [port1, port2]}
+
         update_port = self.driver.neutron_client.update_port
         update_port.side_effect = TypeError
         self.assertRaises(network_base.UnplugVIPException,
-                          self.driver.unplug_vip, lb, lb.vip)
+                          self.driver.unplug_aap_port, lb.vip,
+                          lb.amphorae[0], subnet)
 
     def test_unplug_vip_errors_when_vip_subnet_not_found(self):
         lb = dmh.generate_load_balancer_tree()
@@ -573,25 +559,33 @@ class TestAllowedAddressPairsDriver(base.TestCase):
         self.assertRaises(network_base.PluggedVIPNotFound,
                           self.driver.unplug_vip, lb, lb.vip)
 
-    def test_unplug_vip(self):
+    @mock.patch('octavia.network.drivers.neutron.allowed_address_pairs.'
+                'AllowedAddressPairsDriver.unplug_aap_port')
+    def test_unplug_vip(self, mock):
         lb = dmh.generate_load_balancer_tree()
         show_subnet = self.driver.neutron_client.show_subnet
         show_subnet.return_value = t_constants.MOCK_SUBNET
+        self.driver.unplug_vip(lb, lb.vip)
+        self.assertEqual(len(lb.amphorae), mock.call_count)
+
+    def test_unplug_aap_port(self):
+        lb = dmh.generate_load_balancer_tree()
         update_port = self.driver.neutron_client.update_port
         port1 = t_constants.MOCK_NEUTRON_PORT['port']
         port2 = {
             'id': '4', 'network_id': '3', 'fixed_ips':
            [{'ip_address': '10.0.0.2'}]
         }
+        subnet = network_models.Subnet(
+            id=t_constants.MOCK_MANAGEMENT_SUBNET_ID,
+            network_id='3')
         list_ports = self.driver.neutron_client.list_ports
         list_ports.return_value = {'ports': [port1, port2]}
         get_port = self.driver.neutron_client.get_port
         get_port.side_effect = neutron_exceptions.NotFound
-        self.driver.unplug_vip(lb, lb.vip)
-        self.assertEqual(len(lb.amphorae), update_port.call_count)
+        self.driver.unplug_aap_port(lb.vip, lb.amphorae[0], subnet)
         clear_aap = {'port': {'allowed_address_pairs': []}}
-        update_port.assert_has_calls([mock.call(port1.get('id'), clear_aap),
-                                      mock.call(port1.get('id'), clear_aap)])
+        update_port.assert_called_once_with(port2.get('id'), clear_aap)
 
     def test_plug_network_when_compute_instance_cant_be_found(self):
         net_id = t_constants.MOCK_NOVA_INTERFACE.net_id
@@ -67,6 +67,7 @@ class TestNoopNetworkDriver(base.TestCase):
         self.amphora2.ha_ip = '10.0.2.11'
         self.load_balancer.amphorae = [self.amphora1, self.amphora2]
         self.load_balancer.vip = self.vip
+        self.subnet = mock.MagicMock()
 
     def test_allocate_vip(self):
         self.driver.allocate_vip(self.load_balancer)
@@ -88,6 +89,13 @@ class TestNoopNetworkDriver(base.TestCase):
                          self.driver.driver.networkconfigconfig[(
                              self.load_balancer.id, self.vip.ip_address)])
 
+    def test_update_vip_sg(self):
+        self.driver.update_vip_sg(self.load_balancer, self.vip)
+        self.assertEqual((self.load_balancer, self.vip,
+                          'update_vip_sg'),
+                         self.driver.driver.networkconfigconfig[(
+                             self.load_balancer.id, self.vip.ip_address)])
+
     def test_unplug_vip(self):
         self.driver.unplug_vip(self.load_balancer, self.vip)
         self.assertEqual((self.load_balancer, self.vip,
@@ -210,3 +218,22 @@ class TestNoopNetworkDriver(base.TestCase):
             self.driver.driver.networkconfigconfig[self.qos_policy_id,
                                                    self.vrrp_port_id]
         )
+
+    def test_plug_aap_port(self):
+        self.driver.plug_aap_port(self.load_balancer, self.vip, self.amphora1,
+                                  self.subnet)
+        self.assertEqual(
+            (self.load_balancer, self.vip, self.amphora1, self.subnet,
+             'plug_aap_port'),
+            self.driver.driver.networkconfigconfig[self.amphora1.id,
+                                                   self.vip.ip_address]
+        )
+
+    def test_unplug_aap(self):
+        self.driver.unplug_aap_port(self.vip, self.amphora1, self.subnet)
+        self.assertEqual(
+            (self.vip, self.amphora1, self.subnet,
+             'unplug_aap_port'),
+            self.driver.driver.networkconfigconfig[self.amphora1.id,
+                                                   self.vip.ip_address]
+        )
releasenotes/notes/lb_flow_amp_vip-a83db5d84e17a26a.yaml (new file, 6 lines)
@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    This will speed up load balancer creation by allocating AAP ports in
+    parallel for LBs with more than one amphora. As a side effect the AAP
+    driver will be simplified and thus easier to maintain.
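To make the reshaped driver contract concrete, here is a small hedged sketch of the call order the create flow now follows. Mocks stand in for a real network driver and load balancer, so none of this is the Octavia implementation itself:

    from unittest import mock

    driver = mock.MagicMock()  # stands in for an AbstractNetworkDriver
    lb = mock.MagicMock()      # stands in for a load balancer model
    lb.amphorae = [mock.MagicMock(), mock.MagicMock()]

    driver.update_vip_sg(lb, lb.vip)              # once per load balancer
    subnet = driver.get_subnet(lb.vip.subnet_id)  # once per load balancer
    amps_data = [driver.plug_aap_port(lb, lb.vip, amp, subnet)
                 for amp in lb.amphorae]          # once per amphora
    assert driver.plug_aap_port.call_count == len(lb.amphorae)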