Merge "Added update_vip method to network driver"
commit 88049e87ae
@@ -19,6 +19,7 @@ from octavia.common import constants
 from octavia.controller.worker.tasks import amphora_driver_tasks
 from octavia.controller.worker.tasks import database_tasks
 from octavia.controller.worker.tasks import model_tasks
+from octavia.controller.worker.tasks import network_tasks


 class ListenerFlows(object):
@@ -31,6 +32,8 @@ class ListenerFlows(object):
         create_listener_flow = linear_flow.Flow(constants.CREATE_LISTENER_FLOW)
         create_listener_flow.add(amphora_driver_tasks.ListenerUpdate(
             requires=['listener', 'vip']))
+        create_listener_flow.add(network_tasks.UpdateVIP(
+            requires=constants.LOADBALANCER))
         create_listener_flow.add(database_tasks.
                                  MarkLBAndListenerActiveInDB(
                                      requires=['loadbalancer', 'listener']))
@@ -45,6 +48,8 @@ class ListenerFlows(object):
         delete_listener_flow = linear_flow.Flow(constants.DELETE_LISTENER_FLOW)
         delete_listener_flow.add(amphora_driver_tasks.ListenerDelete(
             requires=['listener', 'vip']))
+        delete_listener_flow.add(network_tasks.UpdateVIP(
+            requires=constants.LOADBALANCER))
         delete_listener_flow.add(database_tasks.MarkListenerDeletedInDB(
             requires='listener'))
         delete_listener_flow.add(database_tasks.
@@ -211,10 +211,7 @@ class AllocateVIP(BaseNetworkTask):
                   loadbalancer.vip.port_id,
                   loadbalancer.vip.network_id,
                   loadbalancer.vip.ip_address)
-        return self.network_driver.allocate_vip(
-            port_id=loadbalancer.vip.port_id,
-            network_id=loadbalancer.vip.network_id,
-            ip_address=loadbalancer.vip.ip_address)
+        return self.network_driver.allocate_vip(loadbalancer)

    def revert(self, result, loadbalancer, *args, **kwargs):
        """Handle a failure to allocate vip."""
@@ -237,3 +234,12 @@ class DeallocateVIP(BaseNetworkTask):

         self.network_driver.deallocate_vip(vip)
         return
+
+
+class UpdateVIP(BaseNetworkTask):
+    """Task to update a VIP."""
+
+    def execute(self, loadbalancer):
+        LOG.debug("Updating VIP of load_balancer %s." % loadbalancer.id)
+
+        self.network_driver.update_vip(loadbalancer)
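For context (not part of this change), a minimal sketch of how a TaskFlow engine drives a task such as UpdateVIP: the engine resolves the task's 'loadbalancer' requirement from the flow's store and passes it to execute(). The class name DemoUpdateVIP and the 'lb-1' placeholder are illustrative only.

    # Hedged sketch, assuming only the public taskflow API; a real task would
    # call self.network_driver.update_vip(loadbalancer) instead of printing.
    from taskflow import engines
    from taskflow import task
    from taskflow.patterns import linear_flow

    class DemoUpdateVIP(task.Task):
        def execute(self, loadbalancer):
            # The engine injects 'loadbalancer' from the store below.
            print("updating VIP for %s" % loadbalancer)

    flow = linear_flow.Flow('demo-flow')
    flow.add(DemoUpdateVIP(requires='loadbalancer'))
    engines.run(flow, store={'loadbalancer': 'lb-1'})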
@@ -80,18 +80,13 @@ class AbstractNetworkDriver(object):
        """

    @abc.abstractmethod
-    def allocate_vip(self, port_id=None, network_id=None, ip_address=None):
+    def allocate_vip(self, load_balancer):
        """Allocates a virtual ip.

        Reserves it for later use as the frontend connection of a load
        balancer.

-        :param port_id: id of port that has already been created. If this is
-                        provided, it will be used regardless of the other
-                        parameters.
-        :param network_id: if port_id is not provided, this should be
-                           provided to create the virtual ip on this network.
-        :param ip_address: will attempt to allocate this specific IP address
+        :param load_balancer: octavia.common.data_models.LoadBalancer instance
        :return: octavia.common.data_models.VIP
        :raises: AllocateVIPException, PortNotFound, NetworkNotFound
        """
@@ -171,4 +166,17 @@ class AbstractNetworkDriver(object):
        :param amphora_id: id of an amphora in the compute service
        :return: [octavia.network.data_models.Instance]
        :raises: AmphoraNotFound
        """
+
+    def update_vip(self, load_balancer):
+        """Hook for the driver to update the VIP information.
+
+        This method will be called upon the change of a load_balancer
+        configuration. It is an optional method to be implemented by drivers.
+        It allows the driver to update any VIP information based on the
+        state of the passed in load_balancer.
+
+        :param load_balancer: octavia.common.data_models.LoadBalancer instance
+        :return: None
+        """
+        pass
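Because update_vip() defaults to a no-op, only drivers that care about VIP changes need to override it. A standalone sketch of what a concrete implementation of this optional hook typically does (the class and behavior are hypothetical, and the other AbstractNetworkDriver methods are omitted for brevity):

    # Hedged sketch: recompute what the VIP should allow from the
    # load balancer's current listeners.
    class ExampleDriver(object):
        def update_vip(self, load_balancer):
            ports = [l.protocol_port for l in load_balancer.listeners]
            # A real driver would push these ports to its firewall or
            # security-group backend here.
            return ports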
@@ -130,6 +130,34 @@ class AllowedAddressPairsDriver(base.AbstractNetworkDriver):
         if len(sec_grps.get('security_groups')):
             return sec_grps.get('security_groups')[0]

+    def _update_security_group_rules(self, load_balancer, sec_grp_id):
+        rules = self.neutron_client.list_security_group_rules(
+            security_group_id=sec_grp_id)
+        updated_ports = [listener.protocol_port
+                         for listener in load_balancer.listeners]
+        # Just going to use port_range_max for now because we can assume that
+        # port_range_max and min will be the same since this driver is
+        # responsible for creating these rules
+        old_ports = [rule.get('port_range_max')
+                     for rule in rules.get('security_group_rules', [])]
+        add_ports = set(updated_ports) - set(old_ports)
+        del_ports = set(old_ports) - set(updated_ports)
+        for rule in rules.get('security_group_rules', []):
+            if rule.get('port_range_max') in del_ports:
+                self.neutron_client.delete_security_group_rule(rule.get('id'))
+
+        for port in add_ports:
+            rule = {
+                'security_group_rule': {
+                    'security_group_id': sec_grp_id,
+                    'direction': 'ingress',
+                    'protocol': 'TCP',
+                    'port_range_min': port,
+                    'port_range_max': port
+                }
+            }
+            self.neutron_client.create_security_group_rule(rule)
+
     def _update_vip_security_group(self, load_balancer, vip):
         sec_grp = self._get_lb_security_group(load_balancer.id)
         if not sec_grp:
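The rule reconciliation above reduces to two set differences between the ports the listeners want and the ports already present as security group rules. A standalone sketch of the same logic with made-up port numbers, no Neutron calls involved:

    # Hedged sketch of the reconciliation in _update_security_group_rules().
    existing_rules = [{'id': 'rule-80', 'port_range_max': 80},
                      {'id': 'rule-22', 'port_range_max': 22}]
    listener_ports = [80, 443]

    old_ports = {rule['port_range_max'] for rule in existing_rules}
    add_ports = set(listener_ports) - old_ports   # ports needing new rules
    del_ports = old_ports - set(listener_ports)   # rules to delete

    assert add_ports == {443} and del_ports == {22}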
@@ -137,21 +165,7 @@ class AllowedAddressPairsDriver(base.AbstractNetworkDriver):
             new_sec_grp = {'security_group': {'name': sec_grp_name}}
             sec_grp = self.neutron_client.create_security_group(new_sec_grp)
             sec_grp = sec_grp['security_group']
-        for listener in load_balancer.listeners:
-            rule = {
-                'security_group_rule': {
-                    'security_group_id': sec_grp.get('id'),
-                    'direction': 'ingress',
-                    'protocol': 'TCP',
-                    'port_range_min': listener.protocol_port,
-                    'port_range_max': listener.protocol_port
-                }
-            }
-            try:
-                self.neutron_client.create_security_group_rule(rule)
-            except neutron_client_exceptions.Conflict as conflict_e:
-                if 'already exists' not in conflict_e.message.lower():
-                    raise conflict_e
+        self._update_security_group_rules(load_balancer, sec_grp.get('id'))
         port_update = {'port': {'security_groups': [sec_grp.get('id')]}}
         try:
             self.neutron_client.update_port(vip.port_id, port_update)
@@ -197,28 +211,29 @@ class AllowedAddressPairsDriver(base.AbstractNetworkDriver):
                 ha_ip=vip.ip_address))
         return plugged_amphorae

-    def allocate_vip(self, port_id=None, network_id=None, ip_address=None):
-        if not port_id and not network_id:
+    def allocate_vip(self, load_balancer):
+        if not load_balancer.vip.port_id and not load_balancer.vip.network_id:
             raise base.AllocateVIPException('Cannot allocate a vip '
                                             'without a port_id or '
                                             'a network_id.')
-        if port_id:
+        if load_balancer.vip.port_id:
             LOG.info(_LI('Port {port_id} already exists. Nothing to be '
-                         'done.').format(port_id=port_id))
+                         'done.').format(port_id=load_balancer.vip.port_id))
             try:
-                port = self.neutron_client.show_port(port_id)
+                port = self.neutron_client.show_port(load_balancer.vip.port_id)
             except neutron_client_exceptions.PortNotFoundClient as e:
                 raise base.PortNotFound(e.message)
             except Exception:
                 message = _LE('Error retrieving info about port '
-                              '{port_id}.').format(port_id=port_id)
+                              '{port_id}.').format(
+                    port_id=load_balancer.vip.port_id)
                 LOG.exception(message)
                 raise base.AllocateVIPException(message)
             return self._port_to_vip(port)

         # It can be assumed that network_id exists
-        port = {'port': {'name': 'octavia-port',
-                         'network_id': network_id,
+        port = {'port': {'name': 'octavia-lb-' + load_balancer.id,
+                         'network_id': load_balancer.vip.network_id,
                          'admin_state_up': False,
                          'device_id': '',
                          'device_owner': ''}}
@@ -228,7 +243,8 @@ class AllowedAddressPairsDriver(base.AbstractNetworkDriver):
             raise base.NetworkNotFound(e.message)
         except Exception:
             message = _LE('Error creating neutron port on network '
-                          '{network_id}.').format(network_id=network_id)
+                          '{network_id}.').format(
+                network_id=load_balancer.vip.network_id)
             LOG.exception(message)
             raise base.AllocateVIPException(message)
         return self._port_to_vip(new_port)
@@ -329,3 +345,7 @@ class AllowedAddressPairsDriver(base.AbstractNetworkDriver):
                        'unplugged.').format(base=message)
             LOG.exception(message)
             raise base.UnplugNetworkException(message)
+
+    def update_vip(self, load_balancer):
+        sec_grp = self._get_lb_security_group(load_balancer.id)
+        self._update_security_group_rules(load_balancer, sec_grp.get('id'))
@@ -24,12 +24,11 @@ class NoopManager(object):
         super(NoopManager, self).__init__()
         self.networkconfigconfig = {}

-    def allocate_vip(self, port_id=None, network_id=None, ip_address=None):
-        LOG.debug("Network %s no-op, allocate_vip port_id %s, network_id %s,"
-                  "ip_address %s",
-                  self.__class__.__name__, port_id, network_id, ip_address)
-        self.networkconfigconfig[(port_id, network_id, ip_address)] = (
-            port_id, network_id, ip_address, 'allocate_vip')
+    def allocate_vip(self, load_balancer):
+        LOG.debug("Network %s no-op, allocate_vip load_balancer %s",
+                  self.__class__.__name__, load_balancer)
+        self.networkconfigconfig[load_balancer] = (
+            load_balancer, 'allocate_vip')

     def deallocate_vip(self, vip):
         LOG.debug("Network %s no-op, deallocate_vip vip %s",
@@ -73,14 +72,19 @@ class NoopManager(object):
         self.networkconfigconfig[amphora_id] = (
             amphora_id, 'get_plugged_networks')

+    def update_vip(self, load_balancer):
+        LOG.debug("Network %s no-op, update_vip load_balancer %s",
+                  self.__class__.__name__, load_balancer)
+        self.networkconfigconfig[load_balancer] = (load_balancer, 'update_vip')
+

 class NoopNetworkDriver(driver_base.AbstractNetworkDriver):
     def __init__(self):
         super(NoopNetworkDriver, self).__init__()
         self.driver = NoopManager()

-    def allocate_vip(self, port_id=None, network_id=None, ip_address=None):
-        self.driver.allocate_vip(port_id, network_id, ip_address)
+    def allocate_vip(self, load_balancer):
+        self.driver.allocate_vip(load_balancer)

     def deallocate_vip(self, vip):
         self.driver.deallocate_vip(vip)
@@ -99,3 +103,6 @@ class NoopNetworkDriver(driver_base.AbstractNetworkDriver):

     def get_plugged_networks(self, amphora_id):
         self.driver.get_plugged_networks(amphora_id)
+
+    def update_vip(self, load_balancer):
+        self.driver.update_vip(load_balancer)
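The no-op driver simply records each call in networkconfigconfig, keyed by its argument, so the unit tests further down can look the call back up. A tiny standalone sketch of that recording pattern (names are illustrative, not part of the change):

    # Hedged sketch of the call-recording idiom used by NoopManager.
    class RecordingManager(object):
        def __init__(self):
            self.calls = {}

        def update_vip(self, load_balancer):
            self.calls[load_balancer] = (load_balancer, 'update_vip')

    mgr = RecordingManager()
    mgr.update_vip('lb-1')
    assert mgr.calls['lb-1'] == ('lb-1', 'update_vip')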
@@ -194,8 +194,7 @@ class TestNetworkTasks(base.TestCase):

         mock_driver.reset_mock()
         self.assertEqual(LB.vip, net.execute(LB))
-        mock_driver.allocate_vip.assert_called_once_with(
-            port_id=PORT_ID, network_id=NETWORK_ID, ip_address=IP_ADDRESS)
+        mock_driver.allocate_vip.assert_called_once_with(LB)
         # revert
         vip_mock = mock.MagicMock()
         net.revert(vip_mock, LB)
@@ -161,12 +161,15 @@ class TestAllowedAddressPairsDriver(base.TestCase):
         self.assertIn(amp.id, [lb.amphorae[0].id, lb.amphorae[1].id])

     def test_allocate_vip(self):
+        fake_lb_vip = data_models.Vip()
+        fake_lb = data_models.LoadBalancer(id='1', vip=fake_lb_vip)
         self.assertRaises(network_base.AllocateVIPException,
-                          self.driver.allocate_vip, port_id=None,
-                          network_id=None)
+                          self.driver.allocate_vip, fake_lb)
         show_port = self.driver.neutron_client.show_port
         show_port.return_value = MOCK_NEUTRON_PORT
-        vip = self.driver.allocate_vip(port_id=MOCK_NEUTRON_PORT['port']['id'])
+        fake_lb_vip = data_models.Vip(port_id=MOCK_NEUTRON_PORT['port']['id'])
+        fake_lb = data_models.LoadBalancer(id='1', vip=fake_lb_vip)
+        vip = self.driver.allocate_vip(fake_lb)
         self.assertIsInstance(vip, data_models.Vip)
         self.assertEqual(
             MOCK_NEUTRON_PORT['port']['fixed_ips'][0]['ip_address'],
@@ -178,8 +181,10 @@ class TestAllowedAddressPairsDriver(base.TestCase):

         create_port = self.driver.neutron_client.create_port
         create_port.return_value = MOCK_NEUTRON_PORT
-        vip = self.driver.allocate_vip(
+        fake_lb_vip = data_models.Vip(
             network_id=MOCK_NEUTRON_PORT['port']['network_id'])
+        fake_lb = data_models.LoadBalancer(id='1', vip=fake_lb_vip)
+        vip = self.driver.allocate_vip(fake_lb)
         self.assertIsInstance(vip, data_models.Vip)
         self.assertEqual(
             MOCK_NEUTRON_PORT['port']['fixed_ips'][0]['ip_address'],
@@ -289,3 +294,32 @@ class TestAllowedAddressPairsDriver(base.TestCase):
         self.driver.unplug_network(amp_id, if2.net_id)
         interface_detach.assert_called_once_with(server=amp_id,
                                                  port_id=if2.port_id)
+
+    def test_update_vip(self):
+        listeners = [data_models.Listener(protocol_port=80),
+                     data_models.Listener(protocol_port=443)]
+        lb = data_models.LoadBalancer(id='1', listeners=listeners)
+        list_sec_grps = self.driver.neutron_client.list_security_groups
+        list_sec_grps.return_value = {'security_groups': [{'id': 'secgrp-1'}]}
+        fake_rules = {
+            'security_group_rules': [
+                {'id': 'rule-80', 'port_range_max': 80},
+                {'id': 'rule-22', 'port_range_max': 22}
+            ]
+        }
+        list_rules = self.driver.neutron_client.list_security_group_rules
+        list_rules.return_value = fake_rules
+        delete_rule = self.driver.neutron_client.delete_security_group_rule
+        create_rule = self.driver.neutron_client.create_security_group_rule
+        self.driver.update_vip(lb)
+        delete_rule.assert_called_once_with('rule-22')
+        expected_create_rule = {
+            'security_group_rule': {
+                'security_group_id': 'secgrp-1',
+                'direction': 'ingress',
+                'protocol': 'TCP',
+                'port_range_min': 443,
+                'port_range_max': 443
+            }
+        }
+        create_rule.assert_called_once_with(expected_create_rule)
@@ -42,12 +42,10 @@ class TestNoopNetworkDriver(base.TestCase):
         self.amphora_id = self.FAKE_UUID_1

     def test_allocate_vip(self):
-        self.driver.allocate_vip(self.port_id, self.network_id,
-                                 self.ip_address)
-        self.assertEqual((self.port_id, self.network_id, self.ip_address,
-                          'allocate_vip'),
-                         self.driver.driver.networkconfigconfig[(
-                             self.port_id, self.network_id, self.ip_address)])
+        self.driver.allocate_vip(self.load_balancer)
+        self.assertEqual(
+            (self.load_balancer, 'allocate_vip'),
+            self.driver.driver.networkconfigconfig[self.load_balancer])

     def test_deallocate_vip(self):
         self.driver.deallocate_vip(self.vip)
@@ -91,3 +89,10 @@ class TestNoopNetworkDriver(base.TestCase):
         self.assertEqual((self.amphora_id, 'get_plugged_networks'),
                          self.driver.driver.networkconfigconfig[(
                              self.amphora_id)])
+
+    def test_update_vip(self):
+        self.driver.update_vip(self.load_balancer)
+        self.assertEqual((self.load_balancer, 'update_vip'),
+                         self.driver.driver.networkconfigconfig[(
+                             self.load_balancer
+                         )])
@@ -115,15 +115,11 @@ class AbstractNetworkDriver
  * returns None
  * raises UnplugVIPException, PluggedVIPNotFound

-* allocate_vip(port_id=None, network_id=None, ip_address=None)
+* allocate_vip(loadbalancer)

  * Allocates a virtual ip and reserves it for later use as the frontend
    connection of a load balancer.
-  * port_id = id of port that has already been created. If this is
-    provided, it will be used regardless of the other parameters.
-  * network_id = if port_id is not provided, this should be provided
-    to create the virtual IP on this network.
-  * ip_address = will attempt to allocate this specific IP address
+  * loadbalancer = instance of a data_models.LoadBalancer
  * returns VIP instance
  * raises AllocateVIPException, PortNotFound, NetworkNotFound

@@ -160,6 +156,12 @@ class AbstractNetworkDriver
  * returns = list of Instance instances
  * raises AmphoraNotFound

+* update_vip(loadbalancer):
+
+  * Hook for the driver to update the VIP information based on the state
+    of the passed in loadbalancer
+  * loadbalancer: instance of a data_models.LoadBalancer
+
 Alternatives
 ------------
