Implement provider drivers - Load Balancer
This patch adds provider driver support to the Octavia v2 API, starting with the load balancer API. It also creates a provider driver for Octavia, initially implementing the full set of load balancer methods. Follow-on patches will implement the remaining parts of the API.

Change-Id: Ia15280827799d1800c23ed76d2af0e3596b9d2f7
Story: 1655768
Task: 5165
parent dfa7ef2ab3
commit 7b2621fe29
@@ -43,6 +43,12 @@
 # Enable/disable ability for users to create PING type Health Monitors
 # allow_ping_health_monitors = True

+# List of enabled provider drivers
+# enabled_provider_drivers = octavia, amphora
+
+# Default provider driver
+# default_provider_driver = amphora
+
 [database]
 # This line MUST be changed to actually run the plugin.
 # Example:
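The option definitions backing this sample config live elsewhere in the tree and are not part of this diff; only the option names and the `api_settings` group are confirmed by `driver_factory.py` below. A minimal, illustrative sketch of how such options are typically registered with oslo.config (defaults and help strings here are assumptions):

```python
from oslo_config import cfg

# Illustrative registration only; the real definitions are not in this diff.
api_opts = [
    cfg.ListOpt('enabled_provider_drivers',
                default=['octavia', 'amphora'],
                help='List of enabled provider drivers.'),
    cfg.StrOpt('default_provider_driver',
               default='amphora',
               help='Default provider driver.'),
]
cfg.CONF.register_opts(api_opts, group='api_settings')
```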
@@ -28,6 +28,7 @@ app = {
 # WSME Configurations
 # See https://wsme.readthedocs.org/en/latest/integrate.html#configuration
 wsme = {
-    # Keeping debug True for now so we can easily troubleshoot.
-    'debug': True
+    # Provider driver uses 501 if the driver is not installed.
+    # Don't dump a stack trace for 501s
+    'debug': False
 }
octavia/api/drivers/amphora_driver/__init__.py (new file, 11 lines)
@@ -0,0 +1,11 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
octavia/api/drivers/amphora_driver/driver.py (new file, 163 lines)
@@ -0,0 +1,163 @@
# Copyright 2018 Rackspace, US Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging

from octavia.api.drivers import exceptions
from octavia.api.drivers import provider_base as driver_base
from octavia.api.drivers import utils as driver_utils
from octavia.common import constants as consts
from octavia.common import data_models
from octavia.common import utils
from octavia.network import base as network_base

CONF = cfg.CONF
CONF.import_group('oslo_messaging', 'octavia.common.config')
LOG = logging.getLogger(__name__)


class AmphoraProviderDriver(driver_base.ProviderDriver):
    def __init__(self):
        super(AmphoraProviderDriver, self).__init__()
        topic = cfg.CONF.oslo_messaging.topic
        self.transport = messaging.get_rpc_transport(cfg.CONF)
        self.target = messaging.Target(
            namespace=consts.RPC_NAMESPACE_CONTROLLER_AGENT,
            topic=topic, version="1.0", fanout=False)
        self.client = messaging.RPCClient(self.transport, target=self.target)

    # Load Balancer
    def create_vip_port(self, loadbalancer_id, project_id, vip_dictionary):
        vip_obj = driver_utils.provider_vip_dict_to_vip_obj(vip_dictionary)
        lb_obj = data_models.LoadBalancer(id=loadbalancer_id,
                                          project_id=project_id, vip=vip_obj)

        network_driver = utils.get_network_driver()
        try:
            vip = network_driver.allocate_vip(lb_obj)
        except network_base.AllocateVIPException as e:
            raise exceptions.DriverError(user_fault_string=e.orig_msg,
                                         operator_fault_string=e.orig_msg)

        LOG.info('Amphora provider created VIP port %s for load balancer %s.',
                 vip.port_id, loadbalancer_id)
        return driver_utils.vip_dict_to_provider_dict(vip.to_dict())

    def loadbalancer_create(self, loadbalancer):
        payload = {consts.LOAD_BALANCER_ID: loadbalancer.loadbalancer_id}
        self.client.cast({}, 'create_load_balancer', **payload)

    def loadbalancer_delete(self, loadbalancer_id, cascade=False):
        payload = {consts.LOAD_BALANCER_ID: loadbalancer_id,
                   'cascade': cascade}
        self.client.cast({}, 'delete_load_balancer', **payload)

    def loadbalancer_failover(self, loadbalancer_id):
        payload = {consts.LOAD_BALANCER_ID: loadbalancer_id}
        self.client.cast({}, 'failover_load_balancer', **payload)

    def loadbalancer_update(self, loadbalancer):
        # Adapt the provider data model to the queue schema
        lb_dict = loadbalancer.to_dict()
        if 'admin_state_up' in lb_dict:
            lb_dict['enabled'] = lb_dict.pop('admin_state_up')
        lb_id = lb_dict.pop('loadbalancer_id')

        payload = {consts.LOAD_BALANCER_ID: lb_id,
                   consts.LOAD_BALANCER_UPDATES: lb_dict}
        self.client.cast({}, 'update_load_balancer', **payload)

    # Listener
    def listener_create(self, listener):
        payload = {consts.LISTENER_ID: listener.listener_id}
        self.client.cast({}, 'create_listener', **payload)

    def listener_delete(self, listener_id):
        payload = {consts.LISTENER_ID: listener_id}
        self.client.cast({}, 'delete_listener', **payload)

    def listener_update(self, listener):
        pass

    # Pool
    def pool_create(self, pool):
        payload = {consts.POOL_ID: pool.pool_id}
        self.client.cast({}, 'create_pool', **payload)

    def pool_delete(self, pool_id):
        payload = {consts.POOL_ID: pool_id}
        self.client.cast({}, 'delete_pool', **payload)

    def pool_update(self, pool):
        pass

    # Member
    def member_create(self, member):
        payload = {consts.MEMBER_ID: member.member_id}
        self.client.cast({}, 'create_member', **payload)

    def member_delete(self, member_id):
        payload = {consts.MEMBER_ID: member_id}
        self.client.cast({}, 'delete_member', **payload)

    def member_update(self, member):
        pass

    def member_batch_update(self, members):
        pass

    # Health Monitor
    def health_monitor_create(self, healthmonitor):
        payload = {consts.HEALTH_MONITOR_ID: healthmonitor.healthmonitor_id}
        self.client.cast({}, 'create_health_monitor', **payload)

    def health_monitor_delete(self, healthmonitor_id):
        payload = {consts.HEALTH_MONITOR_ID: healthmonitor_id}
        self.client.cast({}, 'delete_health_monitor', **payload)

    def health_monitor_update(self, healthmonitor):
        pass

    # L7 Policy
    def l7policy_create(self, l7policy):
        payload = {consts.L7POLICY_ID: l7policy.l7policy_id}
        self.client.cast({}, 'create_l7policy', **payload)

    def l7policy_delete(self, l7policy_id):
        payload = {consts.L7POLICY_ID: l7policy_id}
        self.client.cast({}, 'delete_l7policy', **payload)

    def l7policy_update(self, l7policy):
        pass

    # L7 Rule
    def l7rule_create(self, l7rule):
        payload = {consts.L7RULE_ID: l7rule.l7rule_id}
        self.client.cast({}, 'create_l7rule', **payload)

    def l7rule_delete(self, l7rule_id):
        payload = {consts.L7RULE_ID: l7rule_id}
        self.client.cast({}, 'delete_l7rule', **payload)

    def l7rule_update(self, l7rule):
        pass

    # Flavor
    def get_supported_flavor_metadata(self):
        pass

    def validate_flavor(self, flavor_metadata):
        pass
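For orientation, here is a sketch of how these methods are meant to be exercised. Each create/delete call only casts a small ID payload onto the controller queue and returns immediately (fire-and-forget), so nothing is returned. This assumes an initialized Octavia configuration and a reachable oslo.messaging transport; the UUID is made up:

```python
from octavia.api.drivers import data_models as driver_dm
from octavia.api.drivers.amphora_driver import driver as amphora_driver

provider = amphora_driver.AmphoraProviderDriver()

# Only the ID travels over the queue; the controller worker looks the
# rest of the object up in the database.
lb = driver_dm.LoadBalancer(
    loadbalancer_id='6b9f5f1a-...')  # hypothetical, truncated UUID

provider.loadbalancer_create(lb)                     # casts 'create_load_balancer'
provider.loadbalancer_failover(lb.loadbalancer_id)   # casts 'failover_load_balancer'
```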
@@ -17,9 +17,14 @@

 import six

+from oslo_log import log as logging
+
+LOG = logging.getLogger(__name__)
+

 class BaseDataModel(object):
-    def to_dict(self, calling_classes=None, recurse=False, **kwargs):
+    def to_dict(self, calling_classes=None, recurse=False,
+                render_unsets=False, **kwargs):
         """Converts a data model to a dictionary."""
         calling_classes = calling_classes or []
         ret = {}
@@ -36,24 +41,35 @@ class BaseDataModel(object):
                             if type(self) not in calling_classes:
                                 ret[attr].append(
                                     item.to_dict(calling_classes=(
-                                        calling_classes + [type(self)])))
+                                        calling_classes + [type(self)]),
+                                        render_unsets=render_unsets))
                             else:
-                                ret[attr] = None
+                                ret[attr].append(None)
                         else:
-                            ret[attr] = item
+                            ret[attr].append(item)
                 elif isinstance(getattr(self, attr), BaseDataModel):
                     if type(self) not in calling_classes:
                         ret[attr] = value.to_dict(
+                            render_unsets=render_unsets,
                             calling_classes=calling_classes + [type(self)])
                     else:
                         ret[attr] = None
                 elif six.PY2 and isinstance(value, six.text_type):
                     ret[attr.encode('utf8')] = value.encode('utf8')
+                elif isinstance(value, UnsetType):
+                    if render_unsets:
+                        ret[attr] = None
+                    else:
+                        continue
                 else:
                     ret[attr] = value
             else:
-                if isinstance(getattr(self, attr), (BaseDataModel, list)):
-                    ret[attr] = None
+                if (isinstance(getattr(self, attr), (BaseDataModel, list)) or
+                        isinstance(value, UnsetType)):
+                    if render_unsets:
+                        ret[attr] = None
+                    else:
+                        continue
                 else:
                     ret[attr] = value
@@ -72,32 +88,49 @@
         return cls(**dict)


+class UnsetType(object):
+    def __bool__(self):
+        return False
+    __nonzero__ = __bool__
+
+    def __repr__(self):
+        return 'Unset'
+
+
+Unset = UnsetType()
+
+
 class LoadBalancer(BaseDataModel):
-    def __init__(self, admin_state_up=None, description=None, flavor=None,
-                 listeners=None, loadbalancer_id=None, name=None,
-                 project_id=None, vip_address=None, vip_network_id=None,
-                 vip_port_id=None, vip_subnet_id=None):
+    def __init__(self, admin_state_up=Unset, description=Unset, flavor=Unset,
+                 listeners=Unset, loadbalancer_id=Unset, name=Unset,
+                 pools=Unset, project_id=Unset, vip_address=Unset,
+                 vip_network_id=Unset, vip_port_id=Unset, vip_subnet_id=Unset,
+                 vip_qos_policy_id=Unset):

         self.admin_state_up = admin_state_up
         self.description = description
-        self.flavor = flavor or {}
-        self.listeners = listeners or []
+        self.flavor = flavor
+        self.listeners = listeners
         self.loadbalancer_id = loadbalancer_id
         self.name = name
+        self.pools = pools
         self.project_id = project_id
         self.vip_address = vip_address
         self.vip_network_id = vip_network_id
         self.vip_port_id = vip_port_id
         self.vip_subnet_id = vip_subnet_id
+        self.vip_qos_policy_id = vip_qos_policy_id


 class Listener(BaseDataModel):
-    def __init__(self, admin_state_up=None, connection_limit=None,
-                 default_pool=None, default_pool_id=None,
-                 default_tls_container=None, description=None,
-                 insert_headers=None, l7policies=None, listener_id=None,
-                 loadbalancer_id=None, name=None, protocol=None,
-                 protocol_port=None, sni_containers=None):
+    def __init__(self, admin_state_up=Unset, connection_limit=Unset,
+                 default_pool=Unset, default_pool_id=Unset,
+                 default_tls_container=Unset, description=Unset,
+                 insert_headers=Unset, l7policies=Unset, listener_id=Unset,
+                 loadbalancer_id=Unset, name=Unset, protocol=Unset,
+                 protocol_port=Unset, sni_containers=Unset,
+                 timeout_client_data=Unset, timeout_member_connect=Unset,
+                 timeout_member_data=Unset, timeout_tcp_inspect=Unset):

         self.admin_state_up = admin_state_up
         self.connection_limit = connection_limit
@@ -105,40 +138,43 @@ class Listener(BaseDataModel):
         self.default_pool_id = default_pool_id
         self.default_tls_container = default_tls_container
         self.description = description
-        self.insert_headers = insert_headers or {}
-        self.l7policies = l7policies or []
+        self.insert_headers = insert_headers
+        self.l7policies = l7policies
         self.listener_id = listener_id
         self.loadbalancer_id = loadbalancer_id
         self.name = name
         self.protocol = protocol
         self.protocol_port = protocol_port
         self.sni_containers = sni_containers
+        self.timeout_client_data = timeout_client_data
+        self.timeout_member_connect = timeout_member_connect
+        self.timeout_member_data = timeout_member_data
+        self.timeout_tcp_inspect = timeout_tcp_inspect


 class Pool(BaseDataModel):
-    def __init__(self, admin_state_up=None, description=None,
-                 healthmonitor=None, lb_algorithm=None, listener_id=None,
-                 loadbalancer_id=None, members=None, name=None, pool_id=None,
-                 protocol=None, session_persistence=None):
+    def __init__(self, admin_state_up=Unset, description=Unset,
+                 healthmonitor=Unset, lb_algorithm=Unset,
+                 loadbalancer_id=Unset, members=Unset, name=Unset,
+                 pool_id=Unset, protocol=Unset, session_persistence=Unset):

         self.admin_state_up = admin_state_up
         self.description = description
         self.healthmonitor = healthmonitor
         self.lb_algorithm = lb_algorithm
-        self.listener_id = listener_id
         self.loadbalancer_id = loadbalancer_id
-        self.members = members or []
+        self.members = members
         self.name = name
         self.pool_id = pool_id
         self.protocol = protocol
-        self.session_persistence = session_persistence or {}
+        self.session_persistence = session_persistence


 class Member(BaseDataModel):
-    def __init__(self, address=None, admin_state_up=None, member_id=None,
-                 monitor_address=None, monitor_port=None, name=None,
-                 pool_id=None, protocol_port=None, subnet_id=None,
-                 weight=None):
+    def __init__(self, address=Unset, admin_state_up=Unset, member_id=Unset,
+                 monitor_address=Unset, monitor_port=Unset, name=Unset,
+                 pool_id=Unset, protocol_port=Unset, subnet_id=Unset,
+                 weight=Unset, backup=Unset):

         self.address = address
         self.admin_state_up = admin_state_up
@@ -150,13 +186,14 @@ class Member(BaseDataModel):
         self.protocol_port = protocol_port
         self.subnet_id = subnet_id
         self.weight = weight
+        self.backup = backup


 class HealthMonitor(BaseDataModel):
-    def __init__(self, admin_state_up=None, delay=None, expected_codes=None,
-                 healthmonitor_id=None, http_method=None, max_retries=None,
-                 max_retries_down=None, name=None, pool_id=None, timeout=None,
-                 type=None, url_path=None):
+    def __init__(self, admin_state_up=Unset, delay=Unset, expected_codes=Unset,
+                 healthmonitor_id=Unset, http_method=Unset, max_retries=Unset,
+                 max_retries_down=Unset, name=Unset, pool_id=Unset,
+                 timeout=Unset, type=Unset, url_path=Unset):

         self.admin_state_up = admin_state_up
         self.delay = delay
@@ -173,9 +210,10 @@ class HealthMonitor(BaseDataModel):


 class L7Policy(BaseDataModel):
-    def __init__(self, action=None, admin_state_up=None, description=None,
-                 l7policy_id=None, listener_id=None, name=None, position=None,
-                 redirect_pool_id=None, redirect_url=None, rules=None):
+    def __init__(self, action=Unset, admin_state_up=Unset, description=Unset,
+                 l7policy_id=Unset, listener_id=Unset, name=Unset,
+                 position=Unset, redirect_pool_id=Unset, redirect_url=Unset,
+                 rules=Unset):

         self.action = action
         self.admin_state_up = admin_state_up
@@ -186,13 +224,13 @@ class L7Policy(BaseDataModel):
         self.position = position
         self.redirect_pool_id = redirect_pool_id
         self.redirect_url = redirect_url
-        self.rules = rules or []
+        self.rules = rules


 class L7Rule(BaseDataModel):
-    def __init__(self, admin_state_up=None, compare_type=None, invert=None,
-                 key=None, l7policy_id=None, l7rule_id=None, type=None,
-                 value=None):
+    def __init__(self, admin_state_up=Unset, compare_type=Unset, invert=Unset,
+                 key=Unset, l7policy_id=Unset, l7rule_id=Unset, type=Unset,
+                 value=Unset):

         self.admin_state_up = admin_state_up
         self.compare_type = compare_type
@@ -205,10 +243,12 @@ class L7Rule(BaseDataModel):


 class VIP(BaseDataModel):
-    def __init__(self, vip_address=None, vip_network_id=None, vip_port_id=None,
-                 vip_subnet_id=None):
+    def __init__(self, vip_address=Unset, vip_network_id=Unset,
+                 vip_port_id=Unset, vip_subnet_id=Unset,
+                 vip_qos_policy_id=Unset):

         self.vip_address = vip_address
         self.vip_network_id = vip_network_id
         self.vip_port_id = vip_port_id
         self.vip_subnet_id = vip_subnet_id
+        self.vip_qos_policy_id = vip_qos_policy_id
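A short behavior sketch of the new `Unset` sentinel and the `render_unsets` flag, following the definitions in the hunks above (interpreter-style, output abbreviated):

```python
from octavia.api.drivers import data_models as driver_dm

lb = driver_dm.LoadBalancer(name='web-lb')   # every other field stays Unset

# Unset is falsey, so drivers can test optional fields directly.
bool(driver_dm.Unset)           # False
bool(lb.description)            # False (still Unset)

# Unset fields are skipped by default...
lb.to_dict()                    # {'name': 'web-lb'}

# ...or rendered as None when a full dictionary is needed.
lb.to_dict(render_unsets=True)  # {'name': 'web-lb', 'description': None, ...}
```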
octavia/api/drivers/driver_factory.py (new file, 50 lines)
@@ -0,0 +1,50 @@
# Copyright 2018 Rackspace, US Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from oslo_log import log as logging
from stevedore import driver as stevedore_driver
from wsme import types as wtypes

from octavia.common import exceptions

CONF = cfg.CONF
LOG = logging.getLogger(__name__)


def get_driver(provider):
    # If this came in None it must be a load balancer that existed before
    # provider support was added. These must be of type 'amphora' and not
    # whatever the current "default" is set to.
    if isinstance(provider, wtypes.UnsetType):
        provider = CONF.api_settings.default_provider_driver
    elif not provider:
        provider = 'amphora'

    if provider not in CONF.api_settings.enabled_provider_drivers:
        LOG.warning("Requested provider driver '%s' was not enabled in the "
                    "configuration file.", provider)
        raise exceptions.ProviderNotEnabled(prov=provider)

    try:
        driver = stevedore_driver.DriverManager(
            namespace='octavia.api.drivers',
            name=provider,
            invoke_on_load=True).driver
        driver.name = provider
    except Exception as e:
        LOG.error('Unable to load provider driver %s due to: %s',
                  provider, e)
        raise exceptions.ProviderNotFound(prov=provider)
    return driver
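A usage sketch of the factory, assuming the configuration from the sample above (the unknown provider name is made up):

```python
from octavia.api.drivers import driver_factory
from octavia.common import exceptions

# 'amphora' is in enabled_provider_drivers, so this loads the stevedore
# plugin registered under the 'octavia.api.drivers' namespace.
driver = driver_factory.get_driver('amphora')
driver.name                          # 'amphora'

# A provider that is not enabled fails before stevedore is even consulted.
try:
    driver_factory.get_driver('acme')    # hypothetical provider name
except exceptions.ProviderNotEnabled:
    pass
```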
@@ -27,14 +27,15 @@ class NoopManager(object):
         self.driverconfig = {}

     # Load Balancer
-    def create_vip_port(self, loadbalancer_id, vip_dictionary):
+    def create_vip_port(self, loadbalancer_id, project_id, vip_dictionary):
         LOG.debug('Provider %s no-op, create_vip_port loadbalancer %s',
                   self.__class__.__name__, loadbalancer_id)

-        self.driverconfig[loadbalancer_id] = (loadbalancer_id, vip_dictionary,
+        self.driverconfig[loadbalancer_id] = (loadbalancer_id, project_id,
+                                              vip_dictionary,
                                               'create_vip_port')

-        vip_address = vip_dictionary.get('vip_address', '192.0.2.5')
+        vip_address = vip_dictionary.get('vip_address', '198.0.2.5')
         vip_network_id = vip_dictionary.get('vip_network_id',
                                             uuidutils.generate_uuid())
         vip_port_id = vip_dictionary.get('vip_port_id',
@@ -222,8 +223,9 @@ class NoopProviderDriver(driver_base.ProviderDriver):
         self.driver = NoopManager()

     # Load Balancer
-    def create_vip_port(self, loadbalancer_id, vip_dictionary):
-        return self.driver.create_vip_port(loadbalancer_id, vip_dictionary)
+    def create_vip_port(self, loadbalancer_id, project_id, vip_dictionary):
+        return self.driver.create_vip_port(loadbalancer_id, project_id,
+                                           vip_dictionary)

     def loadbalancer_create(self, loadbalancer):
         self.driver.loadbalancer_create(loadbalancer)
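The no-op driver is mainly useful in tests. A sketch of the fall-through defaults in `create_vip_port`, assuming the no-op classes live in a `noop_driver` module (the import path and IDs here are illustrative):

```python
from octavia.api.drivers.noop_driver import driver as noop_driver

noop = noop_driver.NoopProviderDriver()
vip = noop.create_vip_port('lb-id', 'project-id', {})
# With an empty VIP dictionary the manager fills in defaults, e.g. a
# vip_address of '198.0.2.5' and generated UUIDs for the network, port,
# and subnet IDs (see the hunk above).
```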
@@ -19,8 +19,11 @@ from octavia.api.drivers import exceptions


 class ProviderDriver(object):
+    # name is for internal Octavia use and should not be used by drivers
+    name = None
+
     # Load Balancer
-    def create_vip_port(self, loadbalancer_id, vip_dictionary):
+    def create_vip_port(self, loadbalancer_id, project_id, vip_dictionary):
         """Creates a port for a load balancer VIP.

         If the driver supports creating VIP ports, the driver will create a
@@ -30,6 +33,8 @@ class ProviderDriver(object):

         :param loadbalancer_id: ID of loadbalancer.
         :type loadbalancer_id: string
+        :param project_id: The project ID to create the VIP under.
+        :type project_id: string
         :param: vip_dictionary: The VIP dictionary.
         :type vip_dictionary: dict
         :returns: VIP dictionary with vip_port_id.
@@ -37,7 +42,11 @@
         :raises NotImplementedError: The driver does not support creating
                                      VIP ports.
         """
-        raise exceptions.NotImplementedError()
+        raise exceptions.NotImplementedError(
+            user_fault_string='This provider does not support creating VIP '
+                              'ports.',
+            operator_fault_string='This provider does not support creating '
+                                  'VIP ports. Octavia will create it.')

     def loadbalancer_create(self, loadbalancer):
         """Creates a new load balancer.
@@ -50,7 +59,11 @@
         :raises UnsupportedOptionError: The driver does not
                                         support one of the configuration options.
         """
-        raise exceptions.NotImplementedError()
+        raise exceptions.NotImplementedError(
+            user_fault_string='This provider does not support creating '
+                              'load balancers.',
+            operator_fault_string='This provider does not support creating '
+                                  'load balancers. What?')

     def loadbalancer_delete(self, loadbalancer_id, cascade=False):
         """Deletes a load balancer.
@@ -64,7 +77,11 @@
         :raises DriverError: An unexpected error occurred in the driver.
         :raises NotImplementedError: if driver does not support request.
         """
-        raise exceptions.NotImplementedError()
+        raise exceptions.NotImplementedError(
+            user_fault_string='This provider does not support deleting '
+                              'load balancers.',
+            operator_fault_string='This provider does not support deleting '
+                                  'load balancers.')

     def loadbalancer_failover(self, loadbalancer_id):
         """Performs a fail over of a load balancer.
@@ -75,7 +92,11 @@
         :raises DriverError: An unexpected error occurred in the driver.
         :raises: NotImplementedError if driver does not support request.
         """
-        raise exceptions.NotImplementedError()
+        raise exceptions.NotImplementedError(
+            user_fault_string='This provider does not support failing over '
+                              'load balancers.',
+            operator_fault_string='This provider does not support failing '
+                                  'over load balancers.')

     def loadbalancer_update(self, loadbalancer):
         """Updates a load balancer.
@@ -88,7 +109,11 @@
         :raises UnsupportedOptionError: The driver does not
                                         support one of the configuration options.
         """
-        raise exceptions.NotImplementedError()
+        raise exceptions.NotImplementedError(
+            user_fault_string='This provider does not support updating '
+                              'load balancers.',
+            operator_fault_string='This provider does not support updating '
+                                  'load balancers.')

     # Listener
     def listener_create(self, listener):
@@ -102,7 +127,11 @@
         :raises UnsupportedOptionError: if driver does not
                                         support one of the configuration options.
         """
-        raise exceptions.NotImplementedError()
+        raise exceptions.NotImplementedError(
+            user_fault_string='This provider does not support creating '
+                              'listeners.',
+            operator_fault_string='This provider does not support creating '
+                                  'listeners.')

     def listener_delete(self, listener_id):
         """Deletes a listener.
@@ -113,7 +142,11 @@
         :raises DriverError: An unexpected error occurred in the driver.
         :raises NotImplementedError: if driver does not support request.
         """
-        raise exceptions.NotImplementedError()
+        raise exceptions.NotImplementedError(
+            user_fault_string='This provider does not support deleting '
+                              'listeners.',
+            operator_fault_string='This provider does not support deleting '
+                                  'listeners.')

     def listener_update(self, listener):
         """Updates a listener.
@@ -126,7 +159,11 @@
         :raises UnsupportedOptionError: if driver does not
                                         support one of the configuration options.
         """
-        raise exceptions.NotImplementedError()
+        raise exceptions.NotImplementedError(
+            user_fault_string='This provider does not support updating '
+                              'listeners.',
+            operator_fault_string='This provider does not support updating '
+                                  'listeners.')

     # Pool
     def pool_create(self, pool):
@@ -140,7 +177,11 @@
         :raises UnsupportedOptionError: if driver does not
                                         support one of the configuration options.
         """
-        raise exceptions.NotImplementedError()
+        raise exceptions.NotImplementedError(
+            user_fault_string='This provider does not support creating '
+                              'pools.',
+            operator_fault_string='This provider does not support creating '
+                                  'pools.')

     def pool_delete(self, pool_id):
         """Deletes a pool and its members.
@@ -151,7 +192,11 @@
         :raises DriverError: An unexpected error occurred in the driver.
         :raises NotImplementedError: if driver does not support request.
         """
-        raise exceptions.NotImplementedError()
+        raise exceptions.NotImplementedError(
+            user_fault_string='This provider does not support deleting '
+                              'pools.',
+            operator_fault_string='This provider does not support deleting '
+                                  'pools.')

     def pool_update(self, pool):
         """Updates a pool.
@@ -164,7 +209,11 @@
         :raises UnsupportedOptionError: if driver does not
                                         support one of the configuration options.
         """
-        raise exceptions.NotImplementedError()
+        raise exceptions.NotImplementedError(
+            user_fault_string='This provider does not support updating '
+                              'pools.',
+            operator_fault_string='This provider does not support updating '
+                                  'pools.')

     # Member
     def member_create(self, member):
@@ -178,7 +227,11 @@
         :raises UnsupportedOptionError: if driver does not
                                         support one of the configuration options.
         """
-        raise exceptions.NotImplementedError()
+        raise exceptions.NotImplementedError(
+            user_fault_string='This provider does not support creating '
+                              'members.',
+            operator_fault_string='This provider does not support creating '
+                                  'members.')

     def member_delete(self, member_id):
         """Deletes a pool member.
@@ -189,7 +242,11 @@
         :raises DriverError: An unexpected error occurred in the driver.
         :raises NotImplementedError: if driver does not support request.
         """
-        raise exceptions.NotImplementedError()
+        raise exceptions.NotImplementedError(
+            user_fault_string='This provider does not support deleting '
+                              'members.',
+            operator_fault_string='This provider does not support deleting '
+                                  'members.')

     def member_update(self, member):
         """Updates a pool member.
@@ -202,7 +259,11 @@
         :raises UnsupportedOptionError: if driver does not
                                         support one of the configuration options.
         """
-        raise exceptions.NotImplementedError()
+        raise exceptions.NotImplementedError(
+            user_fault_string='This provider does not support updating '
+                              'members.',
+            operator_fault_string='This provider does not support updating '
+                                  'members.')

     def member_batch_update(self, members):
         """Creates, updates, or deletes a set of pool members.
@@ -215,7 +276,11 @@
         :raises UnsupportedOptionError: if driver does not
                                         support one of the configuration options.
         """
-        raise exceptions.NotImplementedError()
+        raise exceptions.NotImplementedError(
+            user_fault_string='This provider does not support batch '
+                              'updating members.',
+            operator_fault_string='This provider does not support batch '
+                                  'updating members.')

     # Health Monitor
     def health_monitor_create(self, healthmonitor):
@@ -229,7 +294,11 @@
         :raises UnsupportedOptionError: if driver does not
                                         support one of the configuration options.
         """
-        raise exceptions.NotImplementedError()
+        raise exceptions.NotImplementedError(
+            user_fault_string='This provider does not support creating '
+                              'health monitors.',
+            operator_fault_string='This provider does not support creating '
+                                  'health monitors.')

     def health_monitor_delete(self, healthmonitor_id):
         """Deletes a healthmonitor_id.
@@ -240,7 +309,11 @@
         :raises DriverError: An unexpected error occurred in the driver.
         :raises NotImplementedError: if driver does not support request.
         """
-        raise exceptions.NotImplementedError()
+        raise exceptions.NotImplementedError(
+            user_fault_string='This provider does not support deleting '
+                              'health monitors.',
+            operator_fault_string='This provider does not support deleting '
+                                  'health monitors.')

     def health_monitor_update(self, healthmonitor):
         """Updates a health monitor.
@@ -253,7 +326,11 @@
         :raises UnsupportedOptionError: if driver does not
                                         support one of the configuration options.
         """
-        raise exceptions.NotImplementedError()
+        raise exceptions.NotImplementedError(
+            user_fault_string='This provider does not support updating '
+                              'health monitors.',
+            operator_fault_string='This provider does not support updating '
+                                  'health monitors.')

     # L7 Policy
     def l7policy_create(self, l7policy):
@@ -267,7 +344,11 @@
         :raises UnsupportedOptionError: if driver does not
                                         support one of the configuration options.
         """
-        raise exceptions.NotImplementedError()
+        raise exceptions.NotImplementedError(
+            user_fault_string='This provider does not support creating '
+                              'l7policies.',
+            operator_fault_string='This provider does not support creating '
+                                  'l7policies.')

     def l7policy_delete(self, l7policy_id):
         """Deletes an L7 policy.
@@ -278,7 +359,11 @@
         :raises DriverError: An unexpected error occurred in the driver.
         :raises NotImplementedError: if driver does not support request.
         """
-        raise exceptions.NotImplementedError()
+        raise exceptions.NotImplementedError(
+            user_fault_string='This provider does not support deleting '
+                              'l7policies.',
+            operator_fault_string='This provider does not support deleting '
+                                  'l7policies.')

     def l7policy_update(self, l7policy):
         """Updates an L7 policy.
@@ -291,7 +376,11 @@
         :raises UnsupportedOptionError: if driver does not
                                         support one of the configuration options.
         """
-        raise exceptions.NotImplementedError()
+        raise exceptions.NotImplementedError(
+            user_fault_string='This provider does not support updating '
+                              'l7policies.',
+            operator_fault_string='This provider does not support updating '
+                                  'l7policies.')

     # L7 Rule
     def l7rule_create(self, l7rule):
@@ -305,7 +394,11 @@
         :raises UnsupportedOptionError: if driver does not
                                         support one of the configuration options.
         """
-        raise exceptions.NotImplementedError()
+        raise exceptions.NotImplementedError(
+            user_fault_string='This provider does not support creating '
+                              'l7rules.',
+            operator_fault_string='This provider does not support creating '
+                                  'l7rules.')

     def l7rule_delete(self, l7rule_id):
         """Deletes an L7 rule.
@@ -316,7 +409,11 @@
         :raises DriverError: An unexpected error occurred in the driver.
         :raises NotImplementedError: if driver does not support request.
         """
-        raise exceptions.NotImplementedError()
+        raise exceptions.NotImplementedError(
+            user_fault_string='This provider does not support deleting '
+                              'l7rules.',
+            operator_fault_string='This provider does not support deleting '
+                                  'l7rules.')

     def l7rule_update(self, l7rule):
         """Updates an L7 rule.
@@ -329,7 +426,11 @@
         :raises UnsupportedOptionError: if driver does not
                                         support one of the configuration options.
         """
-        raise exceptions.NotImplementedError()
+        raise exceptions.NotImplementedError(
+            user_fault_string='This provider does not support updating '
+                              'l7rules.',
+            operator_fault_string='This provider does not support updating '
+                                  'l7rules.')

     # Flavor
     def get_supported_flavor_metadata(self):
@@ -342,7 +443,11 @@
         :raises DriverError: An unexpected error occurred in the driver.
         :raises NotImplementedError: The driver does not support flavors.
         """
-        raise exceptions.NotImplementedError()
+        raise exceptions.NotImplementedError(
+            user_fault_string='This provider does not support getting the '
+                              'supported flavor metadata.',
+            operator_fault_string='This provider does not support getting '
+                                  'the supported flavor metadata.')

     def validate_flavor(self, flavor_metadata):
         """Validates if driver can support the flavor.
@@ -355,4 +460,8 @@
         :raises UnsupportedOptionError: if driver does not
                                         support one of the configuration options.
         """
-        raise exceptions.NotImplementedError()
+        raise exceptions.NotImplementedError(
+            user_fault_string='This provider does not support validating '
+                              'flavors.',
+            operator_fault_string='This provider does not support validating '
+                                  'the supported flavor metadata.')
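`ProviderDriver` is the interface third-party providers subclass; anything left un-overridden now fails with a pair of human-readable fault strings instead of a bare exception. A minimal sketch of a partial driver (class and backend names are hypothetical):

```python
from octavia.api.drivers import provider_base as driver_base


class ExampleProviderDriver(driver_base.ProviderDriver):
    """Hypothetical driver supporting only load balancer create/delete."""

    def loadbalancer_create(self, loadbalancer):
        # Hand the provider data model to the vendor backend here.
        ...

    def loadbalancer_delete(self, loadbalancer_id, cascade=False):
        ...

    # Every other method inherits the NotImplementedError behavior above,
    # which the API layer converts into a user-facing error (see the wsme
    # 'debug': False change earlier in this patch regarding 501 responses).
```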
octavia/api/drivers/utils.py (new file, 353 lines)
@@ -0,0 +1,353 @@
# Copyright 2018 Rackspace, US Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy

from oslo_config import cfg
from oslo_log import log as logging
from stevedore import driver as stevedore_driver

from octavia.api.drivers import data_models as driver_dm
from octavia.api.drivers import exceptions as driver_exceptions
from octavia.common import data_models
from octavia.common import exceptions
from octavia.common.tls_utils import cert_parser
from octavia.i18n import _

LOG = logging.getLogger(__name__)
CONF = cfg.CONF


def call_provider(provider, driver_method, *args, **kwargs):
    """Wrap calls to the provider driver to handle driver errors.

    This allows Octavia to return user friendly errors when a provider driver
    has an issue.

    :param driver_method: Method in the driver to call.
    :raises ProviderDriverError: Catch all driver error.
    :raises ProviderNotImplementedError: The driver doesn't support this
                                         action.
    :raises ProviderUnsupportedOptionError: The driver doesn't support a
                                            provided option.
    """

    try:
        return driver_method(*args, **kwargs)
    except driver_exceptions.DriverError as e:
        LOG.exception("Provider '%s' raised a driver error: %s",
                      provider, e.operator_fault_string)
        raise exceptions.ProviderDriverError(prov=provider,
                                             user_msg=e.user_fault_string)
    except driver_exceptions.NotImplementedError as e:
        LOG.info("Provider '%s' raised a not implemented error: %s",
                 provider, e.operator_fault_string)
        raise exceptions.ProviderNotImplementedError(
            prov=provider, user_msg=e.user_fault_string)
    except driver_exceptions.UnsupportedOptionError as e:
        LOG.info("Provider '%s' raised an unsupported option error: "
                 "%s", provider, e.operator_fault_string)
        raise exceptions.ProviderUnsupportedOptionError(
            prov=provider, user_msg=e.user_fault_string)
    except Exception as e:
        LOG.exception("Provider '%s' raised an unknown error: %s",
                      provider, e)
        raise exceptions.ProviderDriverError(prov=provider, user_msg=e)
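The controllers below always dispatch to a driver through this wrapper, so driver-raised exceptions are logged once and re-raised as Octavia API exceptions. A sketch (the load balancer ID is a placeholder):

```python
from octavia.api.drivers import driver_factory
from octavia.api.drivers import utils as driver_utils

driver = driver_factory.get_driver('amphora')
driver_utils.call_provider(
    driver.name, driver.loadbalancer_failover, 'lb-uuid')  # placeholder ID
# Mapping performed by the wrapper:
#   DriverError            -> ProviderDriverError
#   NotImplementedError    -> ProviderNotImplementedError
#   UnsupportedOptionError -> ProviderUnsupportedOptionError
#   anything else          -> ProviderDriverError
```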
def _base_to_provider_dict(current_dict, include_project_id=False):
    new_dict = copy.deepcopy(current_dict)
    if 'provisioning_status' in new_dict:
        del new_dict['provisioning_status']
    if 'operating_status' in new_dict:
        del new_dict['operating_status']
    if 'provider' in new_dict:
        del new_dict['provider']
    if 'created_at' in new_dict:
        del new_dict['created_at']
    if 'updated_at' in new_dict:
        del new_dict['updated_at']
    if 'enabled' in new_dict:
        new_dict['admin_state_up'] = new_dict.pop('enabled')
    if 'project_id' in new_dict and not include_project_id:
        del new_dict['project_id']
    return new_dict


# Note: The provider dict returned from this method will have provider
#       data model objects in it.
def lb_dict_to_provider_dict(lb_dict, vip=None,
                             db_pools=None, db_listeners=None):
    new_lb_dict = _base_to_provider_dict(lb_dict, include_project_id=True)
    new_lb_dict['loadbalancer_id'] = new_lb_dict.pop('id')
    if vip:
        new_lb_dict['vip_address'] = vip.ip_address
        new_lb_dict['vip_network_id'] = vip.network_id
        new_lb_dict['vip_port_id'] = vip.port_id
        new_lb_dict['vip_subnet_id'] = vip.subnet_id
        new_lb_dict['vip_qos_policy_id'] = vip.qos_policy_id

    if db_pools:
        new_lb_dict['pools'] = db_pools_to_provider_pools(db_pools)
    if db_listeners:
        new_lb_dict['listeners'] = db_listeners_to_provider_listeners(
            db_listeners)
    return new_lb_dict


def db_listeners_to_provider_listeners(db_listeners):
    provider_listeners = []
    for listener in db_listeners:
        new_listener_dict = listener_dict_to_provider_dict(
            listener.to_dict(recurse=True))
        if ('default_pool' in new_listener_dict and
                new_listener_dict['default_pool']):
            provider_pool = db_pool_to_provider_pool(listener.default_pool)
            new_listener_dict['default_pool_id'] = provider_pool.pool_id
            new_listener_dict['default_pool'] = provider_pool
        if 'l7policies' in new_listener_dict:
            new_listener_dict['l7policies'] = (
                db_l7policies_to_provider_l7policies(listener.l7policies))
        provider_listeners.append(
            driver_dm.Listener.from_dict(new_listener_dict))
    return provider_listeners


def listener_dict_to_provider_dict(listener_dict):
    new_listener_dict = _base_to_provider_dict(listener_dict)
    new_listener_dict['listener_id'] = new_listener_dict.pop('id')
    if 'load_balancer_id' in new_listener_dict:
        new_listener_dict['loadbalancer_id'] = new_listener_dict.pop(
            'load_balancer_id')

    # Pull the certs out of the certificate manager to pass to the provider
    if 'tls_certificate_id' in new_listener_dict:
        del new_listener_dict['tls_certificate_id']
    if 'sni_containers' in new_listener_dict:
        del new_listener_dict['sni_containers']
    listener_obj = data_models.Listener(**listener_dict)
    if listener_obj.tls_certificate_id or listener_obj.sni_containers:
        SNI_objs = []
        for sni in listener_obj.sni_containers:
            if isinstance(sni, data_models.SNI):
                SNI_objs.append(sni)
            elif isinstance(sni, dict):
                sni_obj = data_models.SNI(**sni)
                SNI_objs.append(sni_obj)
            else:
                raise Exception(_('Invalid SNI container on listener'))
        listener_obj.sni_containers = SNI_objs
        cert_manager = stevedore_driver.DriverManager(
            namespace='octavia.cert_manager',
            name=CONF.certificates.cert_manager,
            invoke_on_load=True,
        ).driver
        cert_dict = cert_parser.load_certificates_data(cert_manager,
                                                       listener_obj)
        new_listener_dict['default_tls_container'] = cert_dict['tls_cert']
        new_listener_dict['sni_containers'] = cert_dict['sni_certs']

    # Remove the DB back references
    if 'load_balancer' in new_listener_dict:
        del new_listener_dict['load_balancer']
    if 'peer_port' in new_listener_dict:
        del new_listener_dict['peer_port']
    if 'pools' in new_listener_dict:
        del new_listener_dict['pools']
    if 'stats' in new_listener_dict:
        del new_listener_dict['stats']

    if ('default_pool' in new_listener_dict and
            new_listener_dict['default_pool']):
        pool = new_listener_dict.pop('default_pool')
        new_listener_dict['default_pool'] = pool_dict_to_provider_dict(pool)
    provider_l7policies = []
    l7policies = new_listener_dict.pop('l7policies')
    for l7policy in l7policies:
        provider_l7policy = l7policy_dict_to_provider_dict(l7policy)
        provider_l7policies.append(provider_l7policy)
    new_listener_dict['l7policies'] = provider_l7policies
    return new_listener_dict


def db_pools_to_provider_pools(db_pools):
    provider_pools = []
    for pool in db_pools:
        provider_pools.append(db_pool_to_provider_pool(pool))
    return provider_pools


def db_pool_to_provider_pool(db_pool):
    new_pool_dict = pool_dict_to_provider_dict(db_pool.to_dict(recurse=True))
    # Replace the sub-dicts with objects
    if 'health_monitor' in new_pool_dict:
        del new_pool_dict['health_monitor']
    if db_pool.health_monitor:
        provider_healthmonitor = db_HM_to_provider_HM(db_pool.health_monitor)
        new_pool_dict['healthmonitor'] = provider_healthmonitor
    # Don't leave a 'members' None here, we want it to pass through to Unset
    if 'members' in new_pool_dict:
        del new_pool_dict['members']
    if db_pool.members:
        provider_members = db_members_to_provider_members(db_pool.members)
        new_pool_dict['members'] = provider_members
    return driver_dm.Pool.from_dict(new_pool_dict)


def pool_dict_to_provider_dict(pool_dict):
    new_pool_dict = _base_to_provider_dict(pool_dict)
    new_pool_dict['pool_id'] = new_pool_dict.pop('id')
    # Remove the DB back references
    if ('session_persistence' in new_pool_dict and
            new_pool_dict['session_persistence']):
        if 'pool_id' in new_pool_dict['session_persistence']:
            del new_pool_dict['session_persistence']['pool_id']
        if 'pool' in new_pool_dict['session_persistence']:
            del new_pool_dict['session_persistence']['pool']
    if 'l7policies' in new_pool_dict:
        del new_pool_dict['l7policies']
    if 'listeners' in new_pool_dict:
        del new_pool_dict['listeners']
    if 'load_balancer' in new_pool_dict:
        del new_pool_dict['load_balancer']
    if 'load_balancer_id' in new_pool_dict:
        new_pool_dict['loadbalancer_id'] = new_pool_dict.pop(
            'load_balancer_id')
    if 'health_monitor' in new_pool_dict and new_pool_dict['health_monitor']:
        hm = new_pool_dict.pop('health_monitor')
        new_pool_dict['healthmonitor'] = hm_dict_to_provider_dict(hm)
    if 'members' in new_pool_dict and new_pool_dict['members']:
        members = new_pool_dict.pop('members')
        provider_members = []
        for member in members:
            provider_member = member_dict_to_provider_dict(member)
            provider_members.append(provider_member)
        new_pool_dict['members'] = provider_members
    return new_pool_dict


def db_members_to_provider_members(db_members):
    provider_members = []
    for member in db_members:
        new_member_dict = member_dict_to_provider_dict(member.to_dict())
        provider_members.append(driver_dm.Member.from_dict(new_member_dict))
    return provider_members


def member_dict_to_provider_dict(member_dict):
    new_member_dict = _base_to_provider_dict(member_dict)
    new_member_dict['member_id'] = new_member_dict.pop('id')
    if 'ip_address' in new_member_dict:
        new_member_dict['address'] = new_member_dict.pop('ip_address')
    # Remove the DB back references
    if 'pool' in new_member_dict:
        del new_member_dict['pool']
    return new_member_dict


def db_HM_to_provider_HM(db_hm):
    new_HM_dict = hm_dict_to_provider_dict(db_hm.to_dict())
    return driver_dm.HealthMonitor.from_dict(new_HM_dict)


def hm_dict_to_provider_dict(hm_dict):
    new_hm_dict = _base_to_provider_dict(hm_dict)
    new_hm_dict['healthmonitor_id'] = new_hm_dict.pop('id')
    if 'fall_threshold' in new_hm_dict:
        new_hm_dict['max_retries_down'] = new_hm_dict.pop('fall_threshold')
    if 'rise_threshold' in new_hm_dict:
        new_hm_dict['max_retries'] = new_hm_dict.pop('rise_threshold')
    # Remove the DB back references
    if 'pool' in new_hm_dict:
        del new_hm_dict['pool']
    return new_hm_dict


def db_l7policies_to_provider_l7policies(db_l7policies):
    provider_l7policies = []
    for l7policy in db_l7policies:
        new_l7policy_dict = l7policy_dict_to_provider_dict(
            l7policy.to_dict(recurse=True))
        if 'l7rules' in new_l7policy_dict:
            del new_l7policy_dict['l7rules']
            new_l7rules = db_l7rules_to_provider_l7rules(l7policy.l7rules)
            new_l7policy_dict['rules'] = new_l7rules
        provider_l7policies.append(
            driver_dm.L7Policy.from_dict(new_l7policy_dict))
    return provider_l7policies


def l7policy_dict_to_provider_dict(l7policy_dict):
    new_l7policy_dict = _base_to_provider_dict(l7policy_dict)
    new_l7policy_dict['l7policy_id'] = new_l7policy_dict.pop('id')
    # Remove the DB back references
    if 'listener' in new_l7policy_dict:
        del new_l7policy_dict['listener']
    if 'redirect_pool' in new_l7policy_dict:
        del new_l7policy_dict['redirect_pool']
    if 'l7rules' in new_l7policy_dict and new_l7policy_dict['l7rules']:
        rules = new_l7policy_dict.pop('l7rules')
        provider_rules = []
        for rule in rules:
            provider_rule = l7rule_dict_to_provider_dict(rule)
            provider_rules.append(provider_rule)
        new_l7policy_dict['rules'] = provider_rules
    return new_l7policy_dict


def db_l7rules_to_provider_l7rules(db_l7rules):
    provider_l7rules = []
    for l7rule in db_l7rules:
        new_l7rule_dict = l7rule_dict_to_provider_dict(l7rule.to_dict())
        provider_l7rules.append(driver_dm.L7Rule.from_dict(new_l7rule_dict))
    return provider_l7rules


def l7rule_dict_to_provider_dict(l7rule_dict):
    new_l7rule_dict = _base_to_provider_dict(l7rule_dict)
    new_l7rule_dict['l7rule_id'] = new_l7rule_dict.pop('id')
    # Remove the DB back references
    if 'l7policy' in new_l7rule_dict:
        del new_l7rule_dict['l7policy']
    return new_l7rule_dict


def vip_dict_to_provider_dict(vip_dict):
    new_vip_dict = {}
    if 'ip_address' in vip_dict:
        new_vip_dict['vip_address'] = vip_dict['ip_address']
    if 'network_id' in vip_dict:
        new_vip_dict['vip_network_id'] = vip_dict['network_id']
    if 'port_id' in vip_dict:
        new_vip_dict['vip_port_id'] = vip_dict['port_id']
    if 'subnet_id' in vip_dict:
        new_vip_dict['vip_subnet_id'] = vip_dict['subnet_id']
    if 'qos_policy_id' in vip_dict:
        new_vip_dict['vip_qos_policy_id'] = vip_dict['qos_policy_id']
    return new_vip_dict


def provider_vip_dict_to_vip_obj(vip_dictionary):
    vip_obj = data_models.Vip()
    if 'vip_address' in vip_dictionary:
        vip_obj.ip_address = vip_dictionary['vip_address']
    if 'vip_network_id' in vip_dictionary:
        vip_obj.network_id = vip_dictionary['vip_network_id']
    if 'vip_port_id' in vip_dictionary:
        vip_obj.port_id = vip_dictionary['vip_port_id']
    if 'vip_subnet_id' in vip_dictionary:
        vip_obj.subnet_id = vip_dictionary['vip_subnet_id']
    if 'vip_qos_policy_id' in vip_dictionary:
        vip_obj.qos_policy_id = vip_dictionary['vip_qos_policy_id']
    return vip_obj
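The translation helpers rename keys between Octavia's DB schema and the provider schema. A small worked example for the VIP helpers defined above (the IDs are placeholders, and 203.0.113.9 is a documentation address):

```python
from octavia.api.drivers import utils as driver_utils

db_vip = {'ip_address': '203.0.113.9',
          'port_id': 'port-uuid',        # placeholder ID
          'qos_policy_id': None}

driver_utils.vip_dict_to_provider_dict(db_vip)
# {'vip_address': '203.0.113.9', 'vip_port_id': 'port-uuid',
#  'vip_qos_policy_id': None}

vip_obj = driver_utils.provider_vip_dict_to_vip_obj(
    {'vip_address': '203.0.113.9'})
vip_obj.ip_address                       # '203.0.113.9'
```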
@@ -197,6 +197,7 @@ class L7PolicyController(base.BaseController):
                 l7rule.L7RuleController(db_policy.id)._graph_create(
                     lock_session, r))

+        db_policy.l7rules = new_rules
         return db_policy

     @wsme_pecan.wsexpose(l7policy_types.L7PolicyRootResponse,
@@ -272,7 +272,8 @@ class ListenersController(base.BaseController):
                 l7p['redirect_pool_id'] = pool_id
                 new_l7ps.append(l7policy.L7PolicyController()._graph_create(
                     lock_session, l7p))
-        return db_listener, new_l7ps
+        db_listener.l7policies = new_l7ps
+        return db_listener

     @wsme_pecan.wsexpose(listener_types.ListenerRootResponse, wtypes.text,
                          body=listener_types.ListenerRootPUT, status_code=200)
@@ -22,6 +22,9 @@ import pecan
 from wsme import types as wtypes
 from wsmeext import pecan as wsme_pecan

+from octavia.api.drivers import data_models as driver_dm
+from octavia.api.drivers import driver_factory
+from octavia.api.drivers import utils as driver_utils
 from octavia.api.v2.controllers import base
 from octavia.api.v2.controllers import listener
 from octavia.api.v2.controllers import pool
@@ -249,6 +252,9 @@ class LoadBalancersController(base.BaseController):

         self._validate_vip_request_object(load_balancer)

+        # Load the driver early as it also provides validation
+        driver = driver_factory.get_driver(load_balancer.provider)
+
         lock_session = db_api.get_session(autocommit=False)
         try:
             if self.repositories.check_quota_met(
@@ -265,35 +271,58 @@
                 ))
             vip_dict = lb_dict.pop('vip', {})

+            # Make sure we store the right provider in the DB
+            lb_dict['provider'] = driver.name
+
             # NoneType can be weird here, have to force type a second time
             listeners = lb_dict.pop('listeners', []) or []
+            pools = lb_dict.pop('pools', []) or []

-            # TODO(johnsom) Remove provider and flavor from the lb_dict
-            # as they have not been implemented beyond the API yet.
-            # Remove these lines as they are implemented.
-            if 'provider' in lb_dict:
-                del lb_dict['provider']
-            if 'flavor_id' in lb_dict:
-                del lb_dict['flavor_id']
+            # TODO(johnsom) Remove flavor from the lb_dict
+            # as it has not been implemented beyond the API yet.
+            # Remove this line when it is implemented.
+            lb_dict.pop('flavor', None)

             db_lb = self.repositories.create_load_balancer_and_vip(
                 lock_session, lb_dict, vip_dict)

-            # create vip port if not exist
-            vip = self._create_vip_port_if_not_exist(db_lb)
+            # See if the provider driver wants to create the VIP port
+            try:
+                provider_vip_dict = driver_utils.vip_dict_to_provider_dict(
+                    vip_dict)
+                vip_dict = driver_utils.call_provider(
+                    driver.name, driver.create_vip_port, db_lb.id,
+                    db_lb.project_id, provider_vip_dict)
+                vip = driver_utils.provider_vip_dict_to_vip_obj(vip_dict)
+            except exceptions.ProviderNotImplementedError:
+                # create vip port if not exist, driver didn't want to create
+                # the VIP port
+                vip = self._create_vip_port_if_not_exist(db_lb)
+                LOG.info('Created VIP port %s for provider %s.',
+                         vip.port_id, driver.name)

             self.repositories.vip.update(
                 lock_session, db_lb.id,
                 ip_address=vip.ip_address,
                 port_id=vip.port_id,
                 network_id=vip.network_id,
-                subnet_id=vip.subnet_id
-            )
+                subnet_id=vip.subnet_id)

             if listeners or pools:
                 db_pools, db_lists = self._graph_create(
                     context.session, lock_session, db_lb, listeners, pools)

+            # Prepare the data for the driver data model
+            driver_lb_dict = driver_utils.lb_dict_to_provider_dict(
+                lb_dict, vip, db_pools, db_lists)
+
+            # Dispatch to the driver
+            LOG.info("Sending create Load Balancer %s to provider %s",
+                     db_lb.id, driver.name)
+            driver_utils.call_provider(
+                driver.name, driver.loadbalancer_create,
+                driver_dm.LoadBalancer.from_dict(driver_lb_dict))
+
             lock_session.commit()
         except odb_exceptions.DBDuplicateEntry:
             lock_session.rollback()
@@ -302,17 +331,6 @@
             with excutils.save_and_reraise_exception():
                 lock_session.rollback()

-        # Handler will be responsible for sending to controller
-        try:
-            LOG.info("Sending created Load Balancer %s to the handler",
-                     db_lb.id)
-            self.handler.create(db_lb)
-        except Exception:
-            with excutils.save_and_reraise_exception(reraise=False):
-                self.repositories.load_balancer.update(
-                    context.session, db_lb.id,
-                    provisioning_status=constants.ERROR)
-
         db_lb = self._get_db_lb(context.session, db_lb.id)

         result = self._convert_db_to_type(
@@ -394,9 +412,8 @@
                     attr=attr))
             p['load_balancer_id'] = db_lb.id
             p['project_id'] = db_lb.project_id
-            new_pool, new_hm, new_members = (
-                pool.PoolsController()._graph_create(
-                    session, lock_session, p))
+            new_pool = (pool.PoolsController()._graph_create(
+                session, lock_session, p))
             new_pools.append(new_pool)
             pool_name_ids[new_pool.name] = new_pool.id

@ -442,14 +459,39 @@ class LoadBalancersController(base.BaseController):
|
||||
wtypes.UnsetType) and
|
||||
db_lb.vip.qos_policy_id != load_balancer.vip_qos_policy_id):
|
||||
validate.qos_policy_exists(load_balancer.vip_qos_policy_id)
|
||||
self._test_lb_status(context.session, id)
|
||||
try:
|
||||
LOG.info("Sending updated Load Balancer %s to the handler", id)
|
||||
self.handler.update(db_lb, load_balancer)
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception(reraise=False):
|
||||
self.repositories.load_balancer.update(
|
||||
context.session, id, provisioning_status=constants.ERROR)
|
||||
|
||||
# Load the driver early as it also provides validation
|
||||
driver = driver_factory.get_driver(db_lb.provider)
|
||||
|
||||
with db_api.get_lock_session() as lock_session:
|
||||
self._test_lb_status(lock_session, id)
|
||||
|
||||
# Prepare the data for the driver data model
|
||||
lb_dict = load_balancer.to_dict(render_unsets=False)
|
||||
lb_dict['id'] = id
|
||||
vip_dict = lb_dict.pop('vip', {})
|
||||
lb_dict = driver_utils.lb_dict_to_provider_dict(lb_dict)
|
||||
if 'qos_policy_id' in vip_dict:
|
||||
lb_dict['vip_qos_policy_id'] = vip_dict['qos_policy_id']
|
||||
|
||||
# Dispatch to the driver
|
||||
LOG.info("Sending update Load Balancer %s to provider "
|
||||
"%s", id, driver.name)
|
||||
driver_utils.call_provider(
|
||||
driver.name, driver.loadbalancer_update,
|
||||
driver_dm.LoadBalancer.from_dict(lb_dict))
|
||||
|
||||
db_lb_dict = load_balancer.to_dict(render_unsets=False)
|
||||
if 'vip' in db_lb_dict:
|
||||
db_vip_dict = db_lb_dict.pop('vip')
|
||||
self.repositories.vip.update(lock_session, id, **db_vip_dict)
|
||||
if db_lb_dict:
|
||||
self.repositories.load_balancer.update(lock_session, id,
|
||||
**db_lb_dict)
|
||||
|
||||
# Force SQL alchemy to query the DB, otherwise we get inconsistent
|
||||
# results
|
||||
context.session.expire_all()
|
||||
db_lb = self._get_db_lb(context.session, id)
|
||||
result = self._convert_db_to_type(db_lb, lb_types.LoadBalancerResponse)
|
||||
return lb_types.LoadBalancerRootResponse(loadbalancer=result)
|
||||
@ -464,6 +506,9 @@ class LoadBalancersController(base.BaseController):
|
||||
self._auth_validate_action(context, db_lb.project_id,
|
||||
constants.RBAC_DELETE)
|
||||
|
||||
# Load the driver early as it also provides validation
|
||||
driver = driver_factory.get_driver(db_lb.provider)
|
||||
|
||||
with db_api.get_lock_session() as lock_session:
|
||||
if (db_lb.listeners or db_lb.pools) and not cascade:
|
||||
msg = _("Cannot delete Load Balancer %s - "
|
||||
@ -473,14 +518,10 @@ class LoadBalancersController(base.BaseController):
|
||||
self._test_lb_status(lock_session, id,
|
||||
lb_status=constants.PENDING_DELETE)
|
||||
|
||||
try:
|
||||
LOG.info("Sending deleted Load Balancer %s to the handler", id)
|
||||
self.handler.delete(db_lb, cascade)
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception(reraise=False):
|
||||
self.repositories.load_balancer.update(
|
||||
context.session, id,
|
||||
provisioning_status=constants.ERROR)
|
||||
LOG.info("Sending delete Load Balancer %s to provider %s",
|
||||
id, driver.name)
|
||||
driver_utils.call_provider(driver.name, driver.loadbalancer_delete,
|
||||
id, cascade)
|
||||
|
||||
@pecan.expose()
|
||||
def _lookup(self, id, *remainder):
|
||||
@ -579,13 +620,12 @@ class FailoverController(LoadBalancersController):
|
||||
self._auth_validate_action(context, db_lb.project_id,
|
||||
constants.RBAC_PUT_FAILOVER)
|
||||
|
||||
self._test_lb_status(context.session, self.lb_id)
|
||||
try:
|
||||
LOG.info("Sending failover request for lb %s to the handler",
|
||||
self.lb_id)
|
||||
self.handler.failover(db_lb)
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception(reraise=False):
|
||||
self.repositories.load_balancer.update(
|
||||
context.session, self.lb_id,
|
||||
provisioning_status=constants.ERROR)
|
||||
# Load the driver early as it also provides validation
|
||||
driver = driver_factory.get_driver(db_lb.provider)
|
||||
|
||||
with db_api.get_lock_session() as lock_session:
|
||||
self._test_lb_status(lock_session, self.lb_id)
|
||||
LOG.info("Sending failover request for load balancer %s to the "
|
||||
"provider %s", self.lb_id, driver.name)
|
||||
driver_utils.call_provider(
|
||||
driver.name, driver.loadbalancer_failover, self.lb_id)
|
||||
|
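Note: create, update, delete, and failover above all follow the same dispatch shape, so it is worth spelling out once. A condensed sketch of that pattern (not the exact controller code; the names are the ones used in the diff above):

    # Sketch of the controller-to-driver dispatch pattern in this patch:
    # 1. load the driver early, since driver_factory.get_driver() also
    #    validates that the requested provider is enabled and installed,
    # 2. do the database work under a lock session,
    # 3. convert the DB-style dict to the provider format and hand off,
    #    letting call_provider() map driver exceptions to API exceptions.
    driver = driver_factory.get_driver(db_lb.provider)
    with db_api.get_lock_session() as lock_session:
        self._test_lb_status(lock_session, id)
        lb_dict = driver_utils.lb_dict_to_provider_dict(
            load_balancer.to_dict(render_unsets=False))
        driver_utils.call_provider(
            driver.name, driver.loadbalancer_update,
            driver_dm.LoadBalancer.from_dict(lb_dict))

Because the provider call happens inside the lock session, a driver failure rolls the database back instead of leaving a half-created object, which is what the old handler-based flow could not guarantee.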
@@ -231,6 +231,7 @@ class PoolsController(base.BaseController):
                hm['project_id'] = db_pool.project_id
                new_hm = health_monitor.HealthMonitorController()._graph_create(
                    lock_session, hm)
                db_pool.health_monitor = new_hm

            # Now check quotas for members
            if members and self.repositories.check_quota_met(
@@ -245,7 +246,8 @@ class PoolsController(base.BaseController):
                    new_members.append(
                        member.MembersController(db_pool.id)._graph_create(
                            lock_session, m))
        return db_pool, new_hm, new_members
            db_pool.members = new_members
        return db_pool

    @wsme_pecan.wsexpose(pool_types.PoolRootResponse, wtypes.text,
                         body=pool_types.PoolRootPut, status_code=200)

@@ -120,9 +120,7 @@ class LoadBalancerPOST(BaseLoadBalancerType):
    project_id = wtypes.wsattr(wtypes.StringType(max_length=36))
    listeners = wtypes.wsattr([listener.ListenerSingleCreate], default=[])
    pools = wtypes.wsattr([pool.PoolSingleCreate], default=[])
    # TODO(johnsom) This should be dynamic based on the loaded providers
    # once providers are implemented.
    provider = wtypes.wsattr(wtypes.Enum(str, *constants.SUPPORTED_PROVIDERS))
    provider = wtypes.wsattr(wtypes.StringType(max_length=64))
    # TODO(johnsom) This should be dynamic based on the loaded flavors
    # once flavors are implemented.
    flavor_id = wtypes.wsattr(wtypes.Enum(str, *constants.SUPPORTED_FLAVORS))

@@ -102,6 +102,12 @@ api_opts = [
                help=_("Allow users to create TLS Terminated listeners?")),
    cfg.BoolOpt('allow_ping_health_monitors', default=True,
                help=_("Allow users to create PING type Health Monitors?")),
    cfg.ListOpt('enabled_provider_drivers',
                help=_('List of enabled provider drivers. Must match the '
                       'driver name in the octavia.api.drivers entrypoint.'),
                default=['amphora', 'octavia']),
    cfg.StrOpt('default_provider_driver', default='amphora',
               help=_('Default provider driver.')),
]

# Options only used by the amphora agent

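These two options feed the driver loading in the controllers above. For context, a plausible sketch of the get_driver() helper they call; the actual factory is added elsewhere in this patch, so the stevedore usage here is illustrative, not the committed code:

    from oslo_config import cfg
    from stevedore import driver as stevedore_driver

    from octavia.common import exceptions

    CONF = cfg.CONF


    def get_driver(provider):
        # An unset provider falls back to the configured default.
        if provider is None:
            provider = CONF.api_settings.default_provider_driver
        if provider not in CONF.api_settings.enabled_provider_drivers:
            # Enabled-list check: maps to an HTTP 400 for the caller.
            raise exceptions.ProviderNotEnabled(prov=provider)
        try:
            # Look the driver up by name in the octavia.api.drivers
            # entrypoint namespace mentioned in the option help above.
            return stevedore_driver.DriverManager(
                namespace='octavia.api.drivers',
                name=provider,
                invoke_on_load=True).driver
        except Exception:
            # Enabled in config but not installed: maps to an HTTP 501.
            raise exceptions.ProviderNotFound(prov=provider)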
@@ -193,8 +193,10 @@ DELTAS = 'deltas'
HEALTH_MON = 'health_mon'
LISTENER = 'listener'
LISTENERS = 'listeners'
LISTENER_ID = 'listener_id'
LOADBALANCER = 'loadbalancer'
LOADBALANCER_ID = 'loadbalancer_id'
LOAD_BALANCER_ID = 'load_balancer_id'
SERVER_GROUP_ID = 'server_group_id'
ANTI_AFFINITY = 'anti-affinity'
SOFT_ANTI_AFFINITY = 'soft-anti-affinity'
@@ -220,6 +222,10 @@ ADDED_PORTS = 'added_ports'
PORTS = 'ports'
MEMBER_PORTS = 'member_ports'
LOADBALANCER_TOPOLOGY = 'topology'
HEALTH_MONITOR_ID = 'health_monitor_id'
L7POLICY_ID = 'l7policy_id'
L7RULE_ID = 'l7rule_id'
LOAD_BALANCER_UPDATES = 'load_balancer_updates'

CERT_ROTATE_AMPHORA_FLOW = 'octavia-cert-rotate-amphora-flow'
CREATE_AMPHORA_FLOW = 'octavia-create-amphora-flow'
@@ -489,7 +495,6 @@ RBAC_GET_STATUS = 'get_status'
# PROVIDERS
# TODO(johnsom) When providers are implemented, this should be removed.
OCTAVIA = 'octavia'
SUPPORTED_PROVIDERS = OCTAVIA,

# FLAVORS
# TODO(johnsom) When flavors are implemented, this should be removed.

@@ -419,7 +419,7 @@ class LoadBalancer(BaseDataModel):
                 provisioning_status=None, operating_status=None, enabled=None,
                 topology=None, vip=None, listeners=None, amphorae=None,
                 pools=None, vrrp_group=None, server_group_id=None,
                 created_at=None, updated_at=None):
                 created_at=None, updated_at=None, provider=None):

        self.id = id
        self.project_id = project_id
@@ -437,6 +437,7 @@ class LoadBalancer(BaseDataModel):
        self.server_group_id = server_group_id
        self.created_at = created_at
        self.updated_at = updated_at
        self.provider = provider

    def update(self, update_dict):
        for key, value in update_dict.items():

@@ -340,3 +340,30 @@ class InvalidLimit(APIException):

class MissingVIPSecurityGroup(OctaviaException):
    message = _('VIP security group is missing for load balancer: %(lb_id)s')


class ProviderNotEnabled(APIException):
    msg = _("Provider '%(prov)s' is not enabled.")
    code = 400


class ProviderNotFound(APIException):
    msg = _("Provider '%(prov)s' was not found.")
    code = 501


class ProviderDriverError(APIException):
    msg = _("Provider '%(prov)s' reports error: %(user_msg)s")
    code = 500


class ProviderNotImplementedError(APIException):
    msg = _("Provider '%(prov)s' does not support a requested action: "
            "%(user_msg)s")
    code = 501


class ProviderUnsupportedOptionError(APIException):
    msg = _("Provider '%(prov)s' does not support a requested option: "
            "%(user_msg)s")
    code = 501

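These API exceptions are the user-facing half of the driver interface; the driver-side exceptions live in octavia.api.drivers.exceptions. A condensed sketch of the translation that driver_utils.call_provider() performs (the mapping itself is pinned down by test_call_provider near the end of this patch; the real helper also logs the failure):

    from octavia.api.drivers import exceptions as driver_exceptions
    from octavia.common import exceptions


    def call_provider(provider, driver_method, *args, **kwargs):
        # Translate driver-facing exceptions into the API exceptions above.
        try:
            return driver_method(*args, **kwargs)
        except driver_exceptions.DriverError as e:
            raise exceptions.ProviderDriverError(prov=provider, user_msg=e)
        except driver_exceptions.NotImplementedError as e:
            raise exceptions.ProviderNotImplementedError(
                prov=provider, user_msg=e)
        except driver_exceptions.UnsupportedOptionError as e:
            raise exceptions.ProviderUnsupportedOptionError(
                prov=provider, user_msg=e)
        except Exception as e:
            # Anything unexpected is reported as a driver error (HTTP 500).
            raise exceptions.ProviderDriverError(prov=provider, user_msg=e)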
@@ -0,0 +1,37 @@
# Copyright 2018 Rackspace, US Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Add provider column

Revision ID: 0f242cf02c74
Revises: 0fd2c131923f
Create Date: 2018-04-23 16:22:26.971048

"""

from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
revision = '0f242cf02c74'
down_revision = '0fd2c131923f'


def upgrade():
    op.add_column(
        u'load_balancer',
        sa.Column(u'provider', sa.String(64), nullable=True)
    )
    op.execute("UPDATE load_balancer set provider='amphora' where provider "
               "is null")

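Deployers pick up the new column through the normal Alembic workflow; on an existing installation that is typically:

    octavia-db-manage upgrade head

The trailing UPDATE backfills rows created before this migration, so pre-existing load balancers report the amphora provider instead of NULL.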
@@ -343,6 +343,7 @@ class LoadBalancer(base_models.BASE, base_models.IdMixin,
                           backref=orm.backref("load_balancer",
                                               uselist=False))
    server_group_id = sa.Column(sa.String(36), nullable=True)
    provider = sa.Column(sa.String(64), nullable=True)


class VRRPGroup(base_models.BASE):

@@ -79,6 +79,12 @@ class BaseAPITest(base_db_test.OctaviaDBTestBase):
        self.conf.config(group="controller_worker",
                         network_driver='network_noop_driver')
        self.conf.config(group='api_settings', auth_strategy=constants.NOAUTH)
        self.conf.config(group='api_settings',
                         default_provider_driver='noop_driver')
        # We still need to test with the "octavia" alias
        self.conf.config(group='api_settings',
                         enabled_provider_drivers='amphora, noop_driver, '
                                                  'octavia')
        self.lb_repo = repositories.LoadBalancerRepository()
        self.listener_repo = repositories.ListenerRepository()
        self.listener_stats_repo = repositories.ListenerStatisticsRepository()

@@ -128,10 +128,11 @@ class TestAmphora(base.BaseAPITest):
        self.put(self.AMPHORA_FAILOVER_PATH.format(
            amphora_id=new_amp.id), body={}, status=404)

    def test_failover_bad_amp_id(self):
    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_failover_bad_amp_id(self, mock_cast):
        self.put(self.AMPHORA_FAILOVER_PATH.format(
            amphora_id='asdf'), body={}, status=404)
        self.assertFalse(self.handler_mock().amphora.failover.called)
        self.assertFalse(mock_cast.called)

    def test_get_authorized(self):
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))

@@ -20,9 +20,11 @@ from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from oslo_utils import uuidutils

from octavia.api.drivers import exceptions as provider_exceptions
from octavia.common import constants
import octavia.common.context
from octavia.common import data_models
from octavia.common import exceptions
from octavia.network import base as network_base
from octavia.network import data_models as network_models
from octavia.tests.functional.api.v2 import base
@@ -314,13 +316,19 @@ class TestLoadBalancer(base.BaseAPITest):
                   'vip_port_id': port.id, 'admin_state_up': False,
                   'project_id': self.project_id}
        body = self._build_body(lb_json)
        # This test needs the provider driver to not supply the VIP port
        # so mocking noop to not supply a VIP port.
        with mock.patch(
                "octavia.network.drivers.noop_driver.driver.NoopManager"
                ".get_network") as mock_get_network, mock.patch(
                "octavia.network.drivers.noop_driver.driver.NoopManager"
                ".get_port") as mock_get_port:
                ".get_port") as mock_get_port, mock.patch(
                "octavia.api.drivers.noop_driver.driver.NoopManager."
                "create_vip_port") as mock_provider:
            mock_get_network.return_value = network
            mock_get_port.return_value = port
            mock_provider.side_effect = (provider_exceptions.
                                         NotImplementedError())
            response = self.post(self.LBS_PATH, body)
        api_lb = response.json.get(self.root_tag)
        self._assert_request_matches_response(lb_json, api_lb)
@@ -466,17 +474,23 @@ class TestLoadBalancer(base.BaseAPITest):
                   'vip_network_id': network.id, 'vip_port_id': port.id,
                   'admin_state_up': False, 'project_id': self.project_id}
        body = self._build_body(lb_json)
        # This test needs the provider driver to not supply the VIP port
        # so mocking noop to not supply a VIP port.
        with mock.patch(
                "octavia.network.drivers.noop_driver.driver.NoopManager"
                ".get_network") as mock_get_network, mock.patch(
                "octavia.network.drivers.noop_driver.driver.NoopManager"
                ".get_port") as mock_get_port, mock.patch(
                "octavia.network.drivers.noop_driver.driver.NoopManager"
                ".allocate_vip") as mock_allocate_vip:
                ".allocate_vip") as mock_allocate_vip, mock.patch(
                "octavia.api.drivers.noop_driver.driver.NoopManager."
                "create_vip_port") as mock_provider:
            mock_get_network.return_value = network
            mock_get_port.return_value = port
            mock_allocate_vip.side_effect = TestNeutronException(
                "octavia_msg", "neutron_msg", 409)
            mock_provider.side_effect = (provider_exceptions.
                                         NotImplementedError())
            response = self.post(self.LBS_PATH, body, status=409)
        # Make sure the faultstring contains the neutron error and not
        # the octavia error message
@@ -786,7 +800,10 @@ class TestLoadBalancer(base.BaseAPITest):
        }
        lb_json.update(optionals)
        body = self._build_body(lb_json)
        response = self.post(self.LBS_PATH, body)
        with mock.patch('oslo_messaging.get_rpc_transport'):
            with mock.patch('oslo_messaging.Target'):
                with mock.patch('oslo_messaging.RPCClient'):
                    response = self.post(self.LBS_PATH, body)
        api_lb = response.json.get(self.root_tag)
        self._assert_request_matches_response(lb_json, api_lb)
        return api_lb
@@ -800,8 +817,7 @@ class TestLoadBalancer(base.BaseAPITest):
        lb_json.update(optionals)
        body = self._build_body(lb_json)
        response = self.post(self.LBS_PATH, body, status=400)
        self.assertIn("Invalid input for field/attribute provider. Value: "
                      "'BOGUS'. Value should be one of:",
        self.assertIn("Provider 'BOGUS' is not enabled.",
                      response.json.get('faultstring'))

    def test_create_flavor_bogus(self, **optionals):
@@ -1259,7 +1275,7 @@ class TestLoadBalancer(base.BaseAPITest):
                             lb_json)
        api_lb = response.json.get(self.root_tag)
        self.assertIsNotNone(api_lb.get('vip_subnet_id'))
        self.assertEqual('lb1', api_lb.get('name'))
        self.assertEqual('lb2', api_lb.get('name'))
        self.assertEqual(project_id, api_lb.get('project_id'))
        self.assertEqual('desc1', api_lb.get('description'))
        self.assertFalse(api_lb.get('admin_state_up'))
@@ -1360,7 +1376,7 @@ class TestLoadBalancer(base.BaseAPITest):
        api_lb = response.json.get(self.root_tag)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assertIsNotNone(api_lb.get('vip_subnet_id'))
        self.assertEqual('lb1', api_lb.get('name'))
        self.assertEqual('lb2', api_lb.get('name'))
        self.assertEqual(project_id, api_lb.get('project_id'))
        self.assertEqual('desc1', api_lb.get('description'))
        self.assertFalse(api_lb.get('admin_state_up'))
@@ -1820,28 +1836,33 @@ class TestLoadBalancer(base.BaseAPITest):
            lb_id=lb_dict.get('id')) + "/failover")
        self.app.put(path, status=404)

    def test_create_with_bad_handler(self):
        self.handler_mock().load_balancer.create.side_effect = Exception()
        api_lb = self.create_load_balancer(
            uuidutils.generate_uuid()).get(self.root_tag)
        self.assert_correct_status(
            lb_id=api_lb.get('id'),
            lb_prov_status=constants.ERROR,
            lb_op_status=constants.OFFLINE)
    @mock.patch('octavia.api.drivers.utils.call_provider')
    def test_create_with_bad_provider(self, mock_provider):
        mock_provider.side_effect = exceptions.ProviderDriverError(
            prov='bad_driver', user_msg='broken')
        lb_json = {'name': 'test-lb',
                   'vip_subnet_id': uuidutils.generate_uuid(),
                   'project_id': self.project_id}
        response = self.post(self.LBS_PATH, self._build_body(lb_json),
                             status=500)
        self.assertIn('Provider \'bad_driver\' reports error: broken',
                      response.json.get('faultstring'))

    def test_update_with_bad_handler(self):
    @mock.patch('octavia.api.drivers.utils.call_provider')
    def test_update_with_bad_provider(self, mock_provider):
        api_lb = self.create_load_balancer(
            uuidutils.generate_uuid()).get(self.root_tag)
        self.set_lb_status(lb_id=api_lb.get('id'))
        new_listener = {'name': 'new_name'}
        self.handler_mock().load_balancer.update.side_effect = Exception()
        self.put(self.LB_PATH.format(lb_id=api_lb.get('id')),
                 self._build_body(new_listener))
        self.assert_correct_status(
            lb_id=api_lb.get('id'),
            lb_prov_status=constants.ERROR)
        mock_provider.side_effect = exceptions.ProviderDriverError(
            prov='bad_driver', user_msg='broken')
        response = self.put(self.LB_PATH.format(lb_id=api_lb.get('id')),
                            self._build_body(new_listener), status=500)
        self.assertIn('Provider \'bad_driver\' reports error: broken',
                      response.json.get('faultstring'))

    def test_delete_with_bad_handler(self):
    @mock.patch('octavia.api.drivers.utils.call_provider')
    def test_delete_with_bad_provider(self, mock_provider):
        api_lb = self.create_load_balancer(
            uuidutils.generate_uuid()).get(self.root_tag)
        self.set_lb_status(lb_id=api_lb.get('id'))
@@ -1854,11 +1875,95 @@ class TestLoadBalancer(base.BaseAPITest):
        self.assertIsNone(api_lb.pop('updated_at'))
        self.assertIsNotNone(response.pop('updated_at'))
        self.assertEqual(api_lb, response)
        self.handler_mock().load_balancer.delete.side_effect = Exception()
        self.delete(self.LB_PATH.format(lb_id=api_lb.get('id')))
        self.assert_correct_status(
            lb_id=api_lb.get('id'),
            lb_prov_status=constants.ERROR)
        mock_provider.side_effect = exceptions.ProviderDriverError(
            prov='bad_driver', user_msg='broken')
        self.delete(self.LB_PATH.format(lb_id=api_lb.get('id')), status=500)

    @mock.patch('octavia.api.drivers.utils.call_provider')
    def test_create_with_provider_not_implemented(self, mock_provider):
        mock_provider.side_effect = exceptions.ProviderNotImplementedError(
            prov='bad_driver', user_msg='broken')
        lb_json = {'name': 'test-lb',
                   'vip_subnet_id': uuidutils.generate_uuid(),
                   'project_id': self.project_id}
        response = self.post(self.LBS_PATH, self._build_body(lb_json),
                             status=501)
        self.assertIn('Provider \'bad_driver\' does not support a requested '
                      'action: broken', response.json.get('faultstring'))

    @mock.patch('octavia.api.drivers.utils.call_provider')
    def test_update_with_provider_not_implemented(self, mock_provider):
        api_lb = self.create_load_balancer(
            uuidutils.generate_uuid()).get(self.root_tag)
        self.set_lb_status(lb_id=api_lb.get('id'))
        new_listener = {'name': 'new_name'}
        mock_provider.side_effect = exceptions.ProviderNotImplementedError(
            prov='bad_driver', user_msg='broken')
        response = self.put(self.LB_PATH.format(lb_id=api_lb.get('id')),
                            self._build_body(new_listener), status=501)
        self.assertIn('Provider \'bad_driver\' does not support a requested '
                      'action: broken', response.json.get('faultstring'))

    @mock.patch('octavia.api.drivers.utils.call_provider')
    def test_delete_with_provider_not_implemented(self, mock_provider):
        api_lb = self.create_load_balancer(
            uuidutils.generate_uuid()).get(self.root_tag)
        self.set_lb_status(lb_id=api_lb.get('id'))
        # Set status to ACTIVE/ONLINE because set_lb_status did it in the db
        api_lb['provisioning_status'] = constants.ACTIVE
        api_lb['operating_status'] = constants.ONLINE
        response = self.get(self.LB_PATH.format(
            lb_id=api_lb.get('id'))).json.get(self.root_tag)

        self.assertIsNone(api_lb.pop('updated_at'))
        self.assertIsNotNone(response.pop('updated_at'))
        self.assertEqual(api_lb, response)
        mock_provider.side_effect = exceptions.ProviderNotImplementedError(
            prov='bad_driver', user_msg='broken')
        self.delete(self.LB_PATH.format(lb_id=api_lb.get('id')), status=501)

    @mock.patch('octavia.api.drivers.utils.call_provider')
    def test_create_with_provider_unsupport_option(self, mock_provider):
        mock_provider.side_effect = exceptions.ProviderUnsupportedOptionError(
            prov='bad_driver', user_msg='broken')
        lb_json = {'name': 'test-lb',
                   'vip_subnet_id': uuidutils.generate_uuid(),
                   'project_id': self.project_id}
        response = self.post(self.LBS_PATH, self._build_body(lb_json),
                             status=501)
        self.assertIn('Provider \'bad_driver\' does not support a requested '
                      'option: broken', response.json.get('faultstring'))

    @mock.patch('octavia.api.drivers.utils.call_provider')
    def test_update_with_provider_unsupport_option(self, mock_provider):
        api_lb = self.create_load_balancer(
            uuidutils.generate_uuid()).get(self.root_tag)
        self.set_lb_status(lb_id=api_lb.get('id'))
        new_listener = {'name': 'new_name'}
        mock_provider.side_effect = exceptions.ProviderUnsupportedOptionError(
            prov='bad_driver', user_msg='broken')
        response = self.put(self.LB_PATH.format(lb_id=api_lb.get('id')),
                            self._build_body(new_listener), status=501)
        self.assertIn('Provider \'bad_driver\' does not support a requested '
                      'option: broken', response.json.get('faultstring'))

    @mock.patch('octavia.api.drivers.utils.call_provider')
    def test_delete_with_provider_unsupport_option(self, mock_provider):
        api_lb = self.create_load_balancer(
            uuidutils.generate_uuid()).get(self.root_tag)
        self.set_lb_status(lb_id=api_lb.get('id'))
        # Set status to ACTIVE/ONLINE because set_lb_status did it in the db
        api_lb['provisioning_status'] = constants.ACTIVE
        api_lb['operating_status'] = constants.ONLINE
        response = self.get(self.LB_PATH.format(
            lb_id=api_lb.get('id'))).json.get(self.root_tag)

        self.assertIsNone(api_lb.pop('updated_at'))
        self.assertIsNotNone(response.pop('updated_at'))
        self.assertEqual(api_lb, response)
        mock_provider.side_effect = exceptions.ProviderUnsupportedOptionError(
            prov='bad_driver', user_msg='broken')
        self.delete(self.LB_PATH.format(lb_id=api_lb.get('id')), status=501)


class TestLoadBalancerGraph(base.BaseAPITest):
@@ -1952,6 +2057,7 @@ class TestLoadBalancerGraph(base.BaseAPITest):
            'vip_subnet_id': uuidutils.generate_uuid(),
            'vip_port_id': uuidutils.generate_uuid(),
            'vip_address': '198.51.100.10',
            'provider': 'noop_driver',
            'listeners': create_listeners,
            'pools': create_pools or []
        }
@@ -1968,7 +2074,7 @@ class TestLoadBalancerGraph(base.BaseAPITest):
            'vip_network_id': mock.ANY,
            'vip_qos_policy_id': None,
            'flavor_id': '',
            'provider': 'octavia'
            'provider': 'noop_driver'
        }
        expected_lb.update(create_lb)
        expected_lb['listeners'] = expected_listeners
@@ -2241,7 +2347,11 @@ class TestLoadBalancerGraph(base.BaseAPITest):
        api_lb = response.json.get(self.root_tag)
        self._assert_graphs_equal(expected_lb, api_lb)

    def test_with_one_listener_sni_containers(self):
    # TODO(johnsom) Fix this when there is a noop certificate manager
    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
    def test_with_one_listener_sni_containers(self, mock_cert_data):
        mock_cert_data.return_value = {'tls_cert': 'cert 1',
                                       'sni_certs': ['cert 2', 'cert 3']}
        create_sni_containers, expected_sni_containers = (
            self._get_sni_container_bodies())
        create_listener, expected_listener = self._get_listener_bodies(
@@ -2399,7 +2509,12 @@ class TestLoadBalancerGraph(base.BaseAPITest):
        body = self._build_body(create_lb)
        return body, expected_lb

    def test_with_one_of_everything(self):
    # TODO(johnsom) Fix this when there is a noop certificate manager
    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
    def test_with_one_of_everything(self, mock_cert_data):
        mock_cert_data.return_value = {'tls_cert': 'cert 1',
                                       'sni_certs': ['cert 2', 'cert 3']}

        body, expected_lb = self._test_with_one_of_everything_helper()
        response = self.post(self.LBS_PATH, body)
        api_lb = response.json.get(self.root_tag)
@@ -2481,7 +2596,12 @@ class TestLoadBalancerGraph(base.BaseAPITest):
        self.start_quota_mock(data_models.HealthMonitor)
        self.post(self.LBS_PATH, body, status=403)

    def test_create_over_quota_sanity_check(self):
    # TODO(johnsom) Fix this when there is a noop certificate manager
    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
    def test_create_over_quota_sanity_check(self, mock_cert_data):
        mock_cert_data.return_value = {'tls_cert': 'cert 1',
                                       'sni_certs': ['cert 2', 'cert 3']}

        # This one should create, as we don't check quotas on L7Policies
        body, _ = self._test_with_one_of_everything_helper()
        self.start_quota_mock(data_models.L7Policy)

@@ -127,6 +127,7 @@ class AllRepositoriesTest(base.OctaviaDBTestBase):
              'operating_status': constants.OFFLINE,
              'topology': constants.TOPOLOGY_ACTIVE_STANDBY,
              'vrrp_group': None,
              'provider': 'amphora',
              'server_group_id': uuidutils.generate_uuid(),
              'project_id': uuidutils.generate_uuid(),
              'id': uuidutils.generate_uuid()}

@@ -32,6 +32,7 @@ class TestProviderDataModels(base.TestCase):
        self.vip_port_id = uuidutils.generate_uuid()
        self.vip_subnet_id = uuidutils.generate_uuid()
        self.listener_id = uuidutils.generate_uuid()
        self.vip_qos_policy_id = uuidutils.generate_uuid()

        self.ref_listener = data_models.Listener(
            admin_state_up=True,
@@ -59,7 +60,8 @@ class TestProviderDataModels(base.TestCase):
            vip_address=self.vip_address,
            vip_network_id=self.vip_network_id,
            vip_port_id=self.vip_port_id,
            vip_subnet_id=self.vip_subnet_id)
            vip_subnet_id=self.vip_subnet_id,
            vip_qos_policy_id=self.vip_qos_policy_id)

        self.ref_lb_dict = {'project_id': self.project_id,
                            'flavor': {'cake': 'chocolate'},
@@ -67,37 +69,38 @@ class TestProviderDataModels(base.TestCase):
                            'admin_state_up': False,
                            'loadbalancer_id': self.loadbalancer_id,
                            'vip_port_id': self.vip_port_id,
                            'listeners': None,
                            'vip_address': self.vip_address,
                            'description': 'One great load balancer',
                            'vip_subnet_id': self.vip_subnet_id,
                            'name': 'favorite_lb'}
                            'name': 'favorite_lb',
                            'vip_qos_policy_id': self.vip_qos_policy_id}

        self.ref_listener = {'admin_state_up': True,
                             'connection_limit': 5000,
                             'default_pool_id': None,
                             'default_tls_container': 'a_pkcs12_bundle',
                             'description': 'The listener',
                             'insert_headers': {'X-Forwarded-For': 'true'},
                             'listener_id': self.listener_id,
                             'loadbalancer_id': self.loadbalancer_id,
                             'name': 'super_listener',
                             'protocol': 'avian',
                             'protocol_port': 42,
                             'sni_containers': 'another_pkcs12_bundle'}

        self.ref_lb_dict_with_listener = {
            'admin_state_up': False,
            'description': 'One great load balancer',
            'flavor': {'cake': 'chocolate'},
            'listeners': [{'admin_state_up': True,
                           'connection_limit': 5000,
                           'default_pool': None,
                           'default_pool_id': None,
                           'default_tls_container': 'a_pkcs12_bundle',
                           'description': 'The listener',
                           'insert_headers': {'X-Forwarded-For': 'true'},
                           'l7policies': None,
                           'listener_id': self.listener_id,
                           'loadbalancer_id': self.loadbalancer_id,
                           'name': 'super_listener',
                           'protocol': 'avian',
                           'protocol_port': 42,
                           'sni_containers': 'another_pkcs12_bundle'}],
            'listeners': [self.ref_listener],
            'loadbalancer_id': self.loadbalancer_id,
            'name': 'favorite_lb',
            'project_id': self.project_id,
            'vip_address': self.vip_address,
            'vip_network_id': self.vip_network_id,
            'vip_port_id': self.vip_port_id,
            'vip_subnet_id': self.vip_subnet_id}
            'vip_subnet_id': self.vip_subnet_id,
            'vip_qos_policy_id': self.vip_qos_policy_id}

    def test_equality(self):
        second_ref_lb = deepcopy(self.ref_lb)
@@ -126,12 +129,67 @@ class TestProviderDataModels(base.TestCase):

        self.assertEqual(self.ref_lb_dict, ref_lb_converted_to_dict)

    def test_to_dict_partial(self):
        ref_lb = data_models.LoadBalancer(loadbalancer_id=self.loadbalancer_id)
        ref_lb_dict = {'loadbalancer_id': self.loadbalancer_id}

        ref_lb_converted_to_dict = ref_lb.to_dict()

        self.assertEqual(ref_lb_dict, ref_lb_converted_to_dict)

    def test_to_dict_render_unsets(self):

        ref_lb_converted_to_dict = self.ref_lb.to_dict(render_unsets=True)

        new_ref_lib_dict = deepcopy(self.ref_lb_dict)
        new_ref_lib_dict['pools'] = None
        new_ref_lib_dict['listeners'] = None

        self.assertEqual(new_ref_lib_dict, ref_lb_converted_to_dict)

    def test_to_dict_recursive(self):
        ref_lb_converted_to_dict = self.ref_lb.to_dict(recurse=True)

        self.assertEqual(self.ref_lb_dict_with_listener,
                         ref_lb_converted_to_dict)

    def test_to_dict_recursive_partial(self):
        ref_lb = data_models.LoadBalancer(
            loadbalancer_id=self.loadbalancer_id,
            listeners=[self.ref_listener])

        ref_lb_dict_with_listener = {
            'loadbalancer_id': self.loadbalancer_id,
            'listeners': [self.ref_listener]}

        ref_lb_converted_to_dict = ref_lb.to_dict(recurse=True)

        self.assertEqual(ref_lb_dict_with_listener, ref_lb_converted_to_dict)

    def test_to_dict_recursive_render_unset(self):
        ref_lb = data_models.LoadBalancer(
            admin_state_up=False,
            description='One great load balancer',
            flavor={'cake': 'chocolate'},
            listeners=[self.ref_listener],
            loadbalancer_id=self.loadbalancer_id,
            project_id=self.project_id,
            vip_address=self.vip_address,
            vip_network_id=self.vip_network_id,
            vip_port_id=self.vip_port_id,
            vip_subnet_id=self.vip_subnet_id,
            vip_qos_policy_id=self.vip_qos_policy_id)

        ref_lb_dict_with_listener = deepcopy(self.ref_lb_dict_with_listener)
        ref_lb_dict_with_listener['pools'] = None
        ref_lb_dict_with_listener['name'] = None

        ref_lb_converted_to_dict = ref_lb.to_dict(recurse=True,
                                                  render_unsets=True)

        self.assertEqual(ref_lb_dict_with_listener,
                         ref_lb_converted_to_dict)

    def test_from_dict(self):
        lb_object = data_models.LoadBalancer.from_dict(self.ref_lb_dict)

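The to_dict() tests above pin down the flag semantics of the provider data models. A compact illustration with a hypothetical object (the values are illustrative, mirroring the tests):

    lb = driver_dm.LoadBalancer(loadbalancer_id='example-id')
    lb.to_dict()                    # only set fields: {'loadbalancer_id': 'example-id'}
    lb.to_dict(render_unsets=True)  # every field present, unset ones as None
    lb.to_dict(recurse=True)        # nested listeners/pools become dicts too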
@@ -30,7 +30,7 @@ class TestProviderBase(base.TestCase):
    def test_create_vip_port(self):
        self.assertRaises(exceptions.NotImplementedError,
                          self.driver.create_vip_port,
                          False, False)
                          False, False, False)

    def test_loadbalancer_create(self):
        self.assertRaises(exceptions.NotImplementedError,

@@ -37,6 +37,7 @@ class TestNoopProviderDriver(base.TestCase):
        self.healthmonitor_id = uuidutils.generate_uuid()
        self.l7policy_id = uuidutils.generate_uuid()
        self.l7rule_id = uuidutils.generate_uuid()
        self.project_id = uuidutils.generate_uuid()

        self.ref_vip = data_models.VIP(
            vip_address=self.vip_address,
@@ -75,7 +76,6 @@ class TestNoopProviderDriver(base.TestCase):
            description='Olympic swimming pool',
            healthmonitor=self.ref_healthmonitor,
            lb_algorithm='A_Fast_One',
            listener_id=self.listener_id,
            loadbalancer_id=self.loadbalancer_id,
            members=[self.ref_member],
            name='Osborn',
@@ -128,7 +128,7 @@ class TestNoopProviderDriver(base.TestCase):
            listeners=[self.ref_listener],
            loadbalancer_id=self.loadbalancer_id,
            name='favorite_lb',
            project_id=uuidutils.generate_uuid(),
            project_id=self.project_id,
            vip_address=self.vip_address,
            vip_network_id=self.vip_network_id,
            vip_port_id=self.vip_port_id,
@@ -140,6 +140,7 @@ class TestNoopProviderDriver(base.TestCase):

    def test_create_vip_port(self):
        vip_dict = self.driver.create_vip_port(self.loadbalancer_id,
                                               self.project_id,
                                               self.ref_vip.to_dict())

        self.assertEqual(self.ref_vip.to_dict(), vip_dict)

640
octavia/tests/unit/api/drivers/test_utils.py
Normal file
@@ -0,0 +1,640 @@
|
||||
# Copyright 2018 Rackspace, US Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
import copy
|
||||
|
||||
import mock
|
||||
|
||||
from oslo_utils import uuidutils
|
||||
|
||||
from octavia.api.drivers import data_models as driver_dm
|
||||
from octavia.api.drivers import exceptions as driver_exceptions
|
||||
from octavia.api.drivers import utils
|
||||
from octavia.common import constants
|
||||
from octavia.common import data_models
|
||||
from octavia.common import exceptions
|
||||
from octavia.tests.unit import base
|
||||
|
||||
|
||||
class TestUtils(base.TestCase):
|
||||
def setUp(self):
|
||||
super(TestUtils, self).setUp()
|
||||
|
||||
hm1_id = uuidutils.generate_uuid()
|
||||
hm2_id = uuidutils.generate_uuid()
|
||||
l7policy1_id = uuidutils.generate_uuid()
|
||||
l7policy2_id = uuidutils.generate_uuid()
|
||||
l7rule1_id = uuidutils.generate_uuid()
|
||||
l7rule2_id = uuidutils.generate_uuid()
|
||||
listener1_id = uuidutils.generate_uuid()
|
||||
listener2_id = uuidutils.generate_uuid()
|
||||
member1_id = uuidutils.generate_uuid()
|
||||
member2_id = uuidutils.generate_uuid()
|
||||
member3_id = uuidutils.generate_uuid()
|
||||
member4_id = uuidutils.generate_uuid()
|
||||
pool1_id = uuidutils.generate_uuid()
|
||||
pool2_id = uuidutils.generate_uuid()
|
||||
self.lb_id = uuidutils.generate_uuid()
|
||||
self.project_id = uuidutils.generate_uuid()
|
||||
self.ip_address = '192.0.2.30'
|
||||
self.port_id = uuidutils.generate_uuid()
|
||||
self.network_id = uuidutils.generate_uuid()
|
||||
self.subnet_id = uuidutils.generate_uuid()
|
||||
self.qos_policy_id = uuidutils.generate_uuid()
|
||||
self.sni_containers = [{'tls_container_id': '2'},
|
||||
{'tls_container_id': '3'}]
|
||||
|
||||
_common_test_dict = {'provisioning_status': constants.ACTIVE,
|
||||
'operating_status': constants.ONLINE,
|
||||
'project_id': self.project_id,
|
||||
'created_at': 'then',
|
||||
'updated_at': 'now',
|
||||
'enabled': True}
|
||||
|
||||
# Setup Health Monitors
|
||||
self.test_hm1_dict = {'id': hm1_id,
|
||||
'type': constants.HEALTH_MONITOR_PING,
|
||||
'delay': 1, 'timeout': 3, 'fall_threshold': 1,
|
||||
'rise_threshold': 2, 'http_method': 'GET',
|
||||
'url_path': '/', 'expected_codes': '200',
|
||||
'name': 'hm1', 'pool_id': pool1_id}
|
||||
|
||||
self.test_hm1_dict.update(_common_test_dict)
|
||||
|
||||
self.test_hm2_dict = copy.deepcopy(self.test_hm1_dict)
|
||||
self.test_hm2_dict['id'] = hm2_id
|
||||
self.test_hm2_dict['name'] = 'hm2'
|
||||
|
||||
self.db_hm1 = data_models.HealthMonitor(**self.test_hm1_dict)
|
||||
self.db_hm2 = data_models.HealthMonitor(**self.test_hm2_dict)
|
||||
|
||||
self.provider_hm1_dict = {'admin_state_up': True,
|
||||
'delay': 1, 'expected_codes': '200',
|
||||
'healthmonitor_id': hm1_id,
|
||||
'http_method': 'GET',
|
||||
'max_retries': 2,
|
||||
'max_retries_down': 1,
|
||||
'name': 'hm1',
|
||||
'pool_id': pool1_id,
|
||||
'timeout': 3,
|
||||
'type': constants.HEALTH_MONITOR_PING,
|
||||
'url_path': '/'}
|
||||
|
||||
self.provider_hm2_dict = copy.deepcopy(self.provider_hm1_dict)
|
||||
self.provider_hm2_dict['healthmonitor_id'] = hm2_id
|
||||
self.provider_hm2_dict['name'] = 'hm2'
|
||||
|
||||
self.provider_hm1 = driver_dm.HealthMonitor(**self.provider_hm1_dict)
|
||||
self.provider_hm2 = driver_dm.HealthMonitor(**self.provider_hm2_dict)
|
||||
|
||||
# Setup Members
|
||||
self.test_member1_dict = {'id': member1_id,
|
||||
'pool_id': pool1_id,
|
||||
'ip_address': '192.0.2.16',
|
||||
'protocol_port': 80, 'weight': 0,
|
||||
'backup': False,
|
||||
'subnet_id': self.subnet_id,
|
||||
'pool': None,
|
||||
'name': 'member1',
|
||||
'monitor_address': '192.0.2.26',
|
||||
'monitor_port': 81}
|
||||
|
||||
self.test_member1_dict.update(_common_test_dict)
|
||||
|
||||
self.test_member2_dict = copy.deepcopy(self.test_member1_dict)
|
||||
self.test_member2_dict['id'] = member2_id
|
||||
self.test_member2_dict['ip_address'] = '192.0.2.17'
|
||||
self.test_member2_dict['monitor_address'] = '192.0.2.27'
|
||||
self.test_member2_dict['name'] = 'member2'
|
||||
|
||||
self.test_member3_dict = copy.deepcopy(self.test_member1_dict)
|
||||
self.test_member3_dict['id'] = member3_id
|
||||
self.test_member3_dict['ip_address'] = '192.0.2.18'
|
||||
self.test_member3_dict['monitor_address'] = '192.0.2.28'
|
||||
self.test_member3_dict['name'] = 'member3'
|
||||
self.test_member3_dict['pool_id'] = pool2_id
|
||||
|
||||
self.test_member4_dict = copy.deepcopy(self.test_member1_dict)
|
||||
self.test_member4_dict['id'] = member4_id
|
||||
self.test_member4_dict['ip_address'] = '192.0.2.19'
|
||||
self.test_member4_dict['monitor_address'] = '192.0.2.29'
|
||||
self.test_member4_dict['name'] = 'member4'
|
||||
self.test_member4_dict['pool_id'] = pool2_id
|
||||
|
||||
self.test_pool1_members_dict = [self.test_member1_dict,
|
||||
self.test_member2_dict]
|
||||
self.test_pool2_members_dict = [self.test_member3_dict,
|
||||
self.test_member4_dict]
|
||||
|
||||
self.db_member1 = data_models.Member(**self.test_member1_dict)
|
||||
self.db_member2 = data_models.Member(**self.test_member2_dict)
|
||||
self.db_member3 = data_models.Member(**self.test_member3_dict)
|
||||
self.db_member4 = data_models.Member(**self.test_member4_dict)
|
||||
|
||||
self.db_pool1_members = [self.db_member1, self.db_member2]
|
||||
self.db_pool2_members = [self.db_member3, self.db_member4]
|
||||
|
||||
self.provider_member1_dict = {'address': '192.0.2.16',
|
||||
'admin_state_up': True,
|
||||
'member_id': member1_id,
|
||||
'monitor_address': '192.0.2.26',
|
||||
'monitor_port': 81,
|
||||
'name': 'member1',
|
||||
'pool_id': pool1_id,
|
||||
'protocol_port': 80,
|
||||
'subnet_id': self.subnet_id,
|
||||
'weight': 0,
|
||||
'backup': False}
|
||||
|
||||
self.provider_member2_dict = copy.deepcopy(self.provider_member1_dict)
|
||||
self.provider_member2_dict['member_id'] = member2_id
|
||||
self.provider_member2_dict['address'] = '192.0.2.17'
|
||||
self.provider_member2_dict['monitor_address'] = '192.0.2.27'
|
||||
self.provider_member2_dict['name'] = 'member2'
|
||||
|
||||
self.provider_member3_dict = copy.deepcopy(self.provider_member1_dict)
|
||||
self.provider_member3_dict['member_id'] = member3_id
|
||||
self.provider_member3_dict['address'] = '192.0.2.18'
|
||||
self.provider_member3_dict['monitor_address'] = '192.0.2.28'
|
||||
self.provider_member3_dict['name'] = 'member3'
|
||||
self.provider_member3_dict['pool_id'] = pool2_id
|
||||
|
||||
self.provider_member4_dict = copy.deepcopy(self.provider_member1_dict)
|
||||
self.provider_member4_dict['member_id'] = member4_id
|
||||
self.provider_member4_dict['address'] = '192.0.2.19'
|
||||
self.provider_member4_dict['monitor_address'] = '192.0.2.29'
|
||||
self.provider_member4_dict['name'] = 'member4'
|
||||
self.provider_member4_dict['pool_id'] = pool2_id
|
||||
|
||||
self.provider_pool1_members_dict = [self.provider_member1_dict,
|
||||
self.provider_member2_dict]
|
||||
|
||||
self.provider_pool2_members_dict = [self.provider_member3_dict,
|
||||
self.provider_member4_dict]
|
||||
|
||||
self.provider_member1 = driver_dm.Member(**self.provider_member1_dict)
|
||||
self.provider_member2 = driver_dm.Member(**self.provider_member2_dict)
|
||||
self.provider_member3 = driver_dm.Member(**self.provider_member3_dict)
|
||||
self.provider_member4 = driver_dm.Member(**self.provider_member4_dict)
|
||||
|
||||
self.provider_pool1_members = [self.provider_member1,
|
||||
self.provider_member2]
|
||||
self.provider_pool2_members = [self.provider_member3,
|
||||
self.provider_member4]
|
||||
|
||||
# Setup test pools
|
||||
self.test_pool1_dict = {'id': pool1_id,
|
||||
'name': 'pool1', 'description': 'Pool 1',
|
||||
'load_balancer_id': self.lb_id,
|
||||
'protocol': 'avian',
|
||||
'lb_algorithm': 'round_robin',
|
||||
'members': self.test_pool1_members_dict,
|
||||
'health_monitor': self.test_hm1_dict,
|
||||
'session_persistence': {'type': 'SOURCE'},
|
||||
'listeners': [],
|
||||
'l7policies': []}
|
||||
|
||||
self.test_pool1_dict.update(_common_test_dict)
|
||||
|
||||
self.test_pool2_dict = copy.deepcopy(self.test_pool1_dict)
|
||||
self.test_pool2_dict['id'] = pool2_id
|
||||
self.test_pool2_dict['name'] = 'pool2'
|
||||
self.test_pool2_dict['description'] = 'Pool 2'
|
||||
self.test_pool2_dict['members'] = self.test_pool2_members_dict
|
||||
|
||||
self.test_pools = [self.test_pool1_dict, self.test_pool2_dict]
|
||||
|
||||
self.db_pool1 = data_models.Pool(**self.test_pool1_dict)
|
||||
self.db_pool1.health_monitor = self.db_hm1
|
||||
self.db_pool1.members = self.db_pool1_members
|
||||
self.db_pool2 = data_models.Pool(**self.test_pool2_dict)
|
||||
self.db_pool2.health_monitor = self.db_hm2
|
||||
self.db_pool2.members = self.db_pool2_members
|
||||
|
||||
self.test_db_pools = [self.db_pool1, self.db_pool2]
|
||||
|
||||
self.provider_pool1_dict = {
|
||||
'admin_state_up': True,
|
||||
'description': 'Pool 1',
|
||||
'healthmonitor': self.provider_hm1_dict,
|
||||
'lb_algorithm': 'round_robin',
|
||||
'loadbalancer_id': self.lb_id,
|
||||
'members': self.provider_pool1_members_dict,
|
||||
'name': 'pool1',
|
||||
'pool_id': pool1_id,
|
||||
'protocol': 'avian',
|
||||
'session_persistence': {'type': 'SOURCE'}}
|
||||
|
||||
self.provider_pool2_dict = copy.deepcopy(self.provider_pool1_dict)
|
||||
self.provider_pool2_dict['pool_id'] = pool2_id
|
||||
self.provider_pool2_dict['name'] = 'pool2'
|
||||
self.provider_pool2_dict['description'] = 'Pool 2'
|
||||
self.provider_pool2_dict['members'] = self.provider_pool2_members_dict
|
||||
self.provider_pool2_dict['healthmonitor'] = self.provider_hm2_dict
|
||||
|
||||
self.provider_pool1 = driver_dm.Pool(**self.provider_pool1_dict)
|
||||
self.provider_pool1.members = self.provider_pool1_members
|
||||
self.provider_pool1.healthmonitor = self.provider_hm1
|
||||
self.provider_pool2 = driver_dm.Pool(**self.provider_pool2_dict)
|
||||
self.provider_pool2.members = self.provider_pool2_members
|
||||
self.provider_pool2.healthmonitor = self.provider_hm2
|
||||
|
||||
self.provider_pools = [self.provider_pool1, self.provider_pool2]
|
||||
|
||||
# Setup L7Rules
|
||||
self.test_l7rule1_dict = {'id': l7rule1_id,
|
||||
'l7policy_id': l7policy1_id,
|
||||
'type': 'o',
|
||||
'compare_type': 'fake_type',
|
||||
'key': 'fake_key',
|
||||
'value': 'fake_value',
|
||||
'l7policy': None,
|
||||
'invert': False}
|
||||
|
||||
self.test_l7rule1_dict.update(_common_test_dict)
|
||||
|
||||
self.test_l7rule2_dict = copy.deepcopy(self.test_l7rule1_dict)
|
||||
self.test_l7rule2_dict['id'] = l7rule2_id
|
||||
|
||||
self.test_l7rules = [self.test_l7rule1_dict, self.test_l7rule2_dict]
|
||||
|
||||
self.db_l7Rule1 = data_models.L7Rule(**self.test_l7rule1_dict)
|
||||
self.db_l7Rule2 = data_models.L7Rule(**self.test_l7rule2_dict)
|
||||
|
||||
self.db_l7Rules = [self.db_l7Rule1, self.db_l7Rule2]
|
||||
|
||||
self.provider_l7rule1_dict = {'admin_state_up': True,
|
||||
'compare_type': 'fake_type',
|
||||
'invert': False,
|
||||
'key': 'fake_key',
|
||||
'l7policy_id': l7policy1_id,
|
||||
'l7rule_id': l7rule1_id,
|
||||
'type': 'o',
|
||||
'value': 'fake_value'}
|
||||
|
||||
self.provider_l7rule2_dict = copy.deepcopy(self.provider_l7rule1_dict)
|
||||
self.provider_l7rule2_dict['l7rule_id'] = l7rule2_id
|
||||
|
||||
self.provider_l7rules_dicts = [self.provider_l7rule1_dict,
|
||||
self.provider_l7rule2_dict]
|
||||
|
||||
self.provider_l7rule1 = driver_dm.L7Rule(**self.provider_l7rule1_dict)
|
||||
self.provider_l7rule2 = driver_dm.L7Rule(**self.provider_l7rule2_dict)
|
||||
|
||||
self.provider_rules = [self.provider_l7rule1, self.provider_l7rule2]
|
||||
|
||||
# Setup L7Policies
|
||||
self.test_l7policy1_dict = {'id': l7policy1_id,
|
||||
'name': 'l7policy_1',
|
||||
'description': 'L7policy 1',
|
||||
'listener_id': listener1_id,
|
||||
'action': 'go',
|
||||
'redirect_pool_id': pool1_id,
|
||||
'redirect_url': '/index.html',
|
||||
'position': 1,
|
||||
'listener': None,
|
||||
'redirect_pool': None,
|
||||
'l7rules': self.test_l7rules}
|
||||
|
||||
self.test_l7policy1_dict.update(_common_test_dict)
|
||||
|
||||
self.test_l7policy2_dict = copy.deepcopy(self.test_l7policy1_dict)
|
||||
self.test_l7policy2_dict['id'] = l7policy2_id
|
||||
self.test_l7policy2_dict['name'] = 'l7policy_2'
|
||||
self.test_l7policy2_dict['description'] = 'L7policy 2'
|
||||
|
||||
self.test_l7policies = [self.test_l7policy1_dict,
|
||||
self.test_l7policy2_dict]
|
||||
|
||||
self.db_l7policy1 = data_models.L7Policy(**self.test_l7policy1_dict)
|
||||
self.db_l7policy2 = data_models.L7Policy(**self.test_l7policy2_dict)
|
||||
self.db_l7policy1.l7rules = self.db_l7Rules
|
||||
self.db_l7policy2.l7rules = self.db_l7Rules
|
||||
|
||||
self.db_l7policies = [self.db_l7policy1, self.db_l7policy2]
|
||||
|
||||
self.provider_l7policy1_dict = {'action': 'go',
|
||||
'admin_state_up': True,
|
||||
'description': 'L7policy 1',
|
||||
'l7policy_id': l7policy1_id,
|
||||
'listener_id': listener1_id,
|
||||
'name': 'l7policy_1',
|
||||
'position': 1,
|
||||
'redirect_pool_id': pool1_id,
|
||||
'redirect_url': '/index.html',
|
||||
'rules': self.provider_l7rules_dicts}
|
||||
|
||||
self.provider_l7policy2_dict = copy.deepcopy(
|
||||
self.provider_l7policy1_dict)
|
||||
self.provider_l7policy2_dict['l7policy_id'] = l7policy2_id
|
||||
self.provider_l7policy2_dict['name'] = 'l7policy_2'
|
||||
self.provider_l7policy2_dict['description'] = 'L7policy 2'
|
||||
|
||||
self.provider_l7policies_dict = [self.provider_l7policy1_dict,
|
||||
self.provider_l7policy2_dict]
|
||||
|
||||
self.provider_l7policy1 = driver_dm.L7Policy(
|
||||
**self.provider_l7policy1_dict)
|
||||
self.provider_l7policy1.rules = self.provider_rules
|
||||
self.provider_l7policy2 = driver_dm.L7Policy(
|
||||
**self.provider_l7policy2_dict)
|
||||
self.provider_l7policy2.rules = self.provider_rules
|
||||
|
||||
self.provider_l7policies = [self.provider_l7policy1,
|
||||
self.provider_l7policy2]
|
||||
|
||||
# Setup Listeners
|
||||
self.test_listener1_dict = {'id': listener1_id,
|
||||
'name': 'listener_1',
|
||||
'description': 'Listener 1',
|
||||
'default_pool_id': pool1_id,
|
||||
'load_balancer_id': self.lb_id,
|
||||
'protocol': 'avian',
|
||||
'protocol_port': 90,
|
||||
'connection_limit': 10000,
|
||||
'tls_certificate_id': '1',
|
||||
'stats': None,
|
||||
'default_pool': self.test_pool1_dict,
|
||||
'load_balancer': None,
|
||||
'sni_containers': self.sni_containers,
|
||||
'peer_port': 55,
|
||||
'l7policies': self.test_l7policies,
|
||||
'insert_headers': {},
|
||||
'pools': None,
|
||||
'timeout_client_data': 1000,
|
||||
'timeout_member_connect': 2000,
|
||||
'timeout_member_data': 3000,
|
||||
'timeout_tcp_inspect': 4000}
|
||||
|
||||
self.test_listener1_dict.update(_common_test_dict)
|
||||
|
||||
self.test_listener2_dict = copy.deepcopy(self.test_listener1_dict)
|
||||
self.test_listener2_dict['id'] = listener2_id
|
||||
self.test_listener2_dict['name'] = 'listener_2'
|
||||
self.test_listener2_dict['description'] = 'Listener 1'
|
||||
self.test_listener2_dict['default_pool_id'] = pool2_id
|
||||
self.test_listener2_dict['default_pool'] = self.test_pool2_dict
|
||||
del self.test_listener2_dict['l7policies']
|
||||
del self.test_listener2_dict['sni_containers']
|
||||
|
||||
self.test_listeners = [self.test_listener1_dict,
|
||||
self.test_listener2_dict]
|
||||
|
||||
self.db_listener1 = data_models.Listener(**self.test_listener1_dict)
|
||||
self.db_listener2 = data_models.Listener(**self.test_listener2_dict)
|
||||
self.db_listener1.default_pool = self.db_pool1
|
||||
self.db_listener2.default_pool = self.db_pool2
|
||||
self.db_listener1.l7policies = self.db_l7policies
|
||||
self.db_listener1.sni_containers = [
|
||||
data_models.SNI(tls_container_id='2'),
|
||||
data_models.SNI(tls_container_id='3')]
|
||||
|
||||
self.test_db_listeners = [self.db_listener1, self.db_listener2]
|
||||
|
||||
self.provider_listener1_dict = {
|
||||
'admin_state_up': True,
|
||||
'connection_limit': 10000,
|
||||
'default_pool': self.provider_pool1_dict,
|
||||
'default_pool_id': pool1_id,
|
||||
'default_tls_container': 'cert 1',
|
||||
'description': 'Listener 1',
|
||||
'insert_headers': {},
|
||||
'l7policies': self.provider_l7policies_dict,
|
||||
'listener_id': listener1_id,
|
||||
'loadbalancer_id': self.lb_id,
|
||||
'name': 'listener_1',
|
||||
'protocol': 'avian',
|
||||
'protocol_port': 90,
|
||||
'sni_containers': ['cert 2', 'cert 3'],
|
||||
'timeout_client_data': 1000,
|
||||
'timeout_member_connect': 2000,
|
||||
'timeout_member_data': 3000,
|
||||
'timeout_tcp_inspect': 4000}
|
||||
|
||||
self.provider_listener2_dict = copy.deepcopy(
|
||||
self.provider_listener1_dict)
|
||||
self.provider_listener2_dict['listener_id'] = listener2_id
|
||||
self.provider_listener2_dict['name'] = 'listener_2'
|
||||
self.provider_listener2_dict['description'] = 'Listener 1'
|
||||
self.provider_listener2_dict['default_pool_id'] = pool2_id
|
||||
self.provider_listener2_dict['default_pool'] = self.provider_pool2_dict
|
||||
del self.provider_listener2_dict['l7policies']
|
||||
|
||||
self.provider_listener1 = driver_dm.Listener(
|
||||
**self.provider_listener1_dict)
|
||||
self.provider_listener2 = driver_dm.Listener(
|
||||
**self.provider_listener2_dict)
|
||||
self.provider_listener1.default_pool = self.provider_pool1
|
||||
self.provider_listener2.default_pool = self.provider_pool2
|
||||
self.provider_listener1.l7policies = self.provider_l7policies
|
||||
|
||||
self.provider_listeners = [self.provider_listener1,
|
||||
self.provider_listener2]
|
||||
|
||||
def test_call_provider(self):
|
||||
mock_driver_method = mock.MagicMock()
|
||||
|
||||
# Test happy path
|
||||
utils.call_provider("provider_name", mock_driver_method,
|
||||
"arg1", foo="arg2")
|
||||
mock_driver_method.assert_called_with("arg1", foo="arg2")
|
||||
|
||||
# Test driver raising DriverError
|
||||
mock_driver_method.side_effect = driver_exceptions.DriverError
|
||||
self.assertRaises(exceptions.ProviderDriverError,
|
||||
utils.call_provider, "provider_name",
|
||||
mock_driver_method)
|
||||
|
||||
# Test driver raising NotImplementedError
|
||||
mock_driver_method.side_effect = driver_exceptions.NotImplementedError
|
||||
self.assertRaises(exceptions.ProviderNotImplementedError,
|
||||
utils.call_provider, "provider_name",
|
||||
mock_driver_method)
|
||||
|
||||
# Test driver raising UnsupportedOptionError
|
||||
mock_driver_method.side_effect = (
|
||||
driver_exceptions.UnsupportedOptionError)
|
||||
self.assertRaises(exceptions.ProviderUnsupportedOptionError,
|
||||
utils.call_provider, "provider_name",
|
||||
mock_driver_method)
|
||||
|
||||
# Test driver raising DriverError
|
||||
mock_driver_method.side_effect = Exception
|
||||
self.assertRaises(exceptions.ProviderDriverError,
|
||||
utils.call_provider, "provider_name",
|
||||
mock_driver_method)
|
||||
|
||||
def test_base_to_provider_dict(self):
|
||||
|
||||
test_dict = {'provisioning_status': constants.ACTIVE,
|
||||
'operating_status': constants.ONLINE,
|
||||
'provider': 'octavia',
|
||||
'created_at': 'now',
|
||||
'updated_at': 'then',
|
||||
'enabled': True,
|
||||
'project_id': 1}
|
||||
|
||||
result_dict = utils._base_to_provider_dict(test_dict,
|
||||
include_project_id=True)
|
||||
self.assertEqual({'admin_state_up': True, 'project_id': 1},
|
||||
result_dict)
|
||||
|
||||
result_dict = utils._base_to_provider_dict(test_dict,
|
||||
include_project_id=False)
|
||||
self.assertEqual({'admin_state_up': True},
|
||||
result_dict)
|
||||
|
||||
    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
    def test_lb_dict_to_provider_dict(self, mock_load_cert):
        mock_load_cert.return_value = {'tls_cert': 'cert 1',
                                       'sni_certs': ['cert 2', 'cert 3']}

        test_lb_dict = {'name': 'lb1', 'project_id': self.project_id,
                        'vip_subnet_id': self.subnet_id,
                        'vip_port_id': self.port_id,
                        'vip_address': self.ip_address,
                        'vip_network_id': self.network_id,
                        'vip_qos_policy_id': self.qos_policy_id,
                        'id': self.lb_id,
                        'listeners': [],
                        'pools': [],
                        'description': '', 'admin_state_up': True,
                        'provisioning_status': constants.PENDING_CREATE,
                        'operating_status': constants.OFFLINE,
                        'flavor_id': '',
                        'provider': 'noop_driver'}
        ref_prov_lb_dict = {'vip_address': self.ip_address,
                            'admin_state_up': True,
                            'loadbalancer_id': self.lb_id,
                            'vip_subnet_id': self.subnet_id,
                            'listeners': self.provider_listeners,
                            'description': '',
                            'project_id': self.project_id,
                            'flavor_id': '',
                            'vip_port_id': self.port_id,
                            'vip_qos_policy_id': self.qos_policy_id,
                            'vip_network_id': self.network_id,
                            'pools': self.provider_pools,
                            'name': 'lb1'}
        vip = data_models.Vip(ip_address=self.ip_address,
                              network_id=self.network_id,
                              port_id=self.port_id, subnet_id=self.subnet_id,
                              qos_policy_id=self.qos_policy_id)

        provider_lb_dict = utils.lb_dict_to_provider_dict(
            test_lb_dict, vip=vip, db_pools=self.test_db_pools,
            db_listeners=self.test_db_listeners)

        self.assertEqual(ref_prov_lb_dict, provider_lb_dict)

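Comparing this test's input and expected output shows the shape of the load balancer conversion: id becomes loadbalancer_id, the vip_* fields are taken from the Vip data model, and child pools and listeners are converted recursively. A simplified sketch (the signature and helper names follow the tests; the body is an assumption, not the patch's exact code):

def lb_dict_to_provider_dict(lb_dict, vip=None,
                             db_pools=None, db_listeners=None):
    # Sketch: strip internal fields, then apply LB-specific renames.
    new_dict = _base_to_provider_dict(lb_dict, include_project_id=True)
    new_dict['loadbalancer_id'] = new_dict.pop('id')
    if vip:
        # The stored Vip object is authoritative for the vip_* fields.
        new_dict['vip_address'] = vip.ip_address
        new_dict['vip_network_id'] = vip.network_id
        new_dict['vip_port_id'] = vip.port_id
        new_dict['vip_subnet_id'] = vip.subnet_id
        new_dict['vip_qos_policy_id'] = vip.qos_policy_id
    if db_pools:
        new_dict['pools'] = db_pools_to_provider_pools(db_pools)
    if db_listeners:
        new_dict['listeners'] = db_listeners_to_provider_listeners(
            db_listeners)
    return new_dict
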
    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
    def test_db_listeners_to_provider_listeners(self, mock_load_cert):
        mock_load_cert.return_value = {'tls_cert': 'cert 1',
                                       'sni_certs': ['cert 2', 'cert 3']}
        provider_listeners = utils.db_listeners_to_provider_listeners(
            self.test_db_listeners)
        self.assertEqual(self.provider_listeners, provider_listeners)

    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
    def test_listener_dict_to_provider_dict(self, mock_load_cert):
        mock_load_cert.return_value = {'tls_cert': 'cert 1',
                                       'sni_certs': ['cert 2', 'cert 3']}
        provider_listener = utils.listener_dict_to_provider_dict(
            self.test_listener1_dict)
        self.assertEqual(self.provider_listener1_dict, provider_listener)

    def test_db_pool_to_provider_pool(self):
        provider_pool = utils.db_pool_to_provider_pool(self.db_pool1)
        self.assertEqual(self.provider_pool1, provider_pool)

    def test_db_pools_to_provider_pools(self):
        provider_pools = utils.db_pools_to_provider_pools(self.test_db_pools)
        self.assertEqual(self.provider_pools, provider_pools)

    def test_pool_dict_to_provider_dict(self):
        provider_pool_dict = utils.pool_dict_to_provider_dict(
            self.test_pool1_dict)
        self.assertEqual(self.provider_pool1_dict, provider_pool_dict)

    def test_db_HM_to_provider_HM(self):
        provider_hm = utils.db_HM_to_provider_HM(self.db_hm1)
        self.assertEqual(self.provider_hm1, provider_hm)

    def test_hm_dict_to_provider_dict(self):
        provider_hm_dict = utils.hm_dict_to_provider_dict(self.test_hm1_dict)
        self.assertEqual(self.provider_hm1_dict, provider_hm_dict)

    def test_db_members_to_provider_members(self):
        provider_members = utils.db_members_to_provider_members(
            self.db_pool1_members)
        self.assertEqual(self.provider_pool1_members, provider_members)

    def test_member_dict_to_provider_dict(self):
        provider_member_dict = utils.member_dict_to_provider_dict(
            self.test_member1_dict)
        self.assertEqual(self.provider_member1_dict, provider_member_dict)

    def test_db_l7policies_to_provider_l7policies(self):
        provider_l7policies = utils.db_l7policies_to_provider_l7policies(
            self.db_l7policies)
        self.assertEqual(self.provider_l7policies, provider_l7policies)

    def test_l7policy_dict_to_provider_dict(self):
        provider_l7policy_dict = utils.l7policy_dict_to_provider_dict(
            self.test_l7policy1_dict)
        self.assertEqual(self.provider_l7policy1_dict, provider_l7policy_dict)

    def test_db_l7rules_to_provider_l7rules(self):
        provider_rules = utils.db_l7rules_to_provider_l7rules(self.db_l7Rules)
        self.assertEqual(self.provider_rules, provider_rules)

    def test_l7rule_dict_to_provider_dict(self):
        provider_rules_dict = utils.l7rule_dict_to_provider_dict(
            self.test_l7rule1_dict)
        self.assertEqual(self.provider_l7rule1_dict, provider_rules_dict)

    def test_vip_dict_to_provider_dict(self):
        test_vip_dict = {'ip_address': self.ip_address,
                         'network_id': self.network_id,
                         'port_id': self.port_id,
                         'subnet_id': self.subnet_id,
                         'qos_policy_id': self.qos_policy_id}

        provider_vip_dict = {'vip_address': self.ip_address,
                             'vip_network_id': self.network_id,
                             'vip_port_id': self.port_id,
                             'vip_subnet_id': self.subnet_id,
                             'vip_qos_policy_id': self.qos_policy_id}

        new_vip_dict = utils.vip_dict_to_provider_dict(test_vip_dict)
        self.assertEqual(provider_vip_dict, new_vip_dict)

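The VIP conversion is a straight key-prefixing exercise, as the expected dict shows. A minimal sketch, assuming only the five VIP fields used in the test:

def vip_dict_to_provider_dict(vip_dict):
    # Sketch: provider drivers receive VIP fields flattened onto the
    # load balancer with a 'vip_' prefix.
    key_map = {'ip_address': 'vip_address',
               'network_id': 'vip_network_id',
               'port_id': 'vip_port_id',
               'subnet_id': 'vip_subnet_id',
               'qos_policy_id': 'vip_qos_policy_id'}
    return {key_map[k]: v for k, v in vip_dict.items() if k in key_map}

The next test exercises provider_vip_dict_to_vip_obj, which appears to be the inverse mapping back onto a Vip data model object.
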
    def test_provider_vip_dict_to_vip_obj(self):
        provider_vip_dict = {'vip_address': self.ip_address,
                             'vip_network_id': self.network_id,
                             'vip_port_id': self.port_id,
                             'vip_subnet_id': self.subnet_id,
                             'vip_qos_policy_id': self.qos_policy_id}

        ref_vip = data_models.Vip(ip_address=self.ip_address,
                                  network_id=self.network_id,
                                  port_id=self.port_id,
                                  subnet_id=self.subnet_id,
                                  qos_policy_id=self.qos_policy_id)

        new_provider_vip = utils.provider_vip_dict_to_vip_obj(
            provider_vip_dict)
        self.assertEqual(ref_vip, new_provider_vip)
@ -71,6 +71,9 @@ console_scripts =
    haproxy-vrrp-check = octavia.cmd.haproxy_vrrp_check:main
octavia.api.drivers =
    noop_driver = octavia.api.drivers.noop_driver.driver:NoopProviderDriver
    amphora = octavia.api.drivers.amphora_driver.driver:AmphoraProviderDriver
    # octavia is an alias for backward compatibility
    octavia = octavia.api.drivers.amphora_driver.driver:AmphoraProviderDriver
octavia.api.handlers =
    simulated_handler = octavia.api.handlers.controller_simulator.handler:SimulatedControllerHandler
    queue_producer = octavia.api.handlers.queue.producer:ProducerHandler
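These entry points are what let the API resolve a provider name (including the legacy octavia alias) to a driver class. A sketch of how such a driver might be loaded via stevedore; the load_provider_driver helper shown here is an assumption for illustration, not the patch's exact code:

from stevedore import driver as stevedore_driver


def load_provider_driver(provider_name):
    # Sketch: resolve a provider name (e.g. 'amphora', or the legacy
    # 'octavia' alias) against the 'octavia.api.drivers' namespace.
    return stevedore_driver.DriverManager(
        namespace='octavia.api.drivers',
        name=provider_name,
        invoke_on_load=True).driver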
@ -113,6 +113,8 @@ Load balancer
+-----------------+--------+-----------------------------------------------+
| name            | string | Human-readable name of the resource.          |
+-----------------+--------+-----------------------------------------------+
| pools           | list   | A list of `Pool object`_.                     |
+-----------------+--------+-----------------------------------------------+
| project_id      | string | ID of the project owning this resource.       |
+-----------------+--------+-----------------------------------------------+
| vip_address     | string | The IP address of the Virtual IP (VIP).       |
@ -121,6 +123,8 @@ Load balancer
+-----------------+--------+-----------------------------------------------+
| vip_port_id     | string | The ID of the VIP port.                       |
+-----------------+--------+-----------------------------------------------+
|vip_qos_policy_id| string | The ID of the QoS policy for the VIP.         |
+-----------------+--------+-----------------------------------------------+
| vip_subnet_id   | string | The ID of the subnet for the VIP.             |
+-----------------+--------+-----------------------------------------------+

@ -145,12 +149,16 @@ Load balancer
+-----------------+--------+-----------------------------------------------+
| Name            | Type   | Description                                   |
+=================+========+===============================================+
| project_id      | string | ID of the project owning this resource.       |
+-----------------+--------+-----------------------------------------------+
| vip_address     | string | The IP address of the Virtual IP (VIP).       |
+-----------------+--------+-----------------------------------------------+
| vip_network_id  | string | The ID of the network for the VIP.            |
+-----------------+--------+-----------------------------------------------+
| vip_port_id     | string | The ID of the VIP port.                       |
+-----------------+--------+-----------------------------------------------+
|vip_qos_policy_id| string | The ID of the QoS policy for the VIP.         |
+-----------------+--------+-----------------------------------------------+
| vip_subnet_id   | string | The ID of the subnet for the VIP.             |
+-----------------+--------+-----------------------------------------------+
@ -364,6 +372,21 @@ Listener
+-----------------------+--------+------------------------------------------+
| sni_containers        | object | A PKCS12 format set of certificates.     |
+-----------------------+--------+------------------------------------------+
| timeout_client_data   | int    | Frontend client inactivity timeout in    |
|                       |        | milliseconds.                            |
+-----------------------+--------+------------------------------------------+
| timeout_member_connect| int    | Backend member connection timeout in     |
|                       |        | milliseconds.                            |
+-----------------------+--------+------------------------------------------+
| timeout_member_data   | int    | Backend member inactivity timeout in     |
|                       |        | milliseconds.                            |
+-----------------------+--------+------------------------------------------+
| timeout_tcp_inspect   | int    | Time, in milliseconds, to wait for       |
|                       |        | additional TCP packets for content       |
|                       |        | inspection.                              |
+-----------------------+--------+------------------------------------------+
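For reference, these timeout fields ride along in the provider listener dictionary. A purely illustrative fragment; the ID is hypothetical and the values are example milliseconds, not defaults taken from this patch:

# Illustrative only: a provider listener dict carrying the timeout
# fields documented above, in milliseconds.
provider_listener = {
    'listener_id': 'b36b2b09-9b0a-4b63-8ba3-1e0cbbcee013',  # hypothetical
    'protocol': 'HTTP',
    'protocol_port': 80,
    'timeout_client_data': 50000,    # frontend inactivity
    'timeout_member_connect': 5000,  # backend connection
    'timeout_member_data': 50000,    # backend inactivity
    'timeout_tcp_inspect': 0,        # extra wait for TCP content inspection
}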

.. _Supported HTTP Header Insertions:

@ -533,11 +556,7 @@ Pool
|                       |        | ROUND_ROBIN, LEAST_CONNECTIONS, or       |
|                       |        | SOURCE_IP.                               |
+-----------------------+--------+------------------------------------------+
| listener_id           | string | ID of listener. Required if              |
|                       |        | loadbalancer_id not specified.           |
+-----------------------+--------+------------------------------------------+
| loadbalancer_id       | string | ID of load balancer. Required if         |
|                       |        | listener_id not specified.               |
| loadbalancer_id       | string | ID of load balancer.                     |
+-----------------------+--------+------------------------------------------+
| members               | list   | A list of `Member objects`_.             |
+-----------------------+--------+------------------------------------------+
@ -684,6 +703,10 @@ Member
+-----------------------+--------+------------------------------------------+
| admin_state_up        | bool   | Admin state: True if up, False if down.  |
+-----------------------+--------+------------------------------------------+
| backup                | bool   | Is the member a backup? Backup members   |
|                       |        | only receive traffic when all non-backup |
|                       |        | members are down.                        |
+-----------------------+--------+------------------------------------------+
| member_id             | string | ID of member to create.                  |
+-----------------------+--------+------------------------------------------+
| monitor_address       | string | An alternate IP address used for health  |