OCP-Router: Ingress controller support
This is the second patch of the Ingress Controller capability. For the Kubernetes Ingress and OpenShift Route resources to work, the cluster must have an Ingress Controller running. This patch extends the LBaaS driver to support L7 load balancing, and it verifies, retrieves and stores the details of the L7 router load balancer (pre-created by an admin or by Devstack). The OCP-Route and K8s-Endpoint handlers (implemented in the next patch) will query the Ingress controller for the L7 router details.

Partially Implements: blueprint openshift-router-support
Change-Id: Id55169f6c9c1c607b2aa54c92711dfbd04a9e39d
parent 159fe3e0ae
commit 4ab102afa8
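As a rough illustration of how the follow-up handlers described in the commit message are expected to consume this patch, a handler could fetch the L7 router details through the new ingress controller singleton (a minimal sketch based on the IngressCtrlr API added below; the handler itself is not part of this patch):

    from kuryr_kubernetes.controller.ingress import ingress_ctl

    ingress_ctrl = ingress_ctl.IngressCtrlr.get_instance()
    # Blocks until the controller leaves the IN_PROGRESS state, then returns
    # the L7 router load balancer and its listeners, or (None, None) if the
    # ingress controller is disabled or the router could not be retrieved.
    router_lb, listeners = ingress_ctrl.get_router_and_listener()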
@@ -652,6 +652,7 @@ function create_ingress_l7_router {
    local fake_svc_name
    local l7_router_fip
    local project_id
    local lb_uuid

    lb_name=${KURYR_L7_ROUTER_NAME}
    max_timeout=600
@@ -676,6 +677,9 @@ function create_ingress_l7_router {
        --os-region "$REGION_NAME" \
        floating ip set --port "$lb_port_id" "$l7_router_fip"

    lb_uuid="$(get_loadbalancer_attribute "$lb_name" "id")"
    iniset "$KURYR_CONFIG" ingress l7_router_uuid "$lb_uuid"

    if is_service_enabled octavia; then
        echo -n "Octavia: no need to create fake k8s service for Ingress."
    else
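The iniset call above is what ties the Devstack plugin to the new configuration option: after stacking, the controller can read the stored UUID back through oslo.config, for example (a sketch; it assumes kuryr.conf has already been loaded by the service):

    from kuryr_kubernetes import config

    # 'l7_router_uuid' is registered under the [ingress] group by this patch;
    # it stays None unless Devstack (or an admin) wrote it into kuryr.conf.
    router_uuid = config.CONF.ingress.l7_router_uuid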
@@ -207,6 +207,10 @@ cache_defaults = [
               default="dogpile.cache.memory"),
]

ingress = [
    cfg.StrOpt('l7_router_uuid',
               help=_("UUID of the L7 Router")),
]

CONF = cfg.CONF
CONF.register_opts(kuryr_k8s_opts)
@@ -215,6 +219,7 @@ CONF.register_opts(k8s_opts, group='kubernetes')
CONF.register_opts(neutron_defaults, group='neutron_defaults')
CONF.register_opts(octavia_defaults, group='octavia_defaults')
CONF.register_opts(cache_defaults, group='cache_defaults')
CONF.register_opts(ingress, group='ingress')

CONF.register_opts(lib_config.core_opts)
CONF.register_opts(lib_config.binding_opts, 'binding')
@@ -38,6 +38,7 @@ CNI_EXCEPTION_CODE = 100
CNI_TIMEOUT_CODE = 200

KURYR_PORT_NAME = 'kuryr-pool-port'
KURYR_L7_ROUTER_HTTP_PORT = '80'

OCTAVIA_L2_MEMBER_MODE = "L2"
OCTAVIA_L3_MEMBER_MODE = "L3"
@@ -329,8 +329,9 @@ class PodVIFDriver(DriverBase):
        raise NotImplementedError()


@six.add_metaclass(abc.ABCMeta)
class LBaaSDriver(DriverBase):
-    """Manages Neutron/Octavia load balancer to support Kubernetes Services."""
+    """Base class for Openstack loadbalancer services."""

    ALIAS = 'endpoints_lbaas'

@@ -469,6 +470,91 @@ class LBaaSDriver(DriverBase):
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def get_lb_by_uuid(self, lb_uuid):
        """Get loadbalancer by loadbalancer uuid.

        :param lb_uuid: Loadbalancer uuid
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def get_pool_by_name(self, pool_name, project_id):
        """Get pool by pool's name.

        :param pool_name: the pool name
        :param project_id: project id
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def ensure_l7_policy(self, namespace, route_name, loadbalancer,
                         pool, listener_id):
        """Get or create L7 policy.

        :param namespace: ocp-route/k8s-ingress namespace
        :param route_name: ocp-route/k8s-ingress name
        :param loadbalancer: `LBaaSLoadBalancer` object
        :param pool: L7 policy's target pool
        :param listener_id: ID of listener to attach L7policy to

        """
        raise NotImplementedError()

    @abc.abstractmethod
    def release_l7_policy(self, loadbalancer, l7_policy):
        """Release l7policy.

        :param loadbalancer: `LBaaSLoadBalancer` object
        :param l7_policy: `LBaaSL7Policy` object

        """
        raise NotImplementedError()

    @abc.abstractmethod
    def ensure_l7_rule(self, loadbalancer, l7_policy, compare_type,
                       type, value):
        """Get or create L7 rule.

        :param loadbalancer: `LBaaSLoadBalancer` object
        :param l7_policy: `LBaaSL7Policy` object
        :param compare_type: comparison type for the L7 rule.
        :param type: the L7 rule type
        :param value: the value to use for the comparison.

        """
        raise NotImplementedError()

    @abc.abstractmethod
    def release_l7_rule(self, loadbalancer, l7_rule):
        """Release L7 rule.

        :param loadbalancer: `LBaaSLoadBalancer` object
        :param l7_rule: `LBaaSL7Rule` object

        """
        raise NotImplementedError()

    @abc.abstractmethod
    def update_l7_rule(self, l7_rule, new_value):
        """Update L7 rule value.

        :param l7_rule: `LBaaSL7Rule` object
        :param new_value: rule's new value

        """
        raise NotImplementedError()

    @abc.abstractmethod
    def is_pool_used_by_other_l7policies(self, l7policy, pool):
        """Checks if pool used by other L7policy.

        :param l7policy: `LBaaSL7Policy` object
        :param pool: `LBaaSPool` object

        """
        raise NotImplementedError()


@six.add_metaclass(abc.ABCMeta)
class VIFPoolDriver(PodVIFDriver):
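Taken together, these abstract methods define the calling sequence the route handlers in the follow-up patch are expected to use, roughly as follows (an illustrative sketch only, with placeholder variables such as pool_name, router_lb and listener_id; this is not code from the series itself):

    from kuryr_kubernetes.controller.drivers import base as drv_base

    lb_driver = drv_base.LBaaSDriver.get_instance()

    # Look up the pool that backs the route's service, then attach an L7
    # policy and a host-based rule to the L7 router's HTTP listener.
    pool = lb_driver.get_pool_by_name(pool_name, project_id)
    l7_policy = lb_driver.ensure_l7_policy(
        namespace, route_name, router_lb, pool, listener_id)
    lb_driver.ensure_l7_rule(
        router_lb, l7_policy, 'EQUAL_TO', 'HOST_NAME', 'www.example.com')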
@@ -35,6 +35,7 @@ LOG = logging.getLogger(__name__)

_ACTIVATION_TIMEOUT = CONF.neutron_defaults.lbaas_activation_timeout
_SUPPORTED_LISTENER_PROT = ('HTTP', 'HTTPS', 'TCP')
_L7_POLICY_ACT_REDIRECT_TO_POOL = 'REDIRECT_TO_POOL'


class LBaaSv2Driver(base.LBaaSDriver):
@@ -197,7 +198,7 @@ class LBaaSv2Driver(base.LBaaSDriver):

    def ensure_pool_attached_to_lb(self, loadbalancer, namespace,
                                   svc_name, protocol):
-        name = self.get_loadbalancer_pool_name(loadbalancer.name,
+        name = self.get_loadbalancer_pool_name(loadbalancer,
                                                namespace, svc_name)
        pool = obj_lbaas.LBaaSPool(name=name,
                                   project_id=loadbalancer.project_id,
@@ -334,7 +335,7 @@ class LBaaSv2Driver(base.LBaaSDriver):
            loadbalancer_id=pool.loadbalancer_id,
            protocol=pool.protocol, lb_algorithm=lb_algorithm)
        bogus_pool_ids = [p['id'] for p in pools.get('pools')
-                          if not p['listeners']]
+                          if not p['listeners'] and pool.name == p['name']]
        for pool_id in bogus_pool_ids:
            try:
                LOG.debug("Removing bogus pool %(id)s %(pool)s", {
@@ -476,3 +477,148 @@ class LBaaSv2Driver(base.LBaaSDriver):
                      loadbalancer.name)

        return None

    def get_lb_by_uuid(self, lb_uuid):
        lbaas = clients.get_loadbalancer_client()
        response = lbaas.show_loadbalancer(lb_uuid)
        try:
            return obj_lbaas.LBaaSLoadBalancer(
                id=response['loadbalancer']['id'],
                port_id=response['loadbalancer']['vip_port_id'],
                name=response['loadbalancer']['name'],
                project_id=response['loadbalancer']['project_id'],
                subnet_id=response['loadbalancer']['vip_subnet_id'],
                ip=response['loadbalancer']['vip_address'],
                security_groups=None,
                provider=response['loadbalancer']['provider'])
        except (KeyError, IndexError):
            LOG.debug("Couldn't find loadbalancer with uuid=%s", lb_uuid)
        return None

    def get_pool_by_name(self, pool_name, project_id):
        lbaas = clients.get_loadbalancer_client()

        # NOTE(yboaron): pool_name should be constructed using
        # get_loadbalancer_pool_name function, which means that pool's name
        # is unique

        pools_list = lbaas.list_lbaas_pools(
            project_id=project_id)
        for entry in pools_list['pools']:
            if not entry:
                continue
            if entry['name'] == pool_name:
                listener_id = (entry['listeners'][0]['id'] if
                               entry['listeners'] else None)
                return obj_lbaas.LBaaSPool(
                    name=entry['name'], project_id=entry['project_id'],
                    loadbalancer_id=entry['loadbalancers'][0]['id'],
                    listener_id=listener_id,
                    protocol=entry['protocol'], id=entry['id'])
        return None

    def ensure_l7_policy(self, namespace, route_name,
                         loadbalancer, pool,
                         listener_id):
        name = namespace + route_name
        l7_policy = obj_lbaas.LBaaSL7Policy(name=name,
                                            project_id=pool.project_id,
                                            listener_id=listener_id,
                                            redirect_pool_id=pool.id)

        return self._ensure_provisioned(
            loadbalancer, l7_policy, self._create_l7_policy,
            self._find_l7_policy)

    def release_l7_policy(self, loadbalancer, l7_policy):
        lbaas = clients.get_loadbalancer_client()
        self._release(
            loadbalancer, l7_policy, lbaas.delete_lbaas_l7policy,
            l7_policy.id)

    def _create_l7_policy(self, l7_policy):
        lbaas = clients.get_loadbalancer_client()
        response = lbaas.create_lbaas_l7policy({'l7policy': {
            'action': _L7_POLICY_ACT_REDIRECT_TO_POOL,
            'listener_id': l7_policy.listener_id,
            'name': l7_policy.name,
            'project_id': l7_policy.project_id,
            'redirect_pool_id': l7_policy.redirect_pool_id}})
        l7_policy.id = response['l7policy']['id']
        return l7_policy

    def _find_l7_policy(self, l7_policy):
        lbaas = clients.get_loadbalancer_client()
        response = lbaas.list_lbaas_l7policies(
            name=l7_policy.name,
            project_id=l7_policy.project_id,
            redirect_pool_id=l7_policy.redirect_pool_id,
            listener_id=l7_policy.listener_id)
        try:
            l7_policy.id = response['l7policies'][0]['id']
        except (KeyError, IndexError):
            return None
        return l7_policy

    def ensure_l7_rule(self, loadbalancer, l7_policy, compare_type,
                       type, value):

        l7_rule = obj_lbaas.LBaaSL7Rule(
            compare_type=compare_type, l7policy_id=l7_policy.id,
            type=type, value=value)
        return self._ensure_provisioned(
            loadbalancer, l7_rule, self._create_l7_rule,
            self._find_l7_rule)

    def _create_l7_rule(self, l7_rule):
        lbaas = clients.get_loadbalancer_client()
        response = lbaas.create_lbaas_l7rule(
            l7_rule.l7policy_id,
            {'rule': {'compare_type': l7_rule.compare_type,
                      'type': l7_rule.type,
                      'value': l7_rule.value}})
        l7_rule.id = response['rule']['id']
        return l7_rule

    def _find_l7_rule(self, l7_rule):
        lbaas = clients.get_loadbalancer_client()
        response = lbaas.list_lbaas_l7rules(
            l7_rule.l7policy_id,
            type=l7_rule.type,
            value=l7_rule.value,
            compare_type=l7_rule.compare_type)
        try:
            l7_rule.id = response['rules'][0]['id']
        except (KeyError, IndexError):
            return None
        return l7_rule

    def release_l7_rule(self, loadbalancer, l7_rule):
        lbaas = clients.get_loadbalancer_client()
        self._release(
            loadbalancer, l7_rule, lbaas.delete_lbaas_l7rule,
            l7_rule.id, l7_rule.l7policy_id)

    def update_l7_rule(self, l7_rule, new_value):
        lbaas = clients.get_loadbalancer_client()
        try:
            lbaas.update_lbaas_l7rule(
                l7_rule.id, l7_rule.l7policy_id,
                {'rule': {'value': new_value}})

        except n_exc.NeutronClientException as ex:
            LOG.error("Failed to update l7_rule- id=%s ",
                      l7_rule.id)
            raise ex

    def is_pool_used_by_other_l7policies(self, l7policy, pool):
        lbaas = clients.get_loadbalancer_client()
        l7policy_list = lbaas.list_lbaas_l7policies(
            project_id=l7policy.project_id)
        for entry in l7policy_list['l7policies']:
            if not entry:
                continue
            if (entry['redirect_pool_id'] == pool.id and
                    entry['id'] != l7policy.id):
                return True
        return False
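On the removal side, is_pool_used_by_other_l7policies() exists so the follow-up handler can decide whether the redirect pool itself may be reclaimed when a route is deleted. A plausible (hypothetical) flow, continuing the earlier sketch:

    lb_driver.release_l7_rule(router_lb, l7_rule)
    lb_driver.release_l7_policy(router_lb, l7_policy)
    if not lb_driver.is_pool_used_by_other_l7policies(l7_policy, pool):
        # No other L7 policy redirects to this pool, so it can be released
        # through the driver's existing pool API.
        lb_driver.release_pool(router_lb, pool)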
new file (0 lines): kuryr_kubernetes/controller/ingress/__init__.py
new file (159 lines): kuryr_kubernetes/controller/ingress/ingress_ctl.py
@@ -0,0 +1,159 @@
# Copyright (c) 2018 RedHat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
import time


from kuryr_kubernetes import config
from kuryr_kubernetes import constants as k_const
from kuryr_kubernetes.controller.drivers import base as drv_base
from kuryr_kubernetes import exceptions
from oslo_log import log as logging


_OCP_ROUTE_HANDLER = 'ocproute'
_INGRESS_LB_HANDLER = 'ingresslb'
_ROUTER_POLL_INTERVAL = 10
# NOTE(yboaron): LoadBalancers creation at Devstack is very slow, could take
# up to 20 minutes
_ROUTER_MANUAL_CREATION_TIMEOUT = 1200

LOG = logging.getLogger(__name__)


class L7Router(object):
    """L7Router is responsible for create/verify L7 LoadBalancer entity."""

    def __init__(self, router_uuid):

        # Note(yboaron) the LBaaS driver is used as the L7 router driver
        self._drv_l7_router = drv_base.LBaaSDriver.get_instance()
        self._l7_router_uuid = router_uuid
        self._l7_router_listeners = None
        self._router_lb = None

    def ensure_router(self):
        # retrieve router details
        self._router_lb = self._drv_l7_router.get_lb_by_uuid(
            self._l7_router_uuid)
        if not self._router_lb:
            LOG.error("Failed to retrieve L7_Router (UUID=%s)",
                      self._l7_router_uuid)
            raise exceptions.IngressControllerFailure
        # verify that loadbalancer is active
        try:
            self._drv_l7_router._wait_for_provisioning(
                self._router_lb, _ROUTER_MANUAL_CREATION_TIMEOUT)
        except exceptions.ResourceNotReady as e:
            LOG.error("Timed out waiting for L7 router to appear in "
                      "ACTIVE state: %s.", e)
            raise exceptions.IngressControllerFailure

        LOG.info("Ingress controller - "
                 "retrieve '%s' router details", self._router_lb)

        # TODO(yboaron) add support for HTTPS listener
        # create/verify listeners
        self._l7_router_listeners = {}
        listener = self._drv_l7_router.ensure_listener(
            self._router_lb, 'HTTP', k_const.KURYR_L7_ROUTER_HTTP_PORT)
        self._l7_router_listeners[k_const.KURYR_L7_ROUTER_HTTP_PORT] = listener

    def get_router(self):
        return self._router_lb

    def get_router_listeners(self):
        return self._l7_router_listeners


class IngressCtrlr(object):
    """IngressCtrlr is responsible for the Ingress controller capability

    The Ingress controller should create or verify (in case router pre-created
    by admin) L7 router/LB - the entity that will do the actual L7 routing.
    In addition the Ingress controller should provide the L7 router details
    to Ingress/ocp-route handlers and Endpoint handler.
    Both Ingress/ocp-route handlers and Endpoint handler should update the
    L7 rules of the L7 router.

    """
    instances = {}

    @classmethod
    def get_instance(cls):
        if cls not in IngressCtrlr.instances:
            IngressCtrlr.instances[cls] = cls()
        return IngressCtrlr.instances[cls]

    def __init__(self):
        self._l7_router = None
        self._status = 'DOWN'

    def _start_operation_impl(self):
        LOG.info('Ingress controller is enabled')
        self._l7_router = L7Router(config.CONF.ingress.l7_router_uuid)
        try:
            self._status = 'IN_PROGRESS'
            self._l7_router.ensure_router()
        except Exception:
            self._status = 'DOWN'
            LOG.error("Ingress controller - failed to get L7 router")
            return
        self._status = 'ACTIVE'
        LOG.info("Ingress controller - ACTIVE")

    def _is_ingress_controller_disabled(self):
        # Note(yboaron) To enable the ingress controller admin should :
        # A. Set the L7 router's FIP in kuryr.conf
        # and
        # B. Add K8S-ingress and OCP-route handlers to pluggable
        # handlers list
        configured_handlers = config.CONF.kubernetes.enabled_handlers
        return not (any(handler in configured_handlers for handler in
                        (_OCP_ROUTE_HANDLER, _INGRESS_LB_HANDLER)) and
                    config.CONF.ingress.l7_router_uuid)

    def start_operation(self):
        if self._is_ingress_controller_disabled():
            LOG.info('To enable Ingress controller either OCP-Route or '
                     'Ingress-LB handlers should be enabled, and '
                     'l7_router_uuid should be specified')
            return
        # Note(yboaron) Create a new thread for L7 router/LB configuration
        # verification, a separate thread is needed since it might be that
        # router not created yet and we can't block controller/service thread.
        eventlet.spawn(self._start_operation_impl)

    def get_router_and_listener(self):
        """This function returns L7 router and Listeners details,

        The caller to this function will be blocked until Ingress controller
        status is in stable (not in progress), the consumers of this function
        will be the OCP-Route and K8S-Ingress handlers
        """
        get_router_threshold = (time.time() + _ROUTER_MANUAL_CREATION_TIMEOUT)
        while True:
            if self._status != 'IN_PROGRESS':
                if self._l7_router:
                    return (self._l7_router.get_router(),
                            self._l7_router.get_router_listeners())
                else:
                    return None, None
            if time.time() > get_router_threshold:
                LOG.error("Ingress controller: get router - timeout expired")
                return None, None
            LOG.debug("Ingress controller - waiting till status is "
                      "!= IN_PROGRESS")
            time.sleep(_ROUTER_POLL_INTERVAL)
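start_operation() only does anything when both enablement conditions hold. For local experimentation the same conditions can be satisfied programmatically (a sketch; in a real deployment both values come from kuryr.conf, and the handler names must match the pluggable handlers that land in the follow-up patch):

    from kuryr_kubernetes import config
    from kuryr_kubernetes.controller.ingress import ingress_ctl

    # A) an OCP-Route or Ingress-LB handler is enabled, and
    # B) the UUID of the pre-created L7 router is configured.
    config.CONF.set_override('enabled_handlers',
                             ['vif', 'lb', 'lbaasspec', 'ocproute'],
                             group='kubernetes')
    config.CONF.set_override('l7_router_uuid',
                             '00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
                             group='ingress')

    ingress_ctl.IngressCtrlr.get_instance().start_operation()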
@@ -27,6 +27,7 @@ from stevedore.named import NamedExtensionManager
from kuryr_kubernetes import clients
from kuryr_kubernetes import config
from kuryr_kubernetes.controller.handlers import pipeline as h_pipeline
from kuryr_kubernetes.controller.ingress import ingress_ctl
from kuryr_kubernetes.controller.managers import health
from kuryr_kubernetes import objects
from kuryr_kubernetes import utils
@@ -97,6 +98,8 @@ class KuryrK8sService(six.with_metaclass(KuryrK8sServiceMeta,

    def start(self):
        LOG.info("Service '%s' starting", self.__class__.__name__)
        ingress_ctrl = ingress_ctl.IngressCtrlr.get_instance()
        ingress_ctrl.start_operation()
        super(KuryrK8sService, self).start()

        if not CONF.kubernetes.controller_ha:
@@ -72,3 +72,11 @@ class MultiPodDriverPoolConfigurationNotSupported(Exception):
    2. One of the pod drivers is not supported
    3. One of the pod drivers is not supported by its selected pool driver
    """


class IngressControllerFailure(Exception):
    """Exception represents a failure in the Ingress Controller functionality

    This exception is raised when we fail to activate properly the Ingress
    Controller.
    """
@@ -56,14 +56,16 @@ class LBaaSListener(k_obj.KuryrK8sObjectBase):

@obj_base.VersionedObjectRegistry.register
class LBaaSPool(k_obj.KuryrK8sObjectBase):
-    VERSION = '1.0'
+    # Version 1.0: Initial version
+    # Version 1.1: Added support for pool attached directly to loadbalancer.
+    VERSION = '1.1'

    fields = {
        'id': obj_fields.UUIDField(),
        'project_id': obj_fields.StringField(),
        'name': obj_fields.StringField(),
        'loadbalancer_id': obj_fields.UUIDField(),
-        'listener_id': obj_fields.UUIDField(),
+        'listener_id': obj_fields.UUIDField(nullable=True),
        'protocol': obj_fields.StringField(),
    }

@@ -139,3 +141,29 @@ class LBaaSServiceSpec(k_obj.KuryrK8sObjectBase):
        'type': obj_fields.StringField(nullable=True, default=None),
        'lb_ip': obj_fields.IPAddressField(nullable=True, default=None),
    }


@obj_base.VersionedObjectRegistry.register
class LBaaSL7Policy(k_obj.KuryrK8sObjectBase):
    VERSION = '1.0'

    fields = {
        'id': obj_fields.UUIDField(),
        'name': obj_fields.StringField(nullable=True),
        'listener_id': obj_fields.UUIDField(),
        'redirect_pool_id': obj_fields.UUIDField(),
        'project_id': obj_fields.StringField(),
    }


@obj_base.VersionedObjectRegistry.register
class LBaaSL7Rule(k_obj.KuryrK8sObjectBase):
    VERSION = '1.0'

    fields = {
        'id': obj_fields.UUIDField(),
        'compare_type': obj_fields.StringField(nullable=True),
        'l7policy_id': obj_fields.UUIDField(),
        'type': obj_fields.StringField(nullable=True),
        'value': obj_fields.StringField(nullable=True),
    }
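The two new versioned objects are plain data carriers; the driver above builds them roughly like this (values below are illustrative and reuse IDs from the unit tests in this patch):

    from kuryr_kubernetes.objects import lbaas as obj_lbaas

    l7_policy = obj_lbaas.LBaaSL7Policy(
        id='3A70CEC0-392D-4BC1-A27C-06E63A0FD54F',
        name='NAMESPACEROUTE_NAME',   # namespace + route name
        listener_id='D4F35594-27EB-4F4C-930C-31DD40F53B77',
        redirect_pool_id='D3FA400A-F543-4B91-9CD3-047AF0CE42D1',
        project_id='TEST_PROJECT')
    l7_rule = obj_lbaas.LBaaSL7Rule(
        id='3A70CEC0-392D-4BC1-A27C-06E63A0FD54F',
        compare_type='EQUAL_TO',
        l7policy_id=l7_policy.id,
        type='HOST_NAME',
        value='www.test.com')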
@@ -38,6 +38,7 @@ _kuryr_k8s_opts = [
    ('health_server', health.health_server_opts),
    ('cni_health_server', cni_health.cni_health_server_opts),
    ('namespace_subnet', namespace_subnet.namespace_subnet_driver_opts),
    ('ingress', config.ingress),
]
@@ -418,7 +418,7 @@ class TestLBaaSv2Driver(test_base.TestCase):
        # TODO(ivc): add unit test or get rid of _cleanup_bogus_pool
        self.skipTest("not implemented")

-    def test_find_pool(self):
+    def test_find_pool_by_listener(self):
        lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
        cls = d_lbaasv2.LBaaSv2Driver
        m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
@@ -442,7 +442,7 @@ class TestLBaaSv2Driver(test_base.TestCase):
                             getattr(ret, attr))
        self.assertEqual(pool_id, ret.id)

-    def test_find_pool_not_found(self):
+    def test_find_pool_by_listener_not_found(self):
        lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
        cls = d_lbaasv2.LBaaSv2Driver
        m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
@@ -700,3 +700,372 @@ class TestLBaaSv2Driver(test_base.TestCase):
    def test_provisioning_timer(self):
        # REVISIT(ivc): add test if _provisioning_timer is to stay
        self.skipTest("not implemented")

    def test_get_pool_by_name_not_found(self):
        lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
        cls = d_lbaasv2.LBaaSv2Driver
        m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)

        pools = {'name': 'KUKU', 'id': 'a2a62ea7-e3bf-40df-8c09-aa0c29876a6b'}
        lbaas.list_lbaas_pools.return_value = {'pools': [pools]}
        pool_name = 'NOT_KUKU'
        project_id = 'TEST_PROJECT'

        pool_id = cls.get_pool_by_name(m_driver, pool_name, project_id)
        self.assertIsNone(pool_id)

    def test_get_pool_by_name_found(self):
        self._test_get_pool_by_name_found(listener_is_empty=False)

    def test_get_pool_by_name_found_listener_is_empty(self):
        self._test_get_pool_by_name_found(listener_is_empty=True)

    def _test_get_pool_by_name_found(self, listener_is_empty):
        lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
        cls = d_lbaasv2.LBaaSv2Driver
        m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)

        pool_name = 'KUKU'
        pool_lb_id = "607226db-27ef-4d41-ae89-f2a800e9c2db"
        pool_project_id = "e3cd678b11784734bc366148aa37580e"
        pool_id = "ddb2b28f-89e9-45d3-a329-a359c3e39e4a"
        pool_protocol = "HTTP"
        pool_listener_id = "023f2e34-7806-443b-bfae-16c324569a3d"

        if listener_is_empty:
            resp_listeners = []
        else:
            resp_listeners = [{"id": pool_listener_id}]

        listener_id = (resp_listeners[0]['id'] if
                       resp_listeners else None)
        expected_result = obj_lbaas.LBaaSPool(
            name=pool_name, project_id=pool_project_id,
            loadbalancer_id=pool_lb_id,
            listener_id=listener_id,
            protocol=pool_protocol,
            id=pool_id)

        resp = {"pools": [
            {
                "protocol": pool_protocol,
                "loadbalancers": [
                    {
                        "id": pool_lb_id
                    }
                ],
                "listeners": resp_listeners,
                "project_id": pool_project_id,
                "id": pool_id,
                "name": pool_name
            }
        ]}

        lbaas.list_lbaas_pools.return_value = resp

        pool = cls.get_pool_by_name(m_driver, pool_name, pool_project_id)
        lbaas.list_lbaas_pools.assert_called_once()
        for attr in expected_result.obj_fields:
            self.assertEqual(getattr(expected_result, attr),
                             getattr(pool, attr))

    def test_get_pool_by_name_empty_list(self):
        lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
        cls = d_lbaasv2.LBaaSv2Driver
        m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
        pools = {}
        lbaas.list_lbaas_pools.return_value = {'pools': [pools]}
        pool_name = 'NOT_KUKU'
        project_id = 'TEST_PROJECT'

        pool = cls.get_pool_by_name(m_driver, pool_name, project_id)
        self.assertIsNone(pool)

    def test_get_lb_by_uuid(self):
        lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
        cls = d_lbaasv2.LBaaSv2Driver
        m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)

        loadbalancer_id = '00EE9E11-91C2-41CF-8FD4-7970579E5C4C'
        loadbalancer_vip = '1.2.3.4'
        loadbalancer_vip_port_id = '00EE9E11-91C2-41CF-8FD4-7970579EFFFF'
        loadbalancer_project_id = '00EE9E11-91C2-41CF-8FD4-7970579EAAAA'
        loadbalancer_name = 'MyName'
        loadbalancer_subnet_id = '00EE9E11-91C2-41CF-8FD4-7970579EBBBB'
        loadbalancer_provider = 'haproxy'

        expected_lb = obj_lbaas.LBaaSLoadBalancer(
            id=loadbalancer_id, port_id=loadbalancer_vip_port_id,
            name=loadbalancer_name, project_id=loadbalancer_project_id,
            subnet_id=loadbalancer_subnet_id, ip=loadbalancer_vip,
            security_groups=None, provider=loadbalancer_provider)

        resp = {'loadbalancer': {'id': loadbalancer_id,
                                 'vip_port_id': loadbalancer_vip_port_id,
                                 'name': loadbalancer_name,
                                 'project_id': loadbalancer_project_id,
                                 'vip_subnet_id': loadbalancer_subnet_id,
                                 'vip_address': loadbalancer_vip,
                                 'provider': loadbalancer_provider}}

        lbaas.show_loadbalancer.return_value = resp

        ret = cls.get_lb_by_uuid(m_driver, loadbalancer_id)
        lbaas.show_loadbalancer.assert_called_once()
        for attr in expected_lb.obj_fields:
            self.assertEqual(getattr(expected_lb, attr),
                             getattr(ret, attr))
        self.assertEqual(loadbalancer_id, ret.id)

    def test_get_lb_by_uuid_not_found(self):
        lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
        cls = d_lbaasv2.LBaaSv2Driver
        m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)

        resp = {'loadbalancer': {}}
        lbaas.show_loadbalancer.return_value = resp

        requested_uuid = '00EE9E11-91C2-41CF-8FD4-7970579EFFFF'
        lbaas.show_loadbalancer.return_value = resp

        ret = cls.get_lb_by_uuid(m_driver, requested_uuid)
        lbaas.show_loadbalancer.assert_called_once()
        self.assertIsNone(ret)

    def test_ensure_l7policy(self):
        cls = d_lbaasv2.LBaaSv2Driver
        m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
        expected_resp = mock.sentinel.expected_resp
        loadbalancer = mock.sentinel.expected_resp
        route_name = 'ROUTE_NAME'
        namespace = 'NAMESPACE'
        listener_id = 'D4F35594-27EB-4F4C-930C-31DD40F53B77'
        pool = obj_lbaas.LBaaSPool(
            id='00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
            project_id='TEST_PROJECT',
            name='NAME',
            loadbalancer_id='010101',
            listener_id='12345',
            protocol='TCP'
        )

        m_driver._ensure_provisioned.return_value = expected_resp

        cls.ensure_l7_policy(
            m_driver, namespace, route_name, loadbalancer, pool, listener_id)

        m_driver._ensure_provisioned.assert_called_once_with(
            loadbalancer, mock.ANY, m_driver._create_l7_policy,
            m_driver._find_l7_policy)
        l7policy = m_driver._ensure_provisioned.call_args[0][1]

        self.assertEqual("%s%s" % (namespace, route_name), l7policy.name)
        self.assertEqual(listener_id, l7policy.listener_id)
        self.assertEqual(pool.id, l7policy.redirect_pool_id)
        self.assertEqual(pool.project_id, l7policy.project_id)

    def test_release_l7policy(self):
        lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
        cls = d_lbaasv2.LBaaSv2Driver
        m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)

        loadbalancer = mock.Mock()
        l7_policy = mock.Mock()
        cls.release_l7_policy(m_driver, loadbalancer, l7_policy)

        m_driver._release.assert_called_once_with(
            loadbalancer, l7_policy, lbaas.delete_lbaas_l7policy,
            l7_policy.id)

    def test_create_l7policy(self):
        lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
        cls = d_lbaasv2.LBaaSv2Driver
        m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)

        l7_policy = obj_lbaas.LBaaSL7Policy(
            name='TEST_NAME',
            project_id='TEST_PROJECT',
            listener_id='D4F35594-27EB-4F4C-930C-31DD40F53B77',
            redirect_pool_id='D3FA400A-F543-4B91-9CD3-047AF0CE42D1')

        l7policy_id = '3A70CEC0-392D-4BC1-A27C-06E63A0FD54F'
        req = {'l7policy': {
            'action': 'REDIRECT_TO_POOL',
            'listener_id': l7_policy.listener_id,
            'name': l7_policy.name,
            'project_id': l7_policy.project_id,
            'redirect_pool_id': l7_policy.redirect_pool_id}}
        resp = {'l7policy': {'id': l7policy_id}}
        lbaas.create_lbaas_l7policy.return_value = resp

        ret = cls._create_l7_policy(m_driver, l7_policy)
        lbaas.create_lbaas_l7policy.assert_called_once_with(req)
        for attr in l7_policy.obj_fields:
            self.assertEqual(getattr(l7_policy, attr),
                             getattr(ret, attr))
        self.assertEqual(l7policy_id, ret.id)

    def test_find_l7_policy(self):
        lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
        cls = d_lbaasv2.LBaaSv2Driver
        m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
        l7_policy = obj_lbaas.LBaaSL7Policy(
            name='TEST_NAME',
            project_id='TEST_PROJECT',
            listener_id='D4F35594-27EB-4F4C-930C-31DD40F53B77',
            redirect_pool_id='D3FA400A-F543-4B91-9CD3-047AF0CE42D1')

        l7policy_id = '3A70CEC0-392D-4BC1-A27C-06E63A0FD54F'

        resp = {'l7policies': [{'id': l7policy_id}]}
        lbaas.list_lbaas_l7policies.return_value = resp

        ret = cls._find_l7_policy(m_driver, l7_policy)
        lbaas.list_lbaas_l7policies.assert_called_once_with(
            name=l7_policy.name,
            project_id=l7_policy.project_id,
            redirect_pool_id=l7_policy.redirect_pool_id,
            listener_id=l7_policy.listener_id)
        for attr in l7_policy.obj_fields:
            self.assertEqual(getattr(l7_policy, attr),
                             getattr(ret, attr))
        self.assertEqual(l7policy_id, ret.id)

    def test_find_l7_policy_not_found(self):
        lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
        cls = d_lbaasv2.LBaaSv2Driver
        m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
        l7_policy = obj_lbaas.LBaaSL7Policy(
            name='TEST_NAME',
            project_id='TEST_PROJECT',
            listener_id='D4F35594-27EB-4F4C-930C-31DD40F53B77',
            redirect_pool_id='D3FA400A-F543-4B91-9CD3-047AF0CE42D1')

        resp = {'l7policies': []}
        lbaas.list_lbaas_l7policies.return_value = resp

        ret = cls._find_l7_policy(m_driver, l7_policy)
        lbaas.list_lbaas_l7policies.assert_called_once_with(
            name=l7_policy.name,
            project_id=l7_policy.project_id,
            redirect_pool_id=l7_policy.redirect_pool_id,
            listener_id=l7_policy.listener_id)
        self.assertIsNone(ret)

    def test_ensure_l7_rule(self):
        cls = d_lbaasv2.LBaaSv2Driver
        m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
        expected_resp = mock.sentinel.expected_resp
        loadbalancer = mock.sentinel.expected_resp
        compare_type = 'EQUAL_TO'
        type = 'HOST_NAME'
        value = 'www.test.com'
        l7_policy = obj_lbaas.LBaaSL7Policy(
            id='00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
            name='TEST_NAME',
            project_id='TEST_PROJECT',
            listener_id='D4F35594-27EB-4F4C-930C-31DD40F53B77',
            redirect_pool_id='D3FA400A-F543-4B91-9CD3-047AF0CE42D1')

        m_driver._ensure_provisioned.return_value = expected_resp

        cls.ensure_l7_rule(
            m_driver, loadbalancer, l7_policy, compare_type, type, value)

        m_driver._ensure_provisioned.assert_called_once_with(
            loadbalancer, mock.ANY, m_driver._create_l7_rule,
            m_driver._find_l7_rule)
        l7rule = m_driver._ensure_provisioned.call_args[0][1]

        self.assertEqual(compare_type, l7rule.compare_type)
        self.assertEqual(type, l7rule.type)
        self.assertEqual(value, l7rule.value)

    def test_release_l7_rule(self):
        lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
        cls = d_lbaasv2.LBaaSv2Driver
        m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)

        loadbalancer = mock.Mock()
        l7_rule = mock.Mock()
        cls.release_l7_rule(m_driver, loadbalancer, l7_rule)

        m_driver._release.assert_called_once_with(
            loadbalancer, l7_rule, lbaas.delete_lbaas_l7rule,
            l7_rule.id, l7_rule.l7policy_id)

    def test_create_l7_rule(self):
        lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
        cls = d_lbaasv2.LBaaSv2Driver
        m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)

        l7_rule = obj_lbaas.LBaaSL7Rule(
            compare_type='EQUAL_TO',
            l7policy_id='D4F35594-27EB-4F4C-930C-31DD40F53B77',
            type='HOST_NAME',
            value='www.test.com')

        l7_rule_id = '3A70CEC0-392D-4BC1-A27C-06E63A0FD54F'

        req = {'rule': {
            'compare_type': l7_rule.compare_type,
            'type': l7_rule.type,
            'value': l7_rule.value}}

        resp = {'rule': {'id': l7_rule_id}}
        lbaas.create_lbaas_l7rule.return_value = resp

        ret = cls._create_l7_rule(m_driver, l7_rule)
        lbaas.create_lbaas_l7rule.assert_called_once_with(
            l7_rule.l7policy_id, req)
        for attr in l7_rule.obj_fields:
            self.assertEqual(getattr(l7_rule, attr),
                             getattr(ret, attr))
        self.assertEqual(l7_rule_id, ret.id)

    def test_find_l7_rule(self):
        lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
        cls = d_lbaasv2.LBaaSv2Driver
        m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
        l7_rule = obj_lbaas.LBaaSL7Rule(
            compare_type='EQUAL_TO',
            l7policy_id='D4F35594-27EB-4F4C-930C-31DD40F53B77',
            type='HOST_NAME',
            value='www.test.com')

        l7_rule_id = '3A70CEC0-392D-4BC1-A27C-06E63A0FD54F'
        resp = {'rules': [{'id': l7_rule_id}]}
        lbaas.list_lbaas_l7rules.return_value = resp

        ret = cls._find_l7_rule(m_driver, l7_rule)
        lbaas.list_lbaas_l7rules.assert_called_once_with(
            l7_rule.l7policy_id,
            type=l7_rule.type,
            value=l7_rule.value,
            compare_type=l7_rule.compare_type)

        for attr in l7_rule.obj_fields:
            self.assertEqual(getattr(l7_rule, attr),
                             getattr(ret, attr))
        self.assertEqual(l7_rule_id, ret.id)

    def test_find_l7_rule_not_found(self):
        lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
        cls = d_lbaasv2.LBaaSv2Driver
        m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
        l7_rule = obj_lbaas.LBaaSL7Rule(
            compare_type='EQUAL_TO',
            l7policy_id='D4F35594-27EB-4F4C-930C-31DD40F53B77',
            type='HOST_NAME',
            value='www.test.com')

        resp = {'rules': []}
        lbaas.list_lbaas_l7rules.return_value = resp

        ret = cls._find_l7_rule(m_driver, l7_rule)
        lbaas.list_lbaas_l7rules.assert_called_once_with(
            l7_rule.l7policy_id,
            type=l7_rule.type,
            value=l7_rule.value,
            compare_type=l7_rule.compare_type)
        self.assertIsNone(ret)
@@ -408,6 +408,37 @@ class FakeLBaaSDriver(drv_base.LBaaSDriver):
    def get_loadbalancer_pool_name(self, lb_name, namespace, svc_name):
        return "%s/%s/%s" % (lb_name, namespace, svc_name)

    def ensure_l7_policy(self, namespace, route_name,
                         loadbalancer, pool,
                         listener_id):
        pass

    def release_l7_policy(self, loadbalancer, l7_policy):
        pass

    def ensure_l7_rule(self, loadbalancer, l7_policy, compare_type,
                       type, value):
        pass

    def release_l7_rule(self, loadbalancer, l7_rule):
        pass

    def update_l7_rule(self, l7_rule, new_value):
        pass

    def ensure_pool_attached_to_lb(self, loadbalancer, namespace,
                                   svc_name, protocol):
        pass

    def get_pool_by_name(self, pool_name, project_id):
        pass

    def get_lb_by_uuid(self, vip):
        pass

    def is_pool_used_by_other_l7policies(self, l7policy, pool):
        pass


class TestLoadBalancerHandler(test_base.TestCase):
    @mock.patch('kuryr_kubernetes.controller.drivers.base'
@@ -0,0 +1,112 @@
# Copyright (c) 2018 RedHat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from kuryr_kubernetes.controller.ingress import ingress_ctl
from kuryr_kubernetes.objects import lbaas as obj_lbaas
from kuryr_kubernetes.tests import base as test_base
import mock


class TestIngressCtrlr(test_base.TestCase):
    def test_ingress_ctrlr_instance(self):
        ing_ctrl = ingress_ctl.IngressCtrlr.get_instance()
        self.assertIsNotNone(ing_ctrl)

    @mock.patch('kuryr_kubernetes.config.CONF')
    def test_ingress_ctrlr_conf_disabled(self, m_cfg):
        m_cfg.kubernetes.enabled_handlers = ['not_ocproute']
        m_cfg.ingress.l7_router_uuid = '00EE9E11-91C2-41CF-8FD4-7970579E5C4C'
        ing_ctrl = ingress_ctl.IngressCtrlr.get_instance()
        ing_ctrl.start_operation()
        ret_l7router, ret_listener = ing_ctrl.get_router_and_listener()
        self.assertIsNotNone(ing_ctrl)
        self.assertIsNone(ret_l7router)
        self.assertIsNone(ret_listener)

    @mock.patch('kuryr_kubernetes.config.CONF')
    def test_ingress_ctrlr_l7router_ip_not_defined(self, m_cfg):
        m_cfg.kubernetes.enabled_handlers = ['ocproute']
        m_cfg.ingress.l7_router_uuid = None
        ing_ctrl = ingress_ctl.IngressCtrlr.get_instance()
        ing_ctrl.start_operation()

        ret_l7router, ret_listener = ing_ctrl.get_router_and_listener()
        self.assertIsNotNone(ing_ctrl)
        self.assertIsNone(ret_l7router)
        self.assertIsNone(ret_listener)

    @mock.patch('eventlet.spawn')
    @mock.patch('kuryr_kubernetes.config.CONF')
    def test_ingress_ctrlr_router_enabled_k8s(self, m_cfg, m_eventlet):

        m_cfg.kubernetes.enabled_handlers = ['ingresslb']
        m_cfg.ingress.l7_router_uuid = '00EE9E11-91C2-41CF-8FD4-7970579E5C4C'
        ing_ctrl = ingress_ctl.IngressCtrlr.get_instance()
        ing_ctrl.start_operation()
        self.assertIsNotNone(ing_ctrl)
        m_eventlet.assert_called_once()

    @mock.patch('eventlet.spawn')
    @mock.patch('kuryr_kubernetes.config.CONF')
    def test_ingress_ctrlr_router_enabled_ocp(self, m_cfg, m_eventlet):

        m_cfg.kubernetes.enabled_handlers = ['ocproute']
        m_cfg.ingress.l7_router_uuid = '00EE9E11-91C2-41CF-8FD4-7970579E5C4C'
        ing_ctrl = ingress_ctl.IngressCtrlr.get_instance()
        ing_ctrl.start_operation()
        self.assertIsNotNone(ing_ctrl)
        m_eventlet.assert_called_once()

    @mock.patch('kuryr_kubernetes.controller.drivers.base'
                '.LBaaSDriver.get_instance')
    @mock.patch('kuryr_kubernetes.config.CONF')
    def test_ingress_ctrlr_router_created(self, m_cfg, m_get_lbaas_drv):

        m_cfg.kubernetes.enabled_handlers = ['ocproute', 'ingresslb']
        m_cfg.ingress.l7_router_uuid = '00EE9E11-91C2-41CF-8FD4-7970579E5C4C'

        l7_router = obj_lbaas.LBaaSLoadBalancer(
            name='TEST_NAME', project_id='TEST_PROJECT', ip='1.2.3.4',
            subnet_id='D3FA400A-F543-4B91-9CD3-047AF0CE42D1',
            security_groups=[],
            id='00EE9E11-91C2-41CF-8FD4-7970579E5C4C')

        m_driver = mock.Mock()
        m_driver.get_lb_by_uuid.return_value = l7_router
        m_get_lbaas_drv.return_value = m_driver

        ing_ctrl = ingress_ctl.IngressCtrlr.get_instance()
        ing_ctrl._start_operation_impl()
        self.assertIsNotNone(ing_ctrl)
        self.assertEqual(ing_ctrl._status, 'ACTIVE')
        ret_l7router, ret_listener = ing_ctrl.get_router_and_listener()
        self.assertEqual(ret_l7router, l7_router)

    @mock.patch('kuryr_kubernetes.controller.drivers.base'
                '.LBaaSDriver.get_instance')
    @mock.patch('kuryr_kubernetes.config.CONF')
    def test_ingress_ctrlr_router_l7_router_drv_fail(
            self, m_cfg, m_get_lbaas_drv):

        m_cfg.ingress.l7_router_uuid = '00EE9E11-91C2-41CF-8FD4-7970579E5C4C'
        m_cfg.kubernetes.enabled_handlers = ['ocproute', 'ingresslb']

        m_driver = mock.Mock()
        m_driver.get_lb_by_uuid.return_value = None
        m_get_lbaas_drv.return_value = m_driver

        ing_ctrl = ingress_ctl.IngressCtrlr.get_instance()
        ing_ctrl._start_operation_impl()
        self.assertEqual(ing_ctrl._status, 'DOWN')
        self.assertIsNotNone(ing_ctrl)