Merge "Remove openshift routes(Ingress) support"

This commit is contained in:
Zuul 2020-02-05 16:49:38 +00:00 committed by Gerrit Code Review
commit 9f33c5f839
39 changed files with 0 additions and 2586 deletions

View File

@@ -144,18 +144,6 @@
KURYR_K8S_SERIAL_TESTS: True
tempest_concurrency: 1
- job:
name: kuryr-kubernetes-tempest-openshift-ingress
parent: kuryr-kubernetes-tempest-openshift
description: |
Kuryr-Kubernetes tempest job using ingress controller and OpenShift
vars:
devstack_localrc:
KURYR_ENABLE_INGRESS: true
KURYR_ENABLED_HANDLERS: vif,lb,lbaasspec,namespace,pod_label,policy,kuryrnetpolicy,ocproute,ingresslb
voting: false
- job:
name: kuryr-kubernetes-tempest-containerized-crio
parent: kuryr-kubernetes-tempest-containerized

View File

@@ -36,7 +36,6 @@
- kuryr-kubernetes-tempest-containerized-openshift-serial
- kuryr-kubernetes-tempest-ovn
- kuryr-kubernetes-tempest-openshift
- kuryr-kubernetes-tempest-openshift-ingress
- kuryr-kubernetes-tempest-openshift-multi-vif
- kuryr-kubernetes-tempest-multinode-ha
- kuryr-kubernetes-tempest-containerized-crio

View File

@@ -137,20 +137,6 @@ enable_service kuryr-kubernetes
# Since Rocky release this is a default deployment configuration.
enable_service kuryr-daemon
# Kuryr enable L7 routing
# ========================
#
# Uncomment the next line to enable the L7 Routing/Ingress controller
#
#KURYR_ENABLE_INGRESS=True
# Kuryr L7 router/lb name
# ========================
#
# Edit the next line to change L7 Router/LB name
#
#KURYR_L7_ROUTER_NAME=kuryr-l7-router
# Containerized Kuryr
# ===================
#

View File

@@ -915,60 +915,6 @@ function run_kuryr_daemon {
run_process kuryr-daemon "$daemon_bin --config-file $KURYR_CONFIG" root root
}
function create_ingress_l7_router {
local lb_port_id
local lb_name
local project_id
local max_timeout
local lb_vip
local fake_svc_name
local l7_router_fip
local lb_uuid
lb_name=${KURYR_L7_ROUTER_NAME}
max_timeout=1000
project_id=$(get_or_create_project \
"$KURYR_NEUTRON_DEFAULT_PROJECT" default)
create_load_balancer "$lb_name" "$KURYR_NEUTRON_DEFAULT_SERVICE_SUBNET" "$project_id"
wait_for_lb $lb_name $max_timeout
lb_port_id="$(get_loadbalancer_attribute "$lb_name" "vip_port_id")"
# Allocate a FIP and bind it to the LB VIP
l7_router_fip=$(openstack --os-cloud devstack-admin \
--os-region "$REGION_NAME" \
floating ip create --project "$project_id" \
--subnet "$KURYR_NEUTRON_DEFAULT_EXT_SVC_SUBNET" \
"$KURYR_NEUTRON_DEFAULT_EXT_SVC_NET" \
-f value -c floating_ip_address)
openstack --os-cloud devstack-admin \
--os-region "$REGION_NAME" \
floating ip set --port "$lb_port_id" "$l7_router_fip"
lb_uuid="$(get_loadbalancer_attribute "$lb_name" "id")"
iniset "$KURYR_CONFIG" ingress l7_router_uuid "$lb_uuid"
# In case tempest is enabled, update the router's FIP in tempest.conf
if is_service_enabled tempest; then
iniset $TEMPEST_CONFIG kuryr_kubernetes ocp_router_fip "$l7_router_fip"
fi
if is_service_enabled octavia; then
echo -n "Octavia: no need to create fake k8s service for Ingress."
else
# Keep a fake, endpoint-less k8s service to keep the Kubernetes API server
# from allocating the ingress LB VIP
fake_svc_name='kuryr-svc-ingress'
echo -n "LBaaS: create fake k8s service: $fake_svc_name for Ingress."
lb_vip="$(get_loadbalancer_attribute "$lb_name" "vip_address")"
create_k8s_fake_service $fake_svc_name $lb_vip
fi
}
function configure_overcloud_vm_k8s_svc_sg {
local dst_port
@@ -1184,12 +1130,6 @@ elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then
if is_service_enabled kuryr-kubernetes; then
if is_service_enabled octavia; then
create_k8s_api_service
#create Ingress L7 router if required
enable_ingress=$(trueorfalse False KURYR_ENABLE_INGRESS)
if [ "$enable_ingress" == "True" ]; then
create_ingress_l7_router
fi
fi
# FIXME(dulek): This is a very late phase to start Kuryr services.

View File

@@ -97,12 +97,6 @@ KURYR_CONTROLLER_HA_PORT=${KURYR_CONTROLLER_HA_PORT:-16401}
KURYR_CONTROLLER_REPLICAS=${KURYR_CONTROLLER_REPLICAS:-1}
KURYR_FORCE_IMAGE_BUILD=${KURYR_FORCE_IMAGE_BUILD:-False}
# Kuryr ingress enable
KURYR_ENABLE_INGRESS=${KURYR_ENABLE_INGRESS:-False}
# Kuryr L7 router's name
KURYR_L7_ROUTER_NAME=${KURYR_L7_ROUTER_NAME:-kuryr-l7-router}
# Whether to use lower-constraints.txt when installing dependencies.
KURYR_CONTAINERS_USE_LOWER_CONSTRAINTS=${KURYR_CONTAINERS_USE_LOWER_CONSTRAINTS:-False}

View File

@@ -42,7 +42,6 @@ Design documents
vif_handler_drivers_design
health_manager
kuryr_kubernetes_ingress_design
kuryr_kubernetes_ocp_route_design
high_availability
kuryr_kubernetes_versions
port_crd_usage

View File

@@ -1,167 +0,0 @@
..
This work is licensed under a Creative Commons Attribution 3.0 Unported
License.
http://creativecommons.org/licenses/by/3.0/legalcode
Convention for heading levels in Neutron devref:
======= Heading 0 (reserved for the title in a document)
------- Heading 1
~~~~~~~ Heading 2
+++++++ Heading 3
''''''' Heading 4
(Avoid deeper levels because they do not render well.)
====================================================
Kuryr Kubernetes Openshift Routes integration design
====================================================
Purpose
-------
The purpose of this document is to present how Openshift Routes are supported
by kuryr-kubernetes.
Overview
--------
`OpenShift Origin`_ is an open source cloud application development and
hosting platform that automates the provisioning, management and scaling
of applications.
OpenShift Origin is a distribution of Kubernetes optimized for continuous
application development and multi-tenancy deployment. OpenShift adds developer
and operations-centric tools on top of Kubernetes to enable rapid application
development, easy deployment and scaling, and long-term lifecycle maintenance.
The `OpenShift Route`_ exposes a Service at a host name, like www.example.com,
so that external clients can reach it by name.
The Route is an Openshift resource that defines the rules you want to apply to
incoming connections.
The Openshift Routes concept was `introduced before Ingress`_ was supported by
Kubernetes; an Openshift Route provides functionality equivalent to a Kubernetes
Ingress.
Proposed Solution
-----------------
The solution relies on the L7 router, Service/Endpoints handler and L7 router
driver components described in the kuryr-kubernetes Ingress integration design.
A new component, the OCP-Route handler, satisfies requests for Openshift
Route resources.
Controller Handlers impact:
---------------------------
The controller handlers should be extended to support the OCP-Route resource.
The OCP-Route handler
~~~~~~~~~~~~~~~~~~~~~
The OCP-Route handler watches the apiserver for updates to Openshift
Route resources.
The following diagram describes the OCP-Route controller software architecture:
.. image:: ../../images/kuryr_k8s_ocp_route_ctrl_sw.svg
:alt: Ingress/OCP-Route controllers SW architecture
:align: center
:width: 100%
Similar to Kubernetes Ingress, each OCP-Route object is translated to an L7
policy in the L7 router, and the rules on the OCP-Route become L7 (URL) mapping
rules in that L7 policy. The L7 policy is configured to forward the filtered
traffic to an LBaaS pool. The LBaaS pool represents an Endpoints resource, and
it is the Service/Endpoints handler's responsibility to attach all of its
members to this pool. Since the Endpoints resource is not aware of changes in
the OCP-Route objects pointing to it, the OCP-Route handler should trigger this
notification; the notification is implemented by annotating the relevant
Endpoints resource.
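For illustration only, this mapping can be sketched in plain Python
(dictionaries stand in for kuryr's driver objects; the ``REDIRECT_TO_POOL``
action and the ``HOST_NAME``/``PATH`` rule types are the ones used by the
handler):

.. code-block:: python

    def route_to_l7_objects(route, listener_id, pool_id):
        # The L7 policy redirects the filtered traffic to the LBaaS pool
        # that the Service/Endpoints handler keeps in sync.
        spec = route['spec']
        policy = {
            'action': 'REDIRECT_TO_POOL',
            'listener_id': listener_id,
            'redirect_pool_id': pool_id,
            'name': route['metadata']['namespace'] + route['metadata']['name'],
        }
        # The Route host becomes an exact-match HOST_NAME rule and an
        # optional path becomes a STARTS_WITH PATH rule.
        rules = [{'compare_type': 'EQUAL_TO',
                  'type': 'HOST_NAME',
                  'value': spec['host']}]
        if spec.get('path'):
            rules.append({'compare_type': 'STARTS_WITH',
                          'type': 'PATH',
                          'value': spec['path']})
        return policy, rules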
Use cases examples
~~~~~~~~~~~~~~~~~~
This section describes the following scenarios in detail:
A. Create OCP-Route, create Service/Endpoints.
B. Create Service/Endpoints, create OCP-Route, delete OCP-Route.
* Create OCP-Route, create Service/Endpoints:
* OCP-Route is created under namespace 'mynamespace'
* OCP-Route details:
.. code-block:: yaml
apiVersion: v1
kind: Route
metadata:
name: test
spec:
host: www.example.com
to:
kind: Service
name: s1
* Since it is the first route pointing to this Service, the OCP-Route
handler will create an LBaaS pool (attached to the L7 router) named
'mynamespace_s1'.
* The OCP-Route handler will create an L7 rule and an L7 policy; the L7
policy directs its filtered traffic towards the 'mynamespace_s1' pool.
* Service/Endpoints is created under namespace 'mynamespace'
* name: s1
* The Service/Endpoints handler will create the user loadbalancer.
* The Service/Endpoints handler will check for a pool named
'mynamespace_s1' and add its members to this pool (a sketch of this
lookup appears after these use cases).
* Create Service/Endpoints, create OCP-Route, delete OCP-Route:
* Service/Endpoints is created under namespace 'mynamespace'
* name: s1
* The Service/Endpoints handler will create the user loadbalancer.
* Since no pool named 'mynamespace_s1' exists in the L7 router, the
Service/Endpoints handler will exit.
* OCP-Route is created with the same details as described in the above yaml file.
* Since it is the first route pointing to this Service, the OCP-Route
handler will create an LBaaS pool (attached to the L7 router) named
'mynamespace_s1'.
* The OCP-Route handler will create an L7 rule and an L7 policy; the L7 policy
is configured to direct its filtered traffic towards the 'mynamespace_s1' pool.
* The last step of the OCP-Route handler will be to notify
(using an annotation) the s1 Endpoints.
* As a result of the OCP-Route notification, the Endpoints handler will
be called.
The Service/Endpoints handler will update the member information
attached to the 'mynamespace_s1' pool.
* OCP-Route is deleted
* The OCP-Route handler will first delete the L7 rule and L7 policy.
* In case no other L7 policy points to the 'mynamespace_s1' pool, the
OCP-Route handler will delete the 'mynamespace_s1' pool's members and the pool
itself. The last step of the OCP-Route handler will be to notify the s1
Service/Endpoints.
* As a result of the OCP-Route handler notification, the Service/Endpoints
handler will set its internal state to the 'no Ingress is pointing' state.
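The pool lookup mentioned in the use cases above can be sketched as follows
(illustrative only: the '<namespace>_<service>' naming is taken from the
examples above, and ``get_pool_by_name`` is the LBaaS driver call the handlers
use):

.. code-block:: python

    def find_route_pool(lbaas_driver, l7_router, endpoints):
        # Pool names follow the '<namespace>_<service>' convention used in
        # the examples above, e.g. 'mynamespace_s1'.
        meta = endpoints['metadata']
        pool_name = "%s_%s" % (meta['namespace'], meta['name'])
        # Returns None when no OCP-Route has created the pool yet; in that
        # case the Service/Endpoints handler skips the L7 member sync.
        return lbaas_driver.get_pool_by_name(pool_name, l7_router.project_id)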
.. _OpenShift Origin: https://www.openshift.org/
.. _OpenShift Route: https://docs.openshift.com/enterprise/3.0/architecture/core_concepts/routes.html
.. _introduced before Ingress: https://kubernetes.io/docs/concepts/Services-networking/ingress/

View File

@@ -42,7 +42,6 @@ This section describes how you can install and configure kuryr-kubernetes
testing_connectivity
testing_nested_connectivity
containerized
ocp_route
multi_vif_with_npwg_spec
sriov
testing_udp_services

View File

@@ -1,161 +0,0 @@
===============================
Enable OCP-Router functionality
===============================
To enable the OCP-Router functionality, the following needs to be done:
- Setting up the L7 Router.
- Configuring Kuryr to support the L7 Router and OCP-Route resources.
Setting L7 Router
------------------
The L7 Router is the ingress point for the external traffic destined for
services in the K8S/OCP cluster. The following steps are needed to set up the
L7 Router:
#. Create the LoadBalancer that will perform the L7 load balancing:
.. code-block:: console
$ openstack loadbalancer create --name kuryr-l7-router --vip-subnet-id k8s-service-subnet
+---------------------+--------------------------------------+
| Field | Value |
+---------------------+--------------------------------------+
| admin_state_up | True |
| created_at | 2018-06-28T06:34:15 |
| description | |
| flavor | |
| id | 99f580e6-d894-442a-bc5f-4d14b41e10d2 |
| listeners | |
| name | kuryr-l7-router |
| operating_status | OFFLINE |
| pools | |
| project_id | 24042703aba141b89217e098e495cea1 |
| provider | amphora |
| provisioning_status | PENDING_CREATE |
| updated_at | None |
| vip_address | 10.0.0.171 |
| vip_network_id | 65875d24-5a54-43fb-91a7-087e956deb1a |
| vip_port_id | 42c6062a-644a-4004-a4a6-5a88bf596196 |
| vip_qos_policy_id | None |
| vip_subnet_id | 01f21201-65a3-4bc5-a7a8-868ccf4f0edd |
+---------------------+--------------------------------------+
$
#. Create a floating IP address that is accessible from the external network:
.. code-block:: console
$ openstack floating ip create --subnet public-subnet public
+---------------------+--------------------------------------+
| Field | Value |
+---------------------+--------------------------------------+
| created_at | 2018-06-28T06:31:36Z |
| description | |
| dns_domain | None |
| dns_name | None |
| fixed_ip_address | None |
| floating_ip_address | 172.24.4.3 |
| floating_network_id | 3371c2ba-edb5-45f2-a589-d35080177311 |
| id | c971f6d3-ba63-4318-a9e7-43cbf85437c2 |
| name | 172.24.4.3 |
| port_details | None |
| port_id | None |
| project_id | 24042703aba141b89217e098e495cea1 |
| qos_policy_id | None |
| revision_number | 0 |
| router_id | None |
| status | DOWN |
| subnet_id | 939eeb1f-20b8-4185-a6b1-6477fbe73409 |
| tags | [] |
| updated_at | 2018-06-28T06:31:36Z |
+---------------------+--------------------------------------+
$
#. Bind the floating IP to the LB VIP:
.. code-block:: console
[stack@gddggd devstack]$ openstack floating ip set --port 42c6062a-644a-4004-a4a6-5a88bf596196 172.24.4.3
Configure Kuryr to support L7 Router and OCP-Route resources
------------------------------------------------------------
#. Configure the L7 Router by adding the LB UUID to kuryr.conf:
.. code-block:: ini
[ingress]
l7_router_uuid = 99f580e6-d894-442a-bc5f-4d14b41e10d2
#. Enable the ocp-route and k8s-endpoint handlers. To do that you need to add
these handlers to the enabled handlers list in kuryr.conf (details on how to
edit this for a containerized deployment can be found at
:doc:`./devstack/containerized`):
.. code-block:: ini
[kubernetes]
enabled_handlers=vif,lb,lbaasspec,ocproute,ingresslb
Note: you need to restart the kuryr controller after applying the steps
detailed above. For devstack non-containerized deployments:
.. code-block:: console
$ sudo systemctl restart devstack@kuryr-kubernetes.service
And for containerized deployments:
.. code-block:: console
$ kubectl -n kube-system get pod | grep kuryr-controller
$ kubectl -n kube-system delete pod KURYR_CONTROLLER_POD_NAME
To directly enable both the L7 router and the OCP-Route handlers when deploying
with devstack, you just need to add the following to your local.conf file:
.. code-block:: bash
KURYR_ENABLE_INGRESS=True
KURYR_ENABLED_HANDLERS=vif,lb,lbaasspec,ocproute,ingresslb
Testing OCP-Route functionality
-------------------------------
#. Create a service:
.. code-block:: console
$ oc run --image=celebdor/kuryr-demo kuryr-demo
$ oc scale dc/kuryr-demo --replicas=2
$ oc expose dc/kuryr-demo --port 80 --target-port 8080
#. Create a Route object pointing to the above service (kuryr-demo):
.. code-block:: console
$ cat >> route.yaml << EOF
> apiVersion: v1
> kind: Route
> metadata:
> name: testroute
> spec:
> host: www.firstroute.com
> to:
> kind: Service
> name: kuryr-demo
> EOF
$ oc create -f route.yaml
#. Curl the L7 router's FIP using the specified hostname:
.. code-block:: console
$ curl --header 'Host: www.firstroute.com' 172.24.4.3
kuryr-demo-1-gzgj2: HELLO, I AM ALIVE!!!
$
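#. Optionally, a path-based rule can be exercised the same way (an
illustrative sketch, assuming the FIP and service names used above; the
path is translated into a STARTS_WITH PATH L7 rule, so only requests whose
URL begins with ``/api`` match this route):

.. code-block:: console

    $ cat >> route-path.yaml << EOF
    > apiVersion: v1
    > kind: Route
    > metadata:
    >   name: testpathroute
    > spec:
    >   host: www.firstroute.com
    >   path: /api
    >   to:
    >     kind: Service
    >     name: kuryr-demo
    > EOF
    $ oc create -f route-path.yaml
    $ curl --header 'Host: www.firstroute.com' 172.24.4.3/api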

View File

@@ -263,11 +263,6 @@ cache_defaults = [
default="dogpile.cache.memory"),
]
ingress = [
cfg.StrOpt('l7_router_uuid',
help=_("UUID of the L7 Router")),
]
nested_vif_driver_opts = [
cfg.StrOpt('worker_nodes_subnet',
help=_("Neutron subnet ID for k8s worker node vms."),
@@ -325,7 +320,6 @@ CONF.register_opts(k8s_opts, group='kubernetes')
CONF.register_opts(neutron_defaults, group='neutron_defaults')
CONF.register_opts(octavia_defaults, group='octavia_defaults')
CONF.register_opts(cache_defaults, group='cache_defaults')
CONF.register_opts(ingress, group='ingress')
CONF.register_opts(nested_vif_driver_opts, group='pod_vif_nested')
CONF.register_opts(sriov_opts, group='sriov')

View File

@@ -43,10 +43,6 @@ K8S_ANNOTATION_LBAAS_SPEC = K8S_ANNOTATION_PREFIX + '-lbaas-spec'
K8S_ANNOTATION_LBAAS_STATE = K8S_ANNOTATION_PREFIX + '-lbaas-state'
K8S_ANNOTATION_NET_CRD = K8S_ANNOTATION_PREFIX + '-net-crd'
K8S_ANNOTATION_NETPOLICY_CRD = K8S_ANNOTATION_PREFIX + '-netpolicy-crd'
K8S_ANNOTATION_LBAAS_RT_STATE = K8S_ANNOTATION_PREFIX + '-lbaas-route-state'
K8S_ANNOTATION_LBAAS_RT_NOTIF = K8S_ANNOTATION_PREFIX + '-lbaas-route-notif'
K8S_ANNOTATION_ROUTE_STATE = K8S_ANNOTATION_PREFIX + '-route-state'
K8S_ANNOTATION_ROUTE_SPEC = K8S_ANNOTATION_PREFIX + '-route-spec'
K8S_ANNOTATION_NPWG_PREFIX = 'k8s.v1.cni.cncf.io'
K8S_ANNOTATION_NPWG_NETWORK = K8S_ANNOTATION_NPWG_PREFIX + '/networks'
@@ -65,7 +61,6 @@ CNI_EXCEPTION_CODE = 100
CNI_TIMEOUT_CODE = 200
KURYR_PORT_NAME = 'kuryr-pool-port'
KURYR_L7_ROUTER_HTTP_PORT = '80'
KURYR_VIF_TYPE_SRIOV = 'sriov'
OCTAVIA_L2_MEMBER_MODE = "L2"

View File

@@ -631,91 +631,6 @@ class LBaaSDriver(DriverBase):
"""
raise NotImplementedError()
@abc.abstractmethod
def get_lb_by_uuid(self, lb_uuid):
"""Get loadbalancer by loadbalancer uuid.
:param lb_uuid: Loadbalancer uuid
"""
raise NotImplementedError()
@abc.abstractmethod
def get_pool_by_name(self, pool_name, project_id):
"""Get pool by pool's name.
:param pool_name: the pool name
:param project_id: project id
"""
raise NotImplementedError()
@abc.abstractmethod
def ensure_l7_policy(self, namespace, route_name, loadbalancer,
pool, listener_id):
"""Get or create L7 policy.
:param namespace: ocp-route/k8s-ingress namespace
:param route_name: ocp-route/k8s-ingress name
:param loadbalancer: `LBaaSLoadBalancer` object
:param pool: L7 policy's target pool
:param listener_id: ID of listener to attach L7policy to
"""
raise NotImplementedError()
@abc.abstractmethod
def release_l7_policy(self, loadbalancer, l7_policy):
"""Release l7policy.
:param loadbalancer: `LBaaSLoadBalancer` object
:param l7_policy: `LBaaSL7Policy` object
"""
raise NotImplementedError()
@abc.abstractmethod
def ensure_l7_rule(self, loadbalancer, l7_policy, compare_type,
type, value):
"""Get or create L7 rule.
:param loadbalancer: `LBaaSLoadBalancer` object
:param l7_policy: `LBaaSL7Policy` object
:param compare_type: comparison type for the L7 rule.
:param type: the L7 rule type
:param value: the value to use for the comparison.
"""
raise NotImplementedError()
@abc.abstractmethod
def release_l7_rule(self, loadbalancer, l7_rule):
"""Release L7 rule.
:param loadbalancer: `LBaaSLoadBalancer` object
:param l7_rule: `LBaaSL7Rule` object
"""
raise NotImplementedError()
@abc.abstractmethod
def update_l7_rule(self, l7_rule, new_value):
"""Update L7 rule value.
:param l7_rule: `LBaaSL7Rule` object
:param new_value: rule's new value
"""
raise NotImplementedError()
@abc.abstractmethod
def is_pool_used_by_other_l7policies(self, l7policy, pool):
"""Checks if pool used by other L7policy.
:param l7policy: `LBaaSL7Policy` object
:param pool: `LBaaSPool` object
"""
raise NotImplementedError()
@abc.abstractmethod
def update_lbaas_sg(self, service, sgs):
"""Update security group rules associated to the loadbalancer

View File

@@ -20,8 +20,6 @@ import time
import requests
from openstack import exceptions as os_exc
from openstack.load_balancer.v2 import l7_policy as o_l7p
from openstack.load_balancer.v2 import l7_rule as o_l7r
from openstack.load_balancer.v2 import listener as o_lis
from openstack.load_balancer.v2 import load_balancer as o_lb
from openstack.load_balancer.v2 import member as o_mem
@@ -43,7 +41,6 @@ CONF = cfg.CONF
LOG = logging.getLogger(__name__)
_ACTIVATION_TIMEOUT = CONF.neutron_defaults.lbaas_activation_timeout
_L7_POLICY_ACT_REDIRECT_TO_POOL = 'REDIRECT_TO_POOL'
# NOTE(yboaron):Prior to sending create request to Octavia, LBaaS driver
# verifies that LB is in a stable state by polling LB's provisioning_status
# using backoff timer.
@@ -905,152 +902,6 @@ class LBaaSv2Driver(base.LBaaSDriver):
return None
def get_lb_by_uuid(self, lb_uuid):
lbaas = clients.get_loadbalancer_client()
try:
response = lbaas.get_load_balancer(lb_uuid)
except os_exc.NotFoundException:
LOG.debug("Couldn't find loadbalancer with uuid=%s", lb_uuid)
return None
return obj_lbaas.LBaaSLoadBalancer(
id=response.id,
port_id=response.vip_port_id,
name=response.name,
project_id=response.project_id,
subnet_id=response.vip_subnet_id,
ip=response.vip_address,
security_groups=None,
provider=response.provider)
def get_pool_by_name(self, pool_name, project_id):
lbaas = clients.get_loadbalancer_client()
# NOTE(yboaron): pool_name should be constructed using
# get_loadbalancer_pool_name function, which means that pool's name
# is unique
pools = lbaas.pools(project_id=project_id)
for entry in pools:
if not entry:
continue
if entry.name == pool_name:
listener_id = (entry.listeners[0].id if
entry.listeners else None)
return obj_lbaas.LBaaSPool(
name=entry.name, project_id=entry.project_id,
loadbalancer_id=entry.loadbalancers[0].id,
listener_id=listener_id,
protocol=entry.protocol, id=entry.id)
return None
def ensure_l7_policy(self, namespace, route_name,
loadbalancer, pool,
listener_id):
name = namespace + route_name
l7_policy = obj_lbaas.LBaaSL7Policy(name=name,
project_id=pool.project_id,
listener_id=listener_id,
redirect_pool_id=pool.id)
return self._ensure_provisioned(
loadbalancer, l7_policy, self._create_l7_policy,
self._find_l7_policy)
def release_l7_policy(self, loadbalancer, l7_policy):
lbaas = clients.get_loadbalancer_client()
self._release(
loadbalancer, l7_policy, lbaas.delete_l7_policy,
l7_policy.id)
def _create_l7_policy(self, l7_policy):
request = {
'action': _L7_POLICY_ACT_REDIRECT_TO_POOL,
'listener_id': l7_policy.listener_id,
'name': l7_policy.name,
'project_id': l7_policy.project_id,
'redirect_pool_id': l7_policy.redirect_pool_id,
}
self.add_tags('l7policy', request)
response = self._post_lb_resource(o_l7p.L7Policy, request)
l7_policy.id = response['id']
return l7_policy
def _find_l7_policy(self, l7_policy):
lbaas = clients.get_loadbalancer_client()
response = lbaas.l7_policies(
name=l7_policy.name,
project_id=l7_policy.project_id,
redirect_pool_id=l7_policy.redirect_pool_id,
listener_id=l7_policy.listener_id)
try:
l7_policy.id = next(response).id
except (KeyError, StopIteration):
return None
return l7_policy
def ensure_l7_rule(self, loadbalancer, l7_policy, compare_type,
type, value):
l7_rule = obj_lbaas.LBaaSL7Rule(
compare_type=compare_type, l7policy_id=l7_policy.id,
type=type, value=value)
return self._ensure_provisioned(
loadbalancer, l7_rule, self._create_l7_rule,
self._find_l7_rule)
def _create_l7_rule(self, l7_rule):
request = {
'compare_type': l7_rule.compare_type,
'type': l7_rule.type,
'value': l7_rule.value
}
self.add_tags('rule', request)
response = self._post_lb_resource(o_l7r.L7Rule, request,
l7policy_id=l7_rule.l7policy_id)
l7_rule.id = response['id']
return l7_rule
def _find_l7_rule(self, l7_rule):
lbaas = clients.get_loadbalancer_client()
response = lbaas.l7_rules(
l7_rule.l7policy_id,
type=l7_rule.type,
value=l7_rule.value,
compare_type=l7_rule.compare_type)
try:
l7_rule.id = next(response).id
except (KeyError, StopIteration):
return None
return l7_rule
def release_l7_rule(self, loadbalancer, l7_rule):
lbaas = clients.get_loadbalancer_client()
self._release(
loadbalancer, l7_rule, lbaas.delete_l7_rule,
l7_rule.id, l7_rule.l7policy_id)
def update_l7_rule(self, l7_rule, new_value):
lbaas = clients.get_loadbalancer_client()
try:
lbaas.update_l7_rule(
l7_rule.id, l7_rule.l7policy_id,
value=new_value)
except os_exc.SDKException:
LOG.exception("Failed to update l7_rule- id=%s ", l7_rule.id)
raise
def is_pool_used_by_other_l7policies(self, l7policy, pool):
lbaas = clients.get_loadbalancer_client()
l7policy_list = lbaas.l7_policies(project_id=l7policy.project_id)
for entry in l7policy_list:
if not entry:
continue
if (entry.redirect_pool_id == pool.id and
entry.id != l7policy.id):
return True
return False
def update_lbaas_sg(self, service, sgs):
LOG.debug('Setting SG for LBaaS VIP port')

View File

@@ -1,213 +0,0 @@
# Copyright (c) 2018 RedHat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_serialization import jsonutils
from kuryr_kubernetes import clients
from kuryr_kubernetes import config
from kuryr_kubernetes import constants as k_const
from kuryr_kubernetes.controller.drivers import base as drv_base
from kuryr_kubernetes.controller.handlers import lbaas as h_lbaas
from kuryr_kubernetes.controller.ingress import ingress_ctl
from kuryr_kubernetes.objects import lbaas as obj_lbaas
from kuryr_kubernetes import utils
LOG = logging.getLogger(__name__)
class IngressLoadBalancerHandler(h_lbaas.LoadBalancerHandler):
"""IngressLoadBalancerHandler handles K8s Endpoints events.
IngressLoadBalancerHandler handles K8s Endpoints events and tracks
changes in LBaaSServiceSpec to update Ingress Controller
L7 router accordingly.
"""
OBJECT_KIND = k_const.K8S_OBJ_ENDPOINTS
OBJECT_WATCH_PATH = "%s/%s" % (k_const.K8S_API_BASE, "endpoints")
def __init__(self):
super(IngressLoadBalancerHandler, self).__init__()
self._drv_lbaas = drv_base.LBaaSDriver.get_instance()
self._l7_router = None
def _should_ignore(self, endpoints, lbaas_spec):
return not(lbaas_spec and
self._has_pods(endpoints))
def on_present(self, endpoints):
if not self._l7_router:
ing_ctl = ingress_ctl.IngressCtrlr.get_instance()
self._l7_router, listener = ing_ctl.get_router_and_listener()
if not self._l7_router:
LOG.info("No L7 router found - do nothing")
return
lbaas_spec = utils.get_lbaas_spec(endpoints)
if self._should_ignore(endpoints, lbaas_spec):
return
pool_name = self._drv_lbaas.get_loadbalancer_pool_name(
self._l7_router, endpoints['metadata']['namespace'],
endpoints['metadata']['name'])
pool = self._drv_lbaas.get_pool_by_name(pool_name,
self._l7_router.project_id)
if not pool:
if self._get_lbaas_route_state(endpoints):
self._set_lbaas_route_state(endpoints, None)
LOG.debug("L7 routing: no route defined for service "
":%s - do nothing", endpoints['metadata']['name'])
else:
# pool was found in the L7 router LB, verify that members are up to date
lbaas_route_state = self._get_lbaas_route_state(endpoints)
if not lbaas_route_state:
lbaas_route_state = obj_lbaas.LBaaSRouteState()
lbaas_route_state.pool = pool
if self._sync_lbaas_route_members(endpoints,
lbaas_route_state, lbaas_spec):
self._set_lbaas_route_state(endpoints, lbaas_route_state)
self._clear_route_notification(endpoints)
def on_deleted(self, endpoints):
if not self._l7_router:
LOG.info("No L7 router found - do nothing")
return
lbaas_route_state = self._get_lbaas_route_state(endpoints)
if not lbaas_route_state:
return
self._remove_unused_route_members(endpoints, lbaas_route_state,
obj_lbaas.LBaaSServiceSpec())
def _sync_lbaas_route_members(self, endpoints,
lbaas_route_state, lbaas_spec):
changed = False
if self._remove_unused_route_members(
endpoints, lbaas_route_state, lbaas_spec):
changed = True
if self._add_new_route_members(endpoints, lbaas_route_state):
changed = True
return changed
def _add_new_route_members(self, endpoints, lbaas_route_state):
changed = False
current_targets = {(str(m.ip), m.port)
for m in lbaas_route_state.members}
for subset in endpoints.get('subsets', []):
subset_ports = subset.get('ports', [])
for subset_address in subset.get('addresses', []):
try:
target_ip = subset_address['ip']
target_ref = subset_address['targetRef']
if target_ref['kind'] != k_const.K8S_OBJ_POD:
continue
except KeyError:
continue
for subset_port in subset_ports:
target_port = subset_port['port']
if (target_ip, target_port) in current_targets:
continue
# TODO(apuimedo): Do not pass subnet_id at all when in
# L3 mode once old neutron-lbaasv2 is not supported, as
# octavia does not require it
if (config.CONF.octavia_defaults.member_mode ==
k_const.OCTAVIA_L2_MEMBER_MODE):
member_subnet_id = self._get_pod_subnet(target_ref,
target_ip)
else:
# We use the service subnet id so that the connectivity
# from VIP to pods happens in layer 3 mode, i.e.,
# routed.
member_subnet_id = self._l7_router.subnet_id
member = self._drv_lbaas.ensure_member(
loadbalancer=self._l7_router,
pool=lbaas_route_state.pool,
subnet_id=member_subnet_id,
ip=target_ip,
port=target_port,
target_ref_namespace=target_ref['namespace'],
target_ref_name=target_ref['name'])
lbaas_route_state.members.append(member)
changed = True
return changed
def _remove_unused_route_members(
self, endpoints, lbaas_route_state, lbaas_spec):
spec_port_names = {p.name for p in lbaas_spec.ports}
current_targets = {(a['ip'], p['port'])
for s in endpoints['subsets']
for a in s['addresses']
for p in s['ports']
if p.get('name') in spec_port_names}
removed_ids = set()
for member in lbaas_route_state.members:
if (str(member.ip), member.port) in current_targets:
continue
self._drv_lbaas.release_member(self._l7_router, member)
removed_ids.add(member.id)
if removed_ids:
lbaas_route_state.members = [
m for m in lbaas_route_state.members
if m.id not in removed_ids]
return bool(removed_ids)
def _set_lbaas_route_state(self, endpoints, route_state):
if route_state is None:
LOG.debug("Removing LBaaSRouteState annotation: %r", route_state)
annotation = None
else:
route_state.obj_reset_changes(recursive=True)
LOG.debug("Setting LBaaSRouteState annotation: %r", route_state)
annotation = jsonutils.dumps(route_state.obj_to_primitive(),
sort_keys=True)
k8s = clients.get_kubernetes_client()
k8s.annotate(endpoints['metadata']['selfLink'],
{k_const.K8S_ANNOTATION_LBAAS_RT_STATE: annotation},
resource_version=endpoints['metadata']['resourceVersion'])
def _get_lbaas_route_state(self, endpoints):
try:
annotations = endpoints['metadata']['annotations']
annotation = annotations[k_const.K8S_ANNOTATION_LBAAS_RT_STATE]
except KeyError:
return None
obj_dict = jsonutils.loads(annotation)
obj = obj_lbaas.LBaaSRouteState.obj_from_primitive(obj_dict)
LOG.debug("Got LBaaSRouteState from annotation: %r", obj)
return obj
def _clear_route_notification(self, endpoints):
try:
annotations = endpoints['metadata']['annotations']
annotation = annotations[
k_const.K8S_ANNOTATION_LBAAS_RT_NOTIF]
except KeyError:
return
LOG.debug("Removing LBaaSRouteNotifier annotation")
annotation = None
k8s = clients.get_kubernetes_client()
k8s.annotate(
endpoints['metadata']['selfLink'],
{k_const.K8S_ANNOTATION_LBAAS_RT_NOTIF: annotation},
resource_version=endpoints['metadata']['resourceVersion'])

View File

@@ -1,159 +0,0 @@
# Copyright (c) 2018 RedHat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from kuryr_kubernetes import config
from kuryr_kubernetes import constants as k_const
from kuryr_kubernetes.controller.drivers import base as drv_base
from kuryr_kubernetes import exceptions
from oslo_log import log as logging
_OCP_ROUTE_HANDLER = 'ocproute'
_INGRESS_LB_HANDLER = 'ingresslb'
_ROUTER_POLL_INTERVAL = 10
# NOTE(yboaron): LoadBalancer creation on Devstack is very slow and could
# take up to 20 minutes
_ROUTER_MANUAL_CREATION_TIMEOUT = 1200
LOG = logging.getLogger(__name__)
class L7Router(object):
"""L7Router is responsible for create/verify L7 LoadBalancer entity."""
def __init__(self, router_uuid):
# Note(yboaron) the LBaaS driver is used as the L7 router driver
self._drv_l7_router = drv_base.LBaaSDriver.get_instance()
self._l7_router_uuid = router_uuid
self._l7_router_listeners = None
self._router_lb = None
def ensure_router(self):
# retrieve router details
self._router_lb = self._drv_l7_router.get_lb_by_uuid(
self._l7_router_uuid)
if not self._router_lb:
LOG.error("Failed to retrieve L7_Router (UUID=%s)",
self._l7_router_uuid)
raise exceptions.IngressControllerFailure
# verify that loadbalancer is active
try:
self._drv_l7_router._wait_for_provisioning(
self._router_lb, _ROUTER_MANUAL_CREATION_TIMEOUT)
except exceptions.ResourceNotReady as e:
LOG.error("Timed out waiting for L7 router to appear in "
"ACTIVE state: %s.", e)
raise exceptions.IngressControllerFailure
LOG.info("Ingress controller - "
"retrieve '%s' router details", self._router_lb)
# TODO(yboaron) add support for HTTPS listener
# create/verify listeners
self._l7_router_listeners = {}
listener = self._drv_l7_router.ensure_listener(
self._router_lb, 'HTTP', k_const.KURYR_L7_ROUTER_HTTP_PORT,
service_type=None)
LOG.info("Ingress controller - "
"retrieve HTTP listener details '%s'", listener)
self._l7_router_listeners[k_const.KURYR_L7_ROUTER_HTTP_PORT] = listener
def get_router(self):
return self._router_lb
def get_router_listeners(self):
return self._l7_router_listeners
class IngressCtrlr(object):
"""IngressCtrlr is responsible for the Ingress controller capability
The Ingress controller should create or verify (in case the router was
pre-created by the admin) the L7 router/LB - the entity that does the actual
L7 routing. In addition, the Ingress controller should provide the L7 router
details to the Ingress/ocp-route handlers and the Endpoint handler.
Both the Ingress/ocp-route handlers and the Endpoint handler should update
the L7 rules of the L7 router.
"""
instances = {}
@classmethod
def get_instance(cls):
if cls not in IngressCtrlr.instances:
IngressCtrlr.instances[cls] = cls()
return IngressCtrlr.instances[cls]
def __init__(self):
self._l7_router = None
self._status = 'DOWN'
def _start_operation_impl(self):
LOG.info('Ingress controller is enabled')
self._l7_router = L7Router(config.CONF.ingress.l7_router_uuid)
try:
self._status = 'IN_PROGRESS'
self._l7_router.ensure_router()
except Exception as e:
self._status = 'DOWN'
LOG.error("Ingress controller - failed to get L7 router (%s)", e)
return
self._status = 'ACTIVE'
LOG.info("Ingress controller - ACTIVE")
def _is_ingress_controller_disabled(self):
# Note(yboaron) To enable the ingress controller admin should :
# A. Set the L7-router/LB UUID in kuryr.conf
# and
# B. Add K8S-ingress and OCP-route handlers to pluggable
# handlers list
configured_handlers = config.CONF.kubernetes.enabled_handlers
return not (any(handler in configured_handlers for handler in
(_OCP_ROUTE_HANDLER, _INGRESS_LB_HANDLER)) and
config.CONF.ingress.l7_router_uuid)
def start_operation(self):
if self._is_ingress_controller_disabled():
LOG.info('To enable Ingress controller either OCP-Route or '
'Ingress-LB handlers should be enabled, and '
'l7_router_uuid should be specified')
return
self._start_operation_impl()
def get_router_and_listener(self):
"""This function returns L7 router and Listeners details,
The caller to this function will be blocked until Ingress controller
status is in stable (not in progress), the consumers of this function
will be the OCP-Route and K8S-Ingress handlers
"""
get_router_threshold = (time.time() + _ROUTER_MANUAL_CREATION_TIMEOUT)
while True:
if self._status != 'IN_PROGRESS':
if self._l7_router:
return (self._l7_router.get_router(),
self._l7_router.get_router_listeners())
else:
return None, None
if time.time() > get_router_threshold:
LOG.error("Ingress controller: get router - timeout expired")
return None, None
LOG.debug("Ingress controller - waiting till status is "
"!= IN_PROGRESS")
time.sleep(_ROUTER_POLL_INTERVAL)

View File

@@ -28,7 +28,6 @@ from kuryr_kubernetes import clients
from kuryr_kubernetes import config
from kuryr_kubernetes.controller.drivers import base as drivers
from kuryr_kubernetes.controller.handlers import pipeline as h_pipeline
from kuryr_kubernetes.controller.ingress import ingress_ctl
from kuryr_kubernetes.controller.managers import health
from kuryr_kubernetes import objects
from kuryr_kubernetes import utils
@@ -102,8 +101,6 @@ class KuryrK8sService(six.with_metaclass(KuryrK8sServiceMeta,
def start(self):
LOG.info("Service '%s' starting", self.__class__.__name__)
ingress_ctrl = ingress_ctl.IngressCtrlr.get_instance()
ingress_ctrl.start_operation()
super(KuryrK8sService, self).start()
if not CONF.kubernetes.controller_ha:

View File

@@ -76,11 +76,3 @@ class MultiPodDriverPoolConfigurationNotSupported(Exception):
2. One of the pod drivers is not supported
3. One of the pod drivers is not supported by its selected pool driver
"""
class IngressControllerFailure(Exception):
"""Exception represents a failure in the Ingress Controller functionality
This exception is raised when we fail to activate properly the Ingress
Controller.
"""

View File

@@ -146,61 +146,3 @@ class LBaaSServiceSpec(k_obj.KuryrK8sObjectBase):
'type': obj_fields.StringField(nullable=True, default=None),
'lb_ip': obj_fields.IPAddressField(nullable=True, default=None),
}
@obj_base.VersionedObjectRegistry.register
class LBaaSL7Policy(k_obj.KuryrK8sObjectBase):
VERSION = '1.0'
fields = {
'id': obj_fields.UUIDField(),
'name': obj_fields.StringField(nullable=True),
'listener_id': obj_fields.UUIDField(),
'redirect_pool_id': obj_fields.UUIDField(),
'project_id': obj_fields.StringField(),
}
@obj_base.VersionedObjectRegistry.register
class LBaaSL7Rule(k_obj.KuryrK8sObjectBase):
VERSION = '1.0'
fields = {
'id': obj_fields.UUIDField(),
'compare_type': obj_fields.StringField(nullable=True),
'l7policy_id': obj_fields.UUIDField(),
'type': obj_fields.StringField(nullable=True),
'value': obj_fields.StringField(nullable=True),
}
@obj_base.VersionedObjectRegistry.register
class LBaaSRouteState(k_obj.KuryrK8sObjectBase):
VERSION = '1.0'
fields = {
'members': obj_fields.ListOfObjectsField(LBaaSMember.__name__,
default=[]),
'pool': obj_fields.ObjectField(LBaaSPool.__name__,
nullable=True, default=None),
}
@obj_base.VersionedObjectRegistry.register
class LBaaSRouteNotifEntry(k_obj.KuryrK8sObjectBase):
VERSION = '1.0'
fields = {
'route_id': obj_fields.UUIDField(),
'msg': obj_fields.StringField(),
}
@obj_base.VersionedObjectRegistry.register
class LBaaSRouteNotifier(k_obj.KuryrK8sObjectBase):
VERSION = '1.0'
fields = {
'routes': obj_fields.ListOfObjectsField(
LBaaSRouteNotifEntry.__name__, default=[]),
}

View File

@@ -1,43 +0,0 @@
# Copyright (c) 2018 RedHat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from kuryr_kubernetes.objects import base as k_obj
from kuryr_kubernetes.objects import lbaas as lbaas_obj
from oslo_versionedobjects import base as obj_base
from oslo_versionedobjects import fields as obj_fields
@obj_base.VersionedObjectRegistry.register
class RouteState(k_obj.KuryrK8sObjectBase):
VERSION = '1.0'
fields = {
'router_pool': obj_fields.ObjectField(
lbaas_obj.LBaaSPool.__name__, nullable=True, default=None),
'l7_policy': obj_fields.ObjectField(
lbaas_obj.LBaaSL7Policy.__name__, nullable=True, default=None),
'h_l7_rule': obj_fields.ObjectField(
lbaas_obj.LBaaSL7Rule.__name__, nullable=True, default=None),
'p_l7_rule': obj_fields.ObjectField(
lbaas_obj.LBaaSL7Rule.__name__, nullable=True, default=None),
}
@obj_base.VersionedObjectRegistry.register
class RouteSpec(k_obj.KuryrK8sObjectBase):
VERSION = '1.0'
fields = {
'host': obj_fields.StringField(nullable=True, default=None),
'path': obj_fields.StringField(nullable=True, default=None),
'to_service': obj_fields.StringField(nullable=True, default=None),
}

View File

@@ -43,7 +43,6 @@ _kuryr_k8s_opts = [
('cni_health_server', cni_health.cni_health_server_opts),
('namespace_subnet', namespace_subnet.namespace_subnet_driver_opts),
('namespace_sg', namespace_security_groups.namespace_sg_driver_opts),
('ingress', config.ingress),
('sriov', config.sriov_opts),
('namespace_handler_caching', namespace.namespace_handler_caching_opts),
('np_handler_caching', policy.np_handler_caching_opts),

View File

@@ -1,17 +0,0 @@
# Copyright (c) 2018 RedHat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
OCP_API_BASE = '/apis/route.openshift.io/v1'
OCP_OBJ_ROUTE = 'Route'

View File

@@ -1,255 +0,0 @@
# Copyright (c) 2017 RedHat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from kuryr.lib._i18n import _
from kuryr_kubernetes import clients
from kuryr_kubernetes import constants as k_const
from kuryr_kubernetes.controller.drivers import base as drv_base
from kuryr_kubernetes.controller.ingress import ingress_ctl
from kuryr_kubernetes import exceptions as k_exc
from kuryr_kubernetes.handlers import k8s_base
from kuryr_kubernetes.objects import lbaas as obj_lbaas
from kuryr_kubernetes.objects import route as obj_route
from kuryr_kubernetes.platform import constants as ocp_const
from oslo_log import log as logging
from oslo_serialization import jsonutils
LOG = logging.getLogger(__name__)
class OcpRouteHandler(k8s_base.ResourceEventHandler):
"""OcpRouteHandler handles OCP route events.
An OpenShift route allows a service to be externally reachable via a host name.
This host name is then used to route traffic to the service.
The OcpRouteHandler is responsible for processing all route resource
events.
"""
OBJECT_KIND = ocp_const.OCP_OBJ_ROUTE
OBJECT_WATCH_PATH = "%s/%s" % (ocp_const.OCP_API_BASE, "routes")
def __init__(self):
self._drv_lbaas = drv_base.LBaaSDriver.get_instance()
self._l7_router = None
self._l7_router_listeners = None
def on_present(self, route):
if not self._l7_router or not self._l7_router_listeners:
ing_ctl = ingress_ctl.IngressCtrlr.get_instance()
self._l7_router, self._l7_router_listeners = (
ing_ctl.get_router_and_listener())
if not self._l7_router or not self._l7_router_listeners:
LOG.info("No L7 router found - do nothing")
return
route_spec = self._get_route_spec(route)
if not route_spec:
route_spec = obj_route.RouteSpec()
if self._should_ignore(route, route_spec):
return
route_state = self._get_route_state(route)
if not route_state:
route_state = obj_route.RouteState()
self._sync_router_pool(route, route_spec, route_state)
self._sync_l7_policy(route, route_spec, route_state)
self._sync_host_l7_rule(route, route_spec, route_state)
self._sync_path_l7_rule(route, route_spec, route_state)
self._set_route_state(route, route_state)
self._set_route_spec(route, route_spec)
self._send_route_notification_to_ep(
route, route_spec.to_service)
def _get_endpoints_link_by_route(self, route_link, ep_name):
route_link = route_link.replace(
ocp_const.OCP_API_BASE, k_const.K8S_API_BASE)
link_parts = route_link.split('/')
if link_parts[-2] != 'routes':
raise k_exc.IntegrityError(_(
"Unsupported route link: %(link)s") % {
'link': route_link})
link_parts[-2] = 'endpoints'
link_parts[-1] = ep_name
return "/".join(link_parts)
def _send_route_notification_to_ep(self, route, ep_name):
route_link = route['metadata']['selfLink']
ep_link = self._get_endpoints_link_by_route(route_link, ep_name)
k8s = clients.get_kubernetes_client()
try:
k8s.get(ep_link)
except k_exc.K8sClientException:
LOG.debug("Failed to get EP link : %s", ep_link)
return
route_notifier = obj_lbaas.LBaaSRouteNotifier()
route_notifier.routes.append(
obj_lbaas.LBaaSRouteNotifEntry(
route_id=route['metadata']['uid'], msg='RouteChanged'))
route_notifier.obj_reset_changes(recursive=True)
LOG.debug("Setting LBaaSRouteNotifier annotation: %r", route_notifier)
annotation = jsonutils.dumps(route_notifier.obj_to_primitive(),
sort_keys=True)
k8s.annotate(
ep_link,
{k_const.K8S_ANNOTATION_LBAAS_RT_NOTIF: annotation},
resource_version=route['metadata']['resourceVersion'])
def _should_ignore(self, route, route_spec):
spec = route['spec']
return ((not self._l7_router)
or
((spec.get('host') == route_spec.host) and
(spec.get('path') == route_spec.path) and
(spec['to'].get('name') == route_spec.to_service)))
def on_deleted(self, route):
if not self._l7_router:
LOG.info("No L7 router found - do nothing")
return
route_state = self._get_route_state(route)
if not route_state:
return
# NOTE(yboaron): deleting l7policy deletes also l7rules
if route_state.l7_policy:
self._drv_lbaas.release_l7_policy(
self._l7_router, route_state.l7_policy)
if route_state.router_pool:
if self._drv_lbaas.is_pool_used_by_other_l7policies(
route_state.l7_policy, route_state.router_pool):
LOG.debug("Can't delete pool (pointed by another route)")
else:
self._drv_lbaas.release_pool(
self._l7_router, route_state.router_pool)
# no more routes pointing to this pool/ep - update ep
spec = route['spec']
self._send_route_notification_to_ep(
route, spec['to'].get('name'))
def _sync_router_pool(self, route, route_spec, route_state):
if route_state.router_pool:
return
pool_name = self._drv_lbaas.get_loadbalancer_pool_name(
self._l7_router, route['metadata']['namespace'],
route['spec']['to']['name'])
pool = self._drv_lbaas.get_pool_by_name(
pool_name, self._l7_router.project_id)
if not pool:
pool = self._drv_lbaas.ensure_pool_attached_to_lb(
self._l7_router, route['metadata']['namespace'],
route['spec']['to']['name'], protocol='HTTP')
route_state.router_pool = pool
route_spec.to_service = route['spec']['to']['name']
def _sync_l7_policy(self, route, route_spec, route_state):
if route_state.l7_policy:
return
# TBD , take care of listener HTTPS
listener = self._l7_router_listeners[k_const.KURYR_L7_ROUTER_HTTP_PORT]
route_state.l7_policy = self._drv_lbaas.ensure_l7_policy(
route['metadata']['namespace'], route['metadata']['name'],
self._l7_router, route_state.router_pool, listener.id)
def _sync_host_l7_rule(self, route, route_spec, route_state):
if route_spec.host == route['spec']['host']:
return
if not route_spec.host:
route_state.h_l7_rule = self._drv_lbaas.ensure_l7_rule(
self._l7_router, route_state.l7_policy,
'EQUAL_TO', 'HOST_NAME', route['spec']['host'])
else:
self._drv_lbaas.update_l7_rule(
route_state.h_l7_rule, route['spec']['host'])
route_state.h_l7_rule.value = route['spec']['host']
route_spec.host = route['spec']['host']
def _sync_path_l7_rule(self, route, route_spec, route_state):
if route_spec.path == route['spec'].get('path'):
return
if not route_spec.path:
route_state.p_l7_rule = self._drv_lbaas.ensure_l7_rule(
self._l7_router, route_state.l7_policy,
'STARTS_WITH', 'PATH', route['spec']['path'])
else:
if route['spec']['path']:
self._drv_lbaas.update_l7_rule(
route_state.p_l7_rule, route['spec']['path'])
route_state.p_l7_rule.value = route['spec']['path']
else:
self._drv_lbaas.release_l7_rule(route_state.p_l7_rule)
route_state.p_l7_rule = None
route_spec.path = route['spec']['path']
def _get_route_spec(self, route):
try:
annotations = route['metadata']['annotations']
annotation = annotations[k_const.K8S_ANNOTATION_ROUTE_SPEC]
except KeyError:
return obj_route.RouteSpec()
obj_dict = jsonutils.loads(annotation)
obj = obj_route.RouteSpec.obj_from_primitive(obj_dict)
LOG.debug("Got RouteSpec from annotation: %r", obj)
return obj
def _set_route_spec(self, route, route_spec):
if route_spec is None:
LOG.debug("Removing RouteSpec annotation: %r", route_spec)
annotation = None
else:
route_spec.obj_reset_changes(recursive=True)
LOG.debug("Setting RouteSpec annotation: %r", route_spec)
annotation = jsonutils.dumps(route_spec.obj_to_primitive(),
sort_keys=True)
k8s = clients.get_kubernetes_client()
k8s.annotate(route['metadata']['selfLink'],
{k_const.K8S_ANNOTATION_ROUTE_SPEC: annotation},
resource_version=route['metadata']['resourceVersion'])
def _get_route_state(self, route):
try:
annotations = route['metadata']['annotations']
annotation = annotations[k_const.K8S_ANNOTATION_ROUTE_STATE]
except KeyError:
return obj_route.RouteState()
obj_dict = jsonutils.loads(annotation)
obj = obj_route.RouteState.obj_from_primitive(obj_dict)
LOG.debug("Got RouteState from annotation: %r", obj)
return obj
def _set_route_state(self, route, route_state):
if route_state is None:
LOG.debug("Removing RouteState annotation: %r", route_state)
annotation = None
else:
route_state.obj_reset_changes(recursive=True)
LOG.debug("Setting RouteState annotation: %r", route_state)
annotation = jsonutils.dumps(route_state.obj_to_primitive(),
sort_keys=True)
k8s = clients.get_kubernetes_client()
k8s.annotate(route['metadata']['selfLink'],
{k_const.K8S_ANNOTATION_ROUTE_STATE: annotation},
resource_version=route['metadata']['resourceVersion'])

View File

@@ -18,8 +18,6 @@ import munch
from neutronclient.common import exceptions as n_exc
from openstack import exceptions as os_exc
from openstack.load_balancer.v2 import l7_policy as o_l7p
from openstack.load_balancer.v2 import l7_rule as o_l7r
from openstack.load_balancer.v2 import listener as o_lis
from openstack.load_balancer.v2 import load_balancer as o_lb
from openstack.load_balancer.v2 import member as o_mem
@@ -917,363 +915,3 @@ class TestLBaaSv2Driver(test_base.TestCase):
def test_provisioning_timer(self):
# REVISIT(ivc): add test if _provisioning_timer is to stay
self.skipTest("not implemented")
def test_get_pool_by_name_not_found(self):
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
pools = o_pool.Pool(name='KUKU',
id='a2a62ea7-e3bf-40df-8c09-aa0c29876a6b')
lbaas.pools.return_value = [pools]
pool_name = 'NOT_KUKU'
project_id = 'TEST_PROJECT'
pool_id = cls.get_pool_by_name(m_driver, pool_name, project_id)
self.assertIsNone(pool_id)
def test_get_pool_by_name_found(self):
self._test_get_pool_by_name_found(listener_is_empty=False)
def test_get_pool_by_name_found_listener_is_empty(self):
self._test_get_pool_by_name_found(listener_is_empty=True)
def _test_get_pool_by_name_found(self, listener_is_empty):
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
pool_name = 'KUKU'
pool_lb_id = "607226db-27ef-4d41-ae89-f2a800e9c2db"
pool_project_id = "e3cd678b11784734bc366148aa37580e"
pool_id = "ddb2b28f-89e9-45d3-a329-a359c3e39e4a"
pool_protocol = "HTTP"
pool_listener_id = "023f2e34-7806-443b-bfae-16c324569a3d"
if listener_is_empty:
resp_listeners = []
else:
resp_listeners = [o_lis.Listener(id=pool_listener_id)]
listener_id = (resp_listeners[0].id if resp_listeners else None)
expected_result = obj_lbaas.LBaaSPool(
name=pool_name, project_id=pool_project_id,
loadbalancer_id=pool_lb_id,
listener_id=listener_id,
protocol=pool_protocol,
id=pool_id)
resp = [o_pool.Pool(
protocol=pool_protocol,
loadbalancers=[o_lb.LoadBalancer(id=pool_lb_id)],
listeners=resp_listeners,
project_id=pool_project_id,
id=pool_id,
name=pool_name,
)]
lbaas.pools.return_value = resp
pool = cls.get_pool_by_name(m_driver, pool_name, pool_project_id)
lbaas.pools.assert_called_once()
for attr in expected_result.obj_fields:
self.assertEqual(getattr(expected_result, attr),
getattr(pool, attr))
def test_get_pool_by_name_empty_list(self):
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
pools = {}
lbaas.pools.return_value = [pools]
pool_name = 'NOT_KUKU'
project_id = 'TEST_PROJECT'
pool = cls.get_pool_by_name(m_driver, pool_name, project_id)
self.assertIsNone(pool)
def test_get_lb_by_uuid(self):
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
loadbalancer_id = '00EE9E11-91C2-41CF-8FD4-7970579E5C4C'
loadbalancer_vip = '1.2.3.4'
loadbalancer_vip_port_id = '00EE9E11-91C2-41CF-8FD4-7970579EFFFF'
loadbalancer_project_id = '00EE9E11-91C2-41CF-8FD4-7970579EAAAA'
loadbalancer_name = 'MyName'
loadbalancer_subnet_id = '00EE9E11-91C2-41CF-8FD4-7970579EBBBB'
loadbalancer_provider = 'haproxy'
expected_lb = obj_lbaas.LBaaSLoadBalancer(
id=loadbalancer_id, port_id=loadbalancer_vip_port_id,
name=loadbalancer_name, project_id=loadbalancer_project_id,
subnet_id=loadbalancer_subnet_id, ip=loadbalancer_vip,
security_groups=None, provider=loadbalancer_provider)
resp = o_lb.LoadBalancer(id=loadbalancer_id,
vip_port_id=loadbalancer_vip_port_id,
name=loadbalancer_name,
project_id=loadbalancer_project_id,
vip_subnet_id=loadbalancer_subnet_id,
vip_address=loadbalancer_vip,
provider=loadbalancer_provider)
lbaas.get_load_balancer.return_value = resp
ret = cls.get_lb_by_uuid(m_driver, loadbalancer_id)
lbaas.get_load_balancer.assert_called_once()
for attr in expected_lb.obj_fields:
self.assertEqual(getattr(expected_lb, attr),
getattr(ret, attr))
self.assertEqual(loadbalancer_id, ret.id)
def test_get_lb_by_uuid_not_found(self):
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
resp = {'loadbalancer': {}}
lbaas.get_load_balancer.return_value = resp
requested_uuid = '00EE9E11-91C2-41CF-8FD4-7970579EFFFF'
lbaas.get_load_balancer.side_effect = os_exc.ResourceNotFound
ret = cls.get_lb_by_uuid(m_driver, requested_uuid)
lbaas.get_load_balancer.assert_called_once()
self.assertIsNone(ret)
def test_ensure_l7policy(self):
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
expected_resp = mock.sentinel.expected_resp
loadbalancer = mock.sentinel.expected_resp
route_name = 'ROUTE_NAME'
namespace = 'NAMESPACE'
listener_id = 'D4F35594-27EB-4F4C-930C-31DD40F53B77'
pool = obj_lbaas.LBaaSPool(
id='00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
project_id='TEST_PROJECT',
name='NAME',
loadbalancer_id='010101',
listener_id='12345',
protocol='TCP'
)
m_driver._ensure_provisioned.return_value = expected_resp
cls.ensure_l7_policy(
m_driver, namespace, route_name, loadbalancer, pool, listener_id)
m_driver._ensure_provisioned.assert_called_once_with(
loadbalancer, mock.ANY, m_driver._create_l7_policy,
m_driver._find_l7_policy)
l7policy = m_driver._ensure_provisioned.call_args[0][1]
self.assertEqual("%s%s" % (namespace, route_name), l7policy.name)
self.assertEqual(listener_id, l7policy.listener_id)
self.assertEqual(pool.id, l7policy.redirect_pool_id)
self.assertEqual(pool.project_id, l7policy.project_id)
def test_release_l7policy(self):
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
loadbalancer = mock.Mock()
l7_policy = mock.Mock()
cls.release_l7_policy(m_driver, loadbalancer, l7_policy)
m_driver._release.assert_called_once_with(
loadbalancer, l7_policy, lbaas.delete_l7_policy,
l7_policy.id)
def test_create_l7policy(self):
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
l7_policy = obj_lbaas.LBaaSL7Policy(
name='TEST_NAME',
project_id='TEST_PROJECT',
listener_id='D4F35594-27EB-4F4C-930C-31DD40F53B77',
redirect_pool_id='D3FA400A-F543-4B91-9CD3-047AF0CE42D1')
l7policy_id = '3A70CEC0-392D-4BC1-A27C-06E63A0FD54F'
req = {
'action': 'REDIRECT_TO_POOL',
'listener_id': l7_policy.listener_id,
'name': l7_policy.name,
'project_id': l7_policy.project_id,
'redirect_pool_id': l7_policy.redirect_pool_id}
resp = {'id': l7policy_id}
m_driver._post_lb_resource.return_value = resp
ret = cls._create_l7_policy(m_driver, l7_policy)
m_driver._post_lb_resource.assert_called_once_with(o_l7p.L7Policy, req)
for attr in l7_policy.obj_fields:
self.assertEqual(getattr(l7_policy, attr),
getattr(ret, attr))
self.assertEqual(l7policy_id, ret.id)
def test_find_l7_policy(self):
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
l7_policy = obj_lbaas.LBaaSL7Policy(
name='TEST_NAME',
project_id='TEST_PROJECT',
listener_id='D4F35594-27EB-4F4C-930C-31DD40F53B77',
redirect_pool_id='D3FA400A-F543-4B91-9CD3-047AF0CE42D1')
l7policy_id = '3A70CEC0-392D-4BC1-A27C-06E63A0FD54F'
resp = iter([o_l7p.L7Policy(id=l7policy_id)])
lbaas.l7_policies.return_value = resp
ret = cls._find_l7_policy(m_driver, l7_policy)
lbaas.l7_policies.assert_called_once_with(
name=l7_policy.name,
project_id=l7_policy.project_id,
redirect_pool_id=l7_policy.redirect_pool_id,
listener_id=l7_policy.listener_id)
for attr in l7_policy.obj_fields:
self.assertEqual(getattr(l7_policy, attr),
getattr(ret, attr))
self.assertEqual(l7policy_id, ret.id)
def test_find_l7_policy_not_found(self):
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
l7_policy = obj_lbaas.LBaaSL7Policy(
name='TEST_NAME',
project_id='TEST_PROJECT',
listener_id='D4F35594-27EB-4F4C-930C-31DD40F53B77',
redirect_pool_id='D3FA400A-F543-4B91-9CD3-047AF0CE42D1')
resp = iter([])
lbaas.l7_policies.return_value = resp
ret = cls._find_l7_policy(m_driver, l7_policy)
lbaas.l7_policies.assert_called_once_with(
name=l7_policy.name,
project_id=l7_policy.project_id,
redirect_pool_id=l7_policy.redirect_pool_id,
listener_id=l7_policy.listener_id)
self.assertIsNone(ret)
def test_ensure_l7_rule(self):
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
expected_resp = mock.sentinel.expected_resp
loadbalancer = mock.sentinel.loadbalancer
compare_type = 'EQUAL_TO'
type = 'HOST_NAME'
value = 'www.test.com'
l7_policy = obj_lbaas.LBaaSL7Policy(
id='00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
name='TEST_NAME',
project_id='TEST_PROJECT',
listener_id='D4F35594-27EB-4F4C-930C-31DD40F53B77',
redirect_pool_id='D3FA400A-F543-4B91-9CD3-047AF0CE42D1')
m_driver._ensure_provisioned.return_value = expected_resp
cls.ensure_l7_rule(
m_driver, loadbalancer, l7_policy, compare_type, type, value)
m_driver._ensure_provisioned.assert_called_once_with(
loadbalancer, mock.ANY, m_driver._create_l7_rule,
m_driver._find_l7_rule)
l7rule = m_driver._ensure_provisioned.call_args[0][1]
self.assertEqual(compare_type, l7rule.compare_type)
self.assertEqual(type, l7rule.type)
self.assertEqual(value, l7rule.value)
def test_release_l7_rule(self):
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
loadbalancer = mock.Mock()
l7_rule = mock.Mock()
cls.release_l7_rule(m_driver, loadbalancer, l7_rule)
m_driver._release.assert_called_once_with(
loadbalancer, l7_rule, lbaas.delete_l7_rule,
l7_rule.id, l7_rule.l7policy_id)
def test_create_l7_rule(self):
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
l7_rule = obj_lbaas.LBaaSL7Rule(
compare_type='EQUAL_TO',
l7policy_id='D4F35594-27EB-4F4C-930C-31DD40F53B77',
type='HOST_NAME',
value='www.test.com')
l7_rule_id = '3A70CEC0-392D-4BC1-A27C-06E63A0FD54F'
req = {'compare_type': l7_rule.compare_type,
'type': l7_rule.type,
'value': l7_rule.value}
resp = {'id': l7_rule_id}
m_driver._post_lb_resource.return_value = resp
ret = cls._create_l7_rule(m_driver, l7_rule)
m_driver._post_lb_resource.assert_called_once_with(
o_l7r.L7Rule, req, l7policy_id=l7_rule.l7policy_id)
for attr in l7_rule.obj_fields:
self.assertEqual(getattr(l7_rule, attr),
getattr(ret, attr))
self.assertEqual(l7_rule_id, ret.id)
def test_find_l7_rule(self):
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
l7_rule = obj_lbaas.LBaaSL7Rule(
compare_type='EQUAL_TO',
l7policy_id='D4F35594-27EB-4F4C-930C-31DD40F53B77',
type='HOST_NAME',
value='www.test.com')
l7_rule_id = '3A70CEC0-392D-4BC1-A27C-06E63A0FD54F'
resp = iter([o_l7r.L7Rule(id=l7_rule_id)])
lbaas.l7_rules.return_value = resp
ret = cls._find_l7_rule(m_driver, l7_rule)
lbaas.l7_rules.assert_called_once_with(
l7_rule.l7policy_id,
type=l7_rule.type,
value=l7_rule.value,
compare_type=l7_rule.compare_type)
for attr in l7_rule.obj_fields:
self.assertEqual(getattr(l7_rule, attr),
getattr(ret, attr))
self.assertEqual(l7_rule_id, ret.id)
def test_find_l7_rule_not_found(self):
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
l7_rule = obj_lbaas.LBaaSL7Rule(
compare_type='EQUAL_TO',
l7policy_id='D4F35594-27EB-4F4C-930C-31DD40F53B77',
type='HOST_NAME',
value='www.test.com')
resp = iter([])
lbaas.l7_rules.return_value = resp
ret = cls._find_l7_rule(m_driver, l7_rule)
lbaas.l7_rules.assert_called_once_with(
l7_rule.l7policy_id,
type=l7_rule.type,
value=l7_rule.value,
compare_type=l7_rule.compare_type)
self.assertIsNone(ret)

View File

@ -1,192 +0,0 @@
# Copyright (c) 2018 RedHat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import uuid
from kuryr_kubernetes.controller.handlers import ingress_lbaas as h_ing_lbaas
from kuryr_kubernetes.objects import lbaas as obj_lbaas
from kuryr_kubernetes.tests.unit.controller.handlers import \
test_lbaas as t_lbaas
class TestIngressLoadBalancerHandler(t_lbaas.TestLoadBalancerHandler):
@mock.patch('kuryr_kubernetes.controller.handlers.lbaas'
'.LoadBalancerHandler._cleanup_leftover_lbaas')
@mock.patch('kuryr_kubernetes.controller.drivers.base'
'.LBaaSDriver.get_instance')
def test_init(self, m_get_drv_lbaas, m_cleanup_leftover_lbaas):
m_get_drv_lbaas.return_value = mock.sentinel.drv_lbaas
handler = h_ing_lbaas.IngressLoadBalancerHandler()
self.assertEqual(mock.sentinel.drv_lbaas, handler._drv_lbaas)
@mock.patch('kuryr_kubernetes.utils.get_lbaas_spec')
def test_on_present_no_ing_ctrlr(self, m_get_lbaas_spec):
endpoints = mock.sentinel.endpoints
m_handler = mock.Mock(spec=h_ing_lbaas.IngressLoadBalancerHandler)
m_handler._l7_router = None
h_ing_lbaas.IngressLoadBalancerHandler.on_present(m_handler, endpoints)
m_get_lbaas_spec.assert_not_called()
m_handler._should_ignore.assert_not_called()
def test_should_ignore(self):
endpoints = mock.sentinel.endpoints
lbaas_spec = mock.sentinel.lbaas_spec
m_handler = mock.Mock(spec=h_ing_lbaas.IngressLoadBalancerHandler)
m_handler._has_pods.return_value = False
ret = h_ing_lbaas.IngressLoadBalancerHandler._should_ignore(
m_handler, endpoints, lbaas_spec)
self.assertEqual(True, ret)
m_handler._has_pods.assert_called_once_with(endpoints)
def test_should_ignore_with_pods(self):
endpoints = mock.sentinel.endpoints
lbaas_spec = mock.sentinel.lbaas_spec
m_handler = mock.Mock(spec=h_ing_lbaas.IngressLoadBalancerHandler)
m_handler._has_pods.return_value = True
ret = h_ing_lbaas.IngressLoadBalancerHandler._should_ignore(
m_handler, endpoints, lbaas_spec)
self.assertEqual(False, ret)
m_handler._has_pods.assert_called_once_with(endpoints)
def _generate_route_state(self, vip, targets, project_id, subnet_id):
name = 'DUMMY_NAME'
drv = t_lbaas.FakeLBaaSDriver()
lb = drv.ensure_loadbalancer(
name, project_id, subnet_id, vip, None, 'ClusterIP')
pool = drv.ensure_pool_attached_to_lb(lb, 'namespace',
'svc_name', 'HTTP')
members = {}
for ip, (listen_port, target_port) in targets.items():
members.setdefault((ip, listen_port, target_port),
drv.ensure_member(lb, pool,
subnet_id, ip,
target_port, None, None))
return obj_lbaas.LBaaSRouteState(
pool=pool,
members=list(members.values()))
def _sync_route_members_impl(self, m_get_drv_lbaas, m_get_drv_project,
m_get_drv_subnets, subnet_id, project_id,
endpoints, state, spec):
m_drv_lbaas = mock.Mock(wraps=t_lbaas.FakeLBaaSDriver())
m_drv_project = mock.Mock()
m_drv_project.get_project.return_value = project_id
m_drv_subnets = mock.Mock()
m_drv_subnets.get_subnets.return_value = {
subnet_id: mock.sentinel.subnet}
m_get_drv_lbaas.return_value = m_drv_lbaas
m_get_drv_project.return_value = m_drv_project
m_get_drv_subnets.return_value = m_drv_subnets
handler = h_ing_lbaas.IngressLoadBalancerHandler()
handler._l7_router = t_lbaas.FakeLBaaSDriver().ensure_loadbalancer(
name='L7_Router',
project_id=project_id,
subnet_id=subnet_id,
ip='1.2.3.4',
security_groups_ids=None,
service_type='ClusterIP')
with mock.patch.object(handler, '_get_pod_subnet') as m_get_pod_subnet:
m_get_pod_subnet.return_value = subnet_id
handler._sync_lbaas_route_members(endpoints, state, spec)
observed_targets = sorted(
(str(member.ip), (
member.port,
member.port))
for member in state.members)
return observed_targets
@mock.patch('kuryr_kubernetes.controller.handlers.lbaas'
'.LoadBalancerHandler._cleanup_leftover_lbaas')
@mock.patch('kuryr_kubernetes.controller.drivers.base'
'.PodSubnetsDriver.get_instance')
@mock.patch('kuryr_kubernetes.controller.drivers.base'
'.PodProjectDriver.get_instance')
@mock.patch('kuryr_kubernetes.controller.drivers.base'
'.LBaaSDriver.get_instance')
def test__sync_lbaas_route_members(self, m_get_drv_lbaas,
m_get_drv_project, m_get_drv_subnets,
m_cleanup_leftover_lbaas):
project_id = str(uuid.uuid4())
subnet_id = str(uuid.uuid4())
current_ip = '1.1.1.1'
current_targets = {
'1.1.1.101': (1001, 1001),
'1.1.1.111': (1001, 1001),
'1.1.1.201': (2001, 2001)}
expected_ip = '2.2.2.2'
expected_targets = {
'2.2.2.101': (1201, 1201),
'2.2.2.111': (1201, 1201),
'2.2.2.201': (2201, 2201)}
endpoints = self._generate_endpoints(expected_targets)
state = self._generate_route_state(
current_ip, current_targets, project_id, subnet_id)
spec = self._generate_lbaas_spec(expected_ip, expected_targets,
project_id, subnet_id)
observed_targets = self._sync_route_members_impl(
m_get_drv_lbaas, m_get_drv_project, m_get_drv_subnets,
subnet_id, project_id, endpoints, state, spec)
self.assertEqual(sorted(expected_targets.items()), observed_targets)
def test_on_deleted_no_ingress_controller(self):
endpoints = mock.sentinel.endpoints
m_handler = mock.Mock(spec=h_ing_lbaas.IngressLoadBalancerHandler)
m_handler._l7_router = None
h_ing_lbaas.IngressLoadBalancerHandler.on_deleted(m_handler, endpoints)
m_handler._get_lbaas_route_state.assert_not_called()
m_handler._remove_unused_route_members.assert_not_called()
def test_on_deleted(self):
endpoints = mock.sentinel.endpoints
project_id = str(uuid.uuid4())
subnet_id = str(uuid.uuid4())
m_handler = mock.Mock(spec=h_ing_lbaas.IngressLoadBalancerHandler)
m_handler._l7_router = t_lbaas.FakeLBaaSDriver().ensure_loadbalancer(
name='L7_Router',
project_id=project_id,
subnet_id=subnet_id,
ip='1.2.3.4',
security_groups_ids=None,
service_type='ClusterIP')
m_handler._get_lbaas_route_state.return_value = (
obj_lbaas.LBaaSRouteState())
m_handler._remove_unused_route_members.return_value = True
h_ing_lbaas.IngressLoadBalancerHandler.on_deleted(m_handler, endpoints)
m_handler._get_lbaas_route_state.assert_called_once()
m_handler._remove_unused_route_members.assert_called_once()

View File

@ -356,40 +356,6 @@ class FakeLBaaSDriver(drv_base.LBaaSDriver):
def get_loadbalancer_pool_name(self, lb_name, namespace, svc_name):
return "%s/%s/%s" % (lb_name, namespace, svc_name)
def ensure_l7_policy(self, namespace, route_name,
loadbalancer, pool,
listener_id):
pass
def release_l7_policy(self, loadbalancer, l7_policy):
pass
def ensure_l7_rule(self, loadbalancer, l7_policy, compare_type,
type, value):
pass
def release_l7_rule(self, loadbalancer, l7_rule):
pass
def update_l7_rule(self, l7_rule, new_value):
pass
def ensure_pool_attached_to_lb(self, loadbalancer, namespace,
svc_name, protocol):
return obj_lbaas.LBaaSPool(id=str(uuid.uuid4()),
loadbalancer_id=loadbalancer.id,
project_id=loadbalancer.project_id,
protocol=protocol)
def get_pool_by_name(self, pool_name, project_id):
pass
def get_lb_by_uuid(self, vip):
pass
def is_pool_used_by_other_l7policies(self, l7policy, pool):
pass
class TestLoadBalancerHandler(test_base.TestCase):

View File

@ -1,135 +0,0 @@
# Copyright (c) 2018 RedHat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from kuryr_kubernetes.controller.ingress import ingress_ctl
from kuryr_kubernetes.objects import lbaas as obj_lbaas
from kuryr_kubernetes.tests import base as test_base
import mock
class TestIngressCtrlr(test_base.TestCase):
def test_ingress_ctrlr_instance(self):
ing_ctrl = ingress_ctl.IngressCtrlr.get_instance()
self.assertIsNotNone(ing_ctrl)
@mock.patch('kuryr_kubernetes.config.CONF')
def test_ingress_ctrlr_conf_disabled(self, m_cfg):
m_cfg.kubernetes.enabled_handlers = ['not_ocproute']
m_cfg.ingress.l7_router_uuid = '00EE9E11-91C2-41CF-8FD4-7970579E5C4C'
ing_ctrl = ingress_ctl.IngressCtrlr.get_instance()
ing_ctrl.start_operation()
ret_l7router, ret_listener = ing_ctrl.get_router_and_listener()
self.assertIsNotNone(ing_ctrl)
self.assertIsNone(ret_l7router)
self.assertIsNone(ret_listener)
@mock.patch('kuryr_kubernetes.config.CONF')
def test_ingress_ctrlr_l7router_ip_not_defined(self, m_cfg):
m_cfg.kubernetes.enabled_handlers = ['ocproute']
m_cfg.ingress.l7_router_uuid = None
ing_ctrl = ingress_ctl.IngressCtrlr.get_instance()
ing_ctrl.start_operation()
ret_l7router, ret_listener = ing_ctrl.get_router_and_listener()
self.assertIsNotNone(ing_ctrl)
self.assertIsNone(ret_l7router)
self.assertIsNone(ret_listener)
@mock.patch('kuryr_kubernetes.controller.drivers.base'
'.LBaaSDriver.get_instance')
@mock.patch('kuryr_kubernetes.config.CONF')
def test_ingress_ctrlr_router_enabled_k8s(self, m_cfg, m_get_lbaas_drv):
m_cfg.kubernetes.enabled_handlers = ['ingresslb']
m_cfg.ingress.l7_router_uuid = '00EE9E11-91C2-41CF-8FD4-7970579E5C4C'
l7_router = obj_lbaas.LBaaSLoadBalancer(
name='TEST_NAME', project_id='TEST_PROJECT', ip='1.2.3.4',
subnet_id='D3FA400A-F543-4B91-9CD3-047AF0CE42D1',
security_groups=[],
id='00EE9E11-91C2-41CF-8FD4-7970579E5C4C')
m_driver = mock.Mock()
m_driver.get_lb_by_uuid.return_value = l7_router
m_get_lbaas_drv.return_value = m_driver
ing_ctrl = ingress_ctl.IngressCtrlr.get_instance()
ing_ctrl.start_operation()
self.assertIsNotNone(ing_ctrl)
self.assertEqual(ing_ctrl._status, 'ACTIVE')
@mock.patch('kuryr_kubernetes.controller.drivers.base'
'.LBaaSDriver.get_instance')
@mock.patch('kuryr_kubernetes.config.CONF')
def test_ingress_ctrlr_router_enabled_ocp(self, m_cfg, m_get_lbaas_drv):
m_cfg.kubernetes.enabled_handlers = ['ocproute']
m_cfg.ingress.l7_router_uuid = '00EE9E11-91C2-41CF-8FD4-7970579E5C4C'
l7_router = obj_lbaas.LBaaSLoadBalancer(
name='TEST_NAME', project_id='TEST_PROJECT', ip='1.2.3.4',
subnet_id='D3FA400A-F543-4B91-9CD3-047AF0CE42D1',
security_groups=[],
id='00EE9E11-91C2-41CF-8FD4-7970579E5C4C')
m_driver = mock.Mock()
m_driver.get_lb_by_uuid.return_value = l7_router
m_get_lbaas_drv.return_value = m_driver
ing_ctrl = ingress_ctl.IngressCtrlr.get_instance()
ing_ctrl.start_operation()
self.assertIsNotNone(ing_ctrl)
self.assertEqual(ing_ctrl._status, 'ACTIVE')
@mock.patch('kuryr_kubernetes.controller.drivers.base'
'.LBaaSDriver.get_instance')
@mock.patch('kuryr_kubernetes.config.CONF')
def test_ingress_ctrlr_router_created(self, m_cfg, m_get_lbaas_drv):
m_cfg.kubernetes.enabled_handlers = ['ocproute', 'ingresslb']
m_cfg.ingress.l7_router_uuid = '00EE9E11-91C2-41CF-8FD4-7970579E5C4C'
l7_router = obj_lbaas.LBaaSLoadBalancer(
name='TEST_NAME', project_id='TEST_PROJECT', ip='1.2.3.4',
subnet_id='D3FA400A-F543-4B91-9CD3-047AF0CE42D1',
security_groups=[],
id='00EE9E11-91C2-41CF-8FD4-7970579E5C4C')
m_driver = mock.Mock()
m_driver.get_lb_by_uuid.return_value = l7_router
m_get_lbaas_drv.return_value = m_driver
ing_ctrl = ingress_ctl.IngressCtrlr.get_instance()
ing_ctrl._start_operation_impl()
self.assertIsNotNone(ing_ctrl)
self.assertEqual(ing_ctrl._status, 'ACTIVE')
ret_l7router, ret_listener = ing_ctrl.get_router_and_listener()
self.assertEqual(ret_l7router, l7_router)
@mock.patch('kuryr_kubernetes.controller.drivers.base'
'.LBaaSDriver.get_instance')
@mock.patch('kuryr_kubernetes.config.CONF')
def test_ingress_ctrlr_router_l7_router_drv_fail(
self, m_cfg, m_get_lbaas_drv):
m_cfg.ingress.l7_router_uuid = '00EE9E11-91C2-41CF-8FD4-7970579E5C4C'
m_cfg.kubernetes.enabled_handlers = ['ocproute', 'ingresslb']
m_driver = mock.Mock()
m_driver.get_lb_by_uuid.return_value = None
m_get_lbaas_drv.return_value = m_driver
ing_ctrl = ingress_ctl.IngressCtrlr.get_instance()
ing_ctrl._start_operation_impl()
self.assertEqual(ing_ctrl._status, 'DOWN')
self.assertIsNotNone(ing_ctrl)

View File

@ -1,428 +0,0 @@
# Copyright (c) 2017 RedHat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from kuryr_kubernetes.controller.drivers import base as drv_base
from kuryr_kubernetes.objects import lbaas as obj_lbaas
from kuryr_kubernetes.objects import route as obj_route
from kuryr_kubernetes.platform.ocp.controller.handlers import route as h_route
from kuryr_kubernetes.tests import base as test_base
import mock
OCP_ROUTE_PATH_COMP_TYPE = 'STARTS_WITH'
class TestOcpRouteHandler(test_base.TestCase):
@mock.patch('kuryr_kubernetes.controller.drivers.base'
'.LBaaSDriver.get_instance')
def test_init(self, m_get_drv_lbaas):
m_get_drv_lbaas.return_value = mock.sentinel.drv_lbaas
handler = h_route.OcpRouteHandler()
self.assertEqual(mock.sentinel.drv_lbaas, handler._drv_lbaas)
self.assertIsNone(handler._l7_router)
self.assertIsNone(handler._l7_router_listeners)
def test_on_present(self):
route_event = mock.sentinel.route_event
route_spec = mock.sentinel.route_spec
route_state = mock.sentinel.route_state
route_spec.to_service = mock.sentinel.to_service
m_handler = mock.Mock(spec=h_route.OcpRouteHandler)
m_handler._get_route_spec.return_value = route_spec
m_handler._should_ignore.return_value = False
m_handler._get_route_state.return_value = route_state
m_handler._l7_router = obj_lbaas.LBaaSLoadBalancer(
id='00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
project_id='TEST_PROJECT')
m_handler._l7_router_listeners = obj_lbaas.LBaaSListener(
id='00EE9E11-91C2-41CF-8FD4-7970579E1234',
project_id='TEST_PROJECT',
name='http_listener',
protocol='http',
port=80)
h_route.OcpRouteHandler.on_present(m_handler, route_event)
m_handler._sync_router_pool.assert_called_once_with(
route_event, route_spec, route_state)
m_handler._sync_l7_policy.assert_called_once_with(
route_event, route_spec, route_state)
m_handler._sync_host_l7_rule.assert_called_once_with(
route_event, route_spec, route_state)
m_handler._sync_path_l7_rule.assert_called_once_with(
route_event, route_spec, route_state)
m_handler._set_route_state.assert_called_once_with(
route_event, route_state)
m_handler._set_route_spec.assert_called_once_with(
route_event, route_spec)
m_handler._send_route_notification_to_ep.assert_called_once_with(
route_event, route_spec.to_service)
def test_on_present_no_change(self):
route_event = mock.sentinel.route_event
route_spec = mock.sentinel.route_spec
m_handler = mock.Mock(spec=h_route.OcpRouteHandler)
m_handler._get_route_spec.return_value = route_spec
m_handler._should_ignore.return_value = True
m_handler._l7_router = obj_lbaas.LBaaSLoadBalancer(
id='00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
project_id='TEST_PROJECT')
m_handler._l7_router_listeners = obj_lbaas.LBaaSListener(
id='00EE9E11-91C2-41CF-8FD4-7970579E1234',
project_id='TEST_PROJECT',
name='http_listener',
protocol='http',
port=80)
h_route.OcpRouteHandler.on_present(m_handler, route_event)
m_handler._get_route_spec.assert_called_once_with(
route_event)
m_handler._sync_router_pool.assert_not_called()
m_handler._sync_l7_policy.assert_not_called()
m_handler._sync_host_l7_rule.assert_not_called()
m_handler._sync_path_l7_rule.assert_not_called()
m_handler._set_route_state.assert_not_called()
m_handler._set_route_spec.assert_not_called()
m_handler._send_route_notification_to_ep.assert_not_called()
def test_get_endpoints_link_by_route(self):
m_handler = mock.Mock(spec=h_route.OcpRouteHandler)
route_link = (
'/apis/route.openshift.io/v1/namespaces/default/routes/my_route')
ep_name = 'my_endpoint'
expected_ep_link = '/api/v1/namespaces/default/endpoints/my_endpoint'
ret_ep_path = h_route.OcpRouteHandler._get_endpoints_link_by_route(
m_handler, route_link, ep_name)
self.assertEqual(expected_ep_link, ret_ep_path)
def test_get_endpoints_link_by_route_error(self):
m_handler = mock.Mock(spec=h_route.OcpRouteHandler)
route_link = '/oapi/v1/namespaces/default/routes/my_route'
ep_name = 'wrong_endpoint'
expected_ep_link = '/api/v1/namespaces/default/endpoints/my_endpoint'
ret_ep_path = h_route.OcpRouteHandler._get_endpoints_link_by_route(
m_handler, route_link, ep_name)
self.assertNotEqual(expected_ep_link, ret_ep_path)
def test_should_ignore_l7_router_not_exist(self):
m_handler = mock.Mock(spec=h_route.OcpRouteHandler)
m_handler._l7_router = None
route = {'spec': {
'host': 'www.test.com', 'path': 'mypath',
'to': {'name': 'target_service'}}}
route_spec = obj_route.RouteSpec(
host='www.test.com',
path='mypath',
to_service='target_service')
expected_result = True
ret_value = h_route.OcpRouteHandler._should_ignore(
m_handler, route, route_spec)
self.assertEqual(ret_value, expected_result)
def test_should_ignore_l7_router_exist_no_change(self):
m_handler = mock.Mock(spec=h_route.OcpRouteHandler)
m_handler._l7_router = obj_lbaas.LBaaSLoadBalancer(
id='00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
project_id='TEST_PROJECT')
route = {'spec': {
'host': 'www.test.com', 'path': 'mypath',
'to': {'name': 'target_service'}}}
route_spec = obj_route.RouteSpec(
host='www.test.com',
path='mypath',
to_service='target_service')
expected_result = True
ret_value = h_route.OcpRouteHandler._should_ignore(
m_handler, route, route_spec)
self.assertEqual(ret_value, expected_result)
def test_should_ignore_l7_router_exist_with_changes(self):
m_handler = mock.Mock(spec=h_route.OcpRouteHandler)
m_handler._l7_router = obj_lbaas.LBaaSLoadBalancer(
id='00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
project_id='TEST_PROJECT')
route = {'spec': {
'host': 'www.test.com', 'path': 'mypath',
'to': {'name': 'target_service'}}}
route_spec = obj_route.RouteSpec(
host='www.test.com1',
path='mypath',
to_service='target_service')
expected_result = False
ret_value = h_route.OcpRouteHandler._should_ignore(
m_handler, route, route_spec)
self.assertEqual(ret_value, expected_result)
def test_sync_router_pool_empty_pool(self):
m_handler = mock.Mock(spec=h_route.OcpRouteHandler)
m_handler._l7_router = obj_lbaas.LBaaSLoadBalancer(
id='00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
project_id='TEST_PROJECT')
m_handler._drv_lbaas = mock.Mock(
spec=drv_base.LBaaSDriver)
m_handler._drv_lbaas.get_pool_by_name.return_value = None
m_handler._drv_lbaas.ensure_pool_attached_to_lb.return_value = None
route = {'metadata': {'namespace': 'namespace'},
'spec': {'host': 'www.test.com', 'path': 'mypath',
'to': {'name': 'target_service'}}}
route_spec = obj_route.RouteSpec(
host='www.test.com1',
path='mypath',
to_service='target_service')
route_state = obj_route.RouteState()
h_route.OcpRouteHandler._sync_router_pool(
m_handler, route, route_spec, route_state)
self.assertIsNone(route_state.router_pool)
def test_sync_router_pool_valid_pool(self):
m_handler = mock.Mock(spec=h_route.OcpRouteHandler)
m_handler._l7_router = obj_lbaas.LBaaSLoadBalancer(
id='00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
project_id='TEST_PROJECT')
m_handler._drv_lbaas = mock.Mock(
spec=drv_base.LBaaSDriver)
ret_pool = obj_lbaas.LBaaSPool(
name='TEST_NAME', project_id='TEST_PROJECT', protocol='TCP',
listener_id='A57B7771-6050-4CA8-A63C-443493EC98AB',
loadbalancer_id='00EE9E11-91C2-41CF-8FD4-7970579E5C4C')
m_handler._drv_lbaas.get_pool_by_name.return_value = None
m_handler._drv_lbaas.ensure_pool_attached_to_lb.return_value = ret_pool
route = {'metadata': {'namespace': 'namespace'},
'spec': {'host': 'www.test.com', 'path': 'mypath',
'to': {'name': 'target_service'}}}
route_spec = obj_route.RouteSpec(
host='www.test.com1',
path='mypath',
to_service='target_service')
route_state = obj_route.RouteState()
h_route.OcpRouteHandler._sync_router_pool(
m_handler, route, route_spec, route_state)
self.assertEqual(route_state.router_pool, ret_pool)
def test_sync_l7_policy(self):
m_handler = mock.Mock(spec=h_route.OcpRouteHandler)
m_handler._l7_router = obj_lbaas.LBaaSLoadBalancer(
id='00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
project_id='TEST_PROJECT')
m_handler._drv_lbaas = mock.Mock(
spec=drv_base.LBaaSDriver)
listener = obj_lbaas.LBaaSListener(
id='123443545',
name='TEST_NAME', project_id='TEST_PROJECT', protocol='TCP',
port=80, loadbalancer_id='00EE9E11-91C2-41CF-8FD4-7970579E5C4C')
m_handler._l7_router_listeners = {'80': listener}
l7_policy = obj_lbaas.LBaaSL7Policy(
id='00EE9E11-91C2-41CF-8FD4-7970579E5C44', name='myname',
listener_id='00EE9E11-91C2-41CF-8FD4-7970579E5C45',
redirect_pool_id='00EE9E11-91C2-41CF-8FD4-7970579E5C46',
project_id='00EE9E11-91C2-41CF-8FD4-7970579E5C46')
route_state = obj_route.RouteState()
m_handler._drv_lbaas.ensure_l7_policy.return_value = l7_policy
route = {'metadata': {'namespace': 'namespace', 'name': 'name'},
'spec': {'host': 'www.test.com', 'path': 'mypath',
'to': {'name': 'target_service'}}}
route_spec = obj_route.RouteSpec(
host='www.test.com1',
path='mypath',
to_service='target_service')
h_route.OcpRouteHandler._sync_l7_policy(
m_handler, route, route_spec, route_state)
self.assertEqual(route_state.l7_policy, l7_policy)
def test_sync_host_l7_rule_already_exist(self):
m_handler = mock.Mock(spec=h_route.OcpRouteHandler)
m_handler._l7_router = obj_lbaas.LBaaSLoadBalancer(
id='00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
project_id='TEST_PROJECT')
m_handler._drv_lbaas = mock.Mock(
spec=drv_base.LBaaSDriver)
h_l7_rule = obj_lbaas.LBaaSL7Rule(
id='00EE9E11-91C2-41CF-8FD4-7970579E5C44',
compare_type='EQUAL_TO',
l7policy_id='00EE9E11-91C2-41CF-8FD4-7970579E5C45',
type='HOST',
value='www.example.com')
route_state = obj_route.RouteState(h_l7_rule=h_l7_rule)
route = {'metadata': {'namespace': 'namespace', 'name': 'name'},
'spec': {'host': 'www.test.com', 'path': 'mypath',
'to': {'name': 'target_service'}}}
route_spec = obj_route.RouteSpec(
host='www.test.com',
path='mypath',
to_service='target_service')
h_route.OcpRouteHandler._sync_host_l7_rule(
m_handler, route, route_spec, route_state)
self.assertEqual(route_state.h_l7_rule, h_l7_rule)
def test_sync_host_l7_rule_new_host(self):
m_handler = mock.Mock(spec=h_route.OcpRouteHandler)
m_handler._l7_router = obj_lbaas.LBaaSLoadBalancer(
id='00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
project_id='TEST_PROJECT')
m_handler._drv_lbaas = mock.Mock(
spec=drv_base.LBaaSDriver)
h_l7_rule = obj_lbaas.LBaaSL7Rule(
id='00EE9E11-91C2-41CF-8FD4-7970579E5C44',
compare_type='EQUAL_TO',
l7policy_id='00EE9E11-91C2-41CF-8FD4-7970579E5C45',
type='HOST',
value='www.example.com')
route_state = obj_route.RouteState(h_l7_rule=h_l7_rule)
route = {'metadata': {'namespace': 'namespace', 'name': 'name'},
'spec': {'host': 'new.www.test.com', 'path': 'mypath',
'to': {'name': 'target_service'}}}
route_spec = obj_route.RouteSpec(
host='www.test.com',
path='mypath',
to_service='target_service')
m_handler._drv_lbaas.ensure_l7_rule.return_value = h_l7_rule
h_route.OcpRouteHandler._sync_host_l7_rule(
m_handler, route, route_spec, route_state)
self.assertEqual(route_state.h_l7_rule.value, route['spec']['host'])
def test_sync_path_l7_rule(self):
m_handler = mock.Mock(spec=h_route.OcpRouteHandler)
m_handler._l7_router = obj_lbaas.LBaaSLoadBalancer(
id='00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
project_id='TEST_PROJECT')
m_handler._drv_lbaas = mock.Mock(
spec=drv_base.LBaaSDriver)
l7_policy = obj_lbaas.LBaaSL7Policy(
id='00EE9E11-91C2-41CF-8FD4-7970579E6666', name='myname',
listener_id='00EE9E11-91C2-41CF-8FD4-7970579E5C45',
redirect_pool_id='00EE9E11-91C2-41CF-8FD4-7970579E5C46',
project_id='00EE9E11-91C2-41CF-8FD4-7970579E5C46')
route_state = obj_route.RouteState(
l7_policy=l7_policy)
route = {'metadata': {'namespace': 'namespace', 'name': 'name'},
'spec': {'host': 'new.www.test.com', 'path': '/nice_path',
'to': {'name': 'target_service'}}}
route_spec = obj_route.RouteSpec(
host='www.test.com',
path=None,
to_service='target_service')
ret_p_l7_rule = obj_lbaas.LBaaSL7Rule(
id='55559E11-91C2-41CF-8FD4-7970579E5C44',
compare_type=OCP_ROUTE_PATH_COMP_TYPE,
l7policy_id='55559E11-91C2-41CF-8FD4-7970579E5C45',
type='PATH',
value='/nice_path')
m_handler._drv_lbaas.ensure_l7_rule.return_value = ret_p_l7_rule
h_route.OcpRouteHandler._sync_path_l7_rule(
m_handler, route, route_spec, route_state)
self.assertEqual(route_state.p_l7_rule, ret_p_l7_rule)
m_handler._drv_lbaas.ensure_l7_rule.assert_called_once_with(
m_handler._l7_router, route_state.l7_policy,
OCP_ROUTE_PATH_COMP_TYPE, 'PATH', route['spec']['path'])
def test_sync_path_l7_rule_edit_usecase(self):
m_handler = mock.Mock(spec=h_route.OcpRouteHandler)
m_handler._l7_router = obj_lbaas.LBaaSLoadBalancer(
id='00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
project_id='TEST_PROJECT')
m_handler._drv_lbaas = mock.Mock(
spec=drv_base.LBaaSDriver)
old_p_l7_rule = obj_lbaas.LBaaSL7Rule(
id='00EE9E11-91C2-41CF-8FD4-7970579E5C44',
compare_type=OCP_ROUTE_PATH_COMP_TYPE,
l7policy_id='00EE9E11-91C2-41CF-8FD4-7970579E5C45',
type='PATH',
value='/cur_path')
route_state = obj_route.RouteState(p_l7_rule=old_p_l7_rule)
route = {'metadata': {'namespace': 'namespace', 'name': 'name'},
'spec': {'host': 'new.www.test.com', 'path': '/new_path',
'to': {'name': 'target_service'}}}
route_spec = obj_route.RouteSpec(
host='www.test.com',
path=old_p_l7_rule.value,
to_service='target_service')
ret_p_l7_rule = obj_lbaas.LBaaSL7Rule(
id='00EE9E11-91C2-41CF-8FD4-7970579E5C44',
compare_type=OCP_ROUTE_PATH_COMP_TYPE,
l7policy_id='00EE9E11-91C2-41CF-8FD4-7970579E5C45',
type='PATH',
value=route['spec']['path'])
m_handler._drv_lbaas.update_l7_rule.return_value = True
h_route.OcpRouteHandler._sync_path_l7_rule(
m_handler, route, route_spec, route_state)
self.assertEqual(route_state.p_l7_rule.value, ret_p_l7_rule.value)
m_handler._drv_lbaas.update_l7_rule.assert_called_once_with(
old_p_l7_rule, route['spec']['path'])
def test_sync_path_l7_rule_route_spec_not_sync(self):
m_handler = mock.Mock(spec=h_route.OcpRouteHandler)
m_handler._l7_router = obj_lbaas.LBaaSLoadBalancer(
id='00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
project_id='TEST_PROJECT')
m_handler._drv_lbaas = mock.Mock(
spec=drv_base.LBaaSDriver)
old_p_l7_rule = obj_lbaas.LBaaSL7Rule(
id='00EE9E11-91C2-41CF-8FD4-7970579E5C44',
compare_type=OCP_ROUTE_PATH_COMP_TYPE,
l7policy_id='00EE9E11-91C2-41CF-8FD4-7970579E5C45',
type='PATH',
value='/cur_path')
route_state = obj_route.RouteState(p_l7_rule=old_p_l7_rule)
route = {'metadata': {'namespace': 'namespace', 'name': 'name'},
'spec': {'host': 'new.www.test.com', 'path': 'new_path',
'to': {'name': 'target_service'}}}
route_spec = obj_route.RouteSpec(
host='www.test.com',
path='/not_cur_path',
to_service='target_service')
m_handler._drv_lbaas.update_l7_rule.return_value = None
h_route.OcpRouteHandler._sync_path_l7_rule(
m_handler, route, route_spec, route_state)
self.assertEqual(route_state.p_l7_rule.value, route['spec']['path'])
self.assertEqual(route_spec.path, route['spec']['path'])

View File

@ -21,22 +21,14 @@ from oslo_versionedobjects import fixture
# they come with a corresponding version bump in the affected
# objects
object_data = {
'LBaaSL7Policy':
'1.0-3ac4fcd50a555f433a78c67cb6a4cd52',
'LBaaSL7Rule': '1.0-276d9d678e1a8fc4b53fdbf3b2ac39ec',
'LBaaSListener': '1.0-a9e2d5c73687f5edc66fdb2f48650e15',
'LBaaSLoadBalancer': '1.3-8bc0a9bdbd160da67572aa38784378d1',
'LBaaSMember': '1.0-a770c6884c27d6d8c21186b27d0e2ccb',
'LBaaSPool': '1.1-6e77370d7632a902445444249eb77b01',
'LBaaSPortSpec': '1.1-1b307f34630617086c7af70f2cb8b215',
'LBaaSPubIp': '1.0-83992edec2c60fb4ab8998ea42a4ff74',
'LBaaSRouteNotifEntry': '1.0-dd2f2be956f68814b1f47cb13483a885',
'LBaaSRouteNotifier': '1.0-f0bfd8e772434abe7557930d7e0180c1',
'LBaaSRouteState': '1.0-bdf561462a2d337c0e0ae8cb10e9ff20',
'LBaaSServiceSpec': '1.0-d430ecd443f2b1999196bfe531e56f7e',
'LBaaSState': '1.0-a0ff7dce2d3f6ce1ffab4ff95a344361',
'RouteSpec': '1.0-2f02b2e24b1ca2b94c2bbdb718bfc020',
'RouteState': '1.0-2475dbeb6ebedabe2a1e235f9bc6b614',
}

View File

@ -100,8 +100,6 @@ kuryr_kubernetes.controller.handlers =
lbaasspec = kuryr_kubernetes.controller.handlers.lbaas:LBaaSSpecHandler
lb = kuryr_kubernetes.controller.handlers.lbaas:LoadBalancerHandler
namespace = kuryr_kubernetes.controller.handlers.namespace:NamespaceHandler
ingresslb = kuryr_kubernetes.controller.handlers.ingress_lbaas:IngressLoadBalancerHandler
ocproute = kuryr_kubernetes.platform.ocp.controller.handlers.route:OcpRouteHandler
policy = kuryr_kubernetes.controller.handlers.policy:NetworkPolicyHandler
pod_label = kuryr_kubernetes.controller.handlers.pod_label:PodLabelHandler
kuryrnetpolicy = kuryr_kubernetes.controller.handlers.kuryrnetpolicy:KuryrNetPolicyHandler