From 6349976cde822b8535a07fb04b14cfeb4c409287 Mon Sep 17 00:00:00 2001 From: Michael Johnson Date: Wed, 9 May 2018 11:47:04 -0700 Subject: [PATCH] Implement provider drivers - Cleanup This patch addresses the following: Fixes some unit tests. Cleans up some code from the parent patches. Adds a release note for the provider driver support. Adds the "List providers" API. Adds a document listing the known provider drivers. Adds a provider driver development guide. Change-Id: I90dc39e5e9d7d5839913dc2dbf187d935ee2b8b5 Story: 1655768 Task: 5165 --- api-ref/source/parameters.yaml | 12 + api-ref/source/v2/examples/provider-list-curl | 1 + .../v2/examples/provider-list-response.json | 12 + api-ref/source/v2/index.rst | 5 + api-ref/source/v2/provider.inc | 51 + doc/source/admin/index.rst | 1 + doc/source/admin/providers.rst | 59 + doc/source/contributor/guides/providers.rst | 1770 +++++++++++++++++ doc/source/contributor/index.rst | 6 + etc/octavia.conf | 6 +- octavia/api/drivers/amphora_driver/driver.py | 20 +- octavia/api/drivers/utils.py | 13 +- octavia/api/v2/controllers/__init__.py | 2 + octavia/api/v2/controllers/amphora.py | 14 +- octavia/api/v2/controllers/base.py | 6 - octavia/api/v2/controllers/listener.py | 1 - octavia/api/v2/controllers/load_balancer.py | 1 - octavia/api/v2/controllers/pool.py | 1 - octavia/api/v2/controllers/provider.py | 50 + octavia/api/v2/types/provider.py | 26 + octavia/common/config.py | 14 +- octavia/common/constants.py | 1 + octavia/policies/__init__.py | 2 + octavia/policies/provider.py | 29 + octavia/tests/functional/api/v2/base.py | 13 +- .../tests/functional/api/v2/test_amphora.py | 27 +- .../tests/functional/api/v2/test_provider.py | 45 + .../api/drivers/amphora_driver/__init__.py | 11 + .../amphora_driver/test_amphora_driver.py | 376 ++++ .../unit/api/drivers/sample_data_models.py | 471 +++++ .../unit/api/drivers/test_data_models.py | 16 +- .../unit/api/drivers/test_driver_factory.py | 48 + 
octavia/tests/unit/api/drivers/test_utils.py | 612 +----- ...vider_driver_support-7523f130dd5025af.yaml | 44 + 34 files changed, 3201 insertions(+), 565 deletions(-) create mode 100644 api-ref/source/v2/examples/provider-list-curl create mode 100644 api-ref/source/v2/examples/provider-list-response.json create mode 100644 api-ref/source/v2/provider.inc create mode 100644 doc/source/admin/providers.rst create mode 100644 doc/source/contributor/guides/providers.rst create mode 100644 octavia/api/v2/controllers/provider.py create mode 100644 octavia/api/v2/types/provider.py create mode 100644 octavia/policies/provider.py create mode 100644 octavia/tests/functional/api/v2/test_provider.py create mode 100644 octavia/tests/unit/api/drivers/amphora_driver/__init__.py create mode 100644 octavia/tests/unit/api/drivers/amphora_driver/test_amphora_driver.py create mode 100644 octavia/tests/unit/api/drivers/sample_data_models.py create mode 100644 octavia/tests/unit/api/drivers/test_driver_factory.py create mode 100644 releasenotes/notes/add_provider_driver_support-7523f130dd5025af.yaml diff --git a/api-ref/source/parameters.yaml b/api-ref/source/parameters.yaml index fd30f84025..2e0e0e8a3f 100644 --- a/api-ref/source/parameters.yaml +++ b/api-ref/source/parameters.yaml @@ -862,6 +862,18 @@ provider: in: body required: true type: string +provider-description: + description: | + Provider description. + in: body + required: true + type: string +provider-name: + description: | + Provider name. + in: body + required: true + type: string provider-optional: description: | Provider name for the load balancer. Default is ``octavia``. 
diff --git a/api-ref/source/v2/examples/provider-list-curl b/api-ref/source/v2/examples/provider-list-curl new file mode 100644 index 0000000000..24ade7db11 --- /dev/null +++ b/api-ref/source/v2/examples/provider-list-curl @@ -0,0 +1 @@ +curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2.0/lbaas/providers diff --git a/api-ref/source/v2/examples/provider-list-response.json b/api-ref/source/v2/examples/provider-list-response.json new file mode 100644 index 0000000000..d23a924c7b --- /dev/null +++ b/api-ref/source/v2/examples/provider-list-response.json @@ -0,0 +1,12 @@ +{ + "providers": [ + { + "name": "amphora", + "description": "The Octavia Amphora driver." + }, + { + "name": "octavia", + "description": "Deprecated alias of the Octavia Amphora driver." + } + ] +} diff --git a/api-ref/source/v2/index.rst b/api-ref/source/v2/index.rst index 77f1693f0c..84b854df18 100644 --- a/api-ref/source/v2/index.rst +++ b/api-ref/source/v2/index.rst @@ -51,6 +51,11 @@ Quotas ------ .. include:: quota.inc +--------- +Providers +--------- +.. include:: provider.inc + -------- Amphorae -------- diff --git a/api-ref/source/v2/provider.inc b/api-ref/source/v2/provider.inc new file mode 100644 index 0000000000..f20c750377 --- /dev/null +++ b/api-ref/source/v2/provider.inc @@ -0,0 +1,51 @@ +.. -*- rst -*- + +List Providers +============== + +.. rest_method:: GET /v2.0/lbaas/providers + +Lists all enabled provider drivers. + +Use the ``fields`` query parameter to control which fields are +returned in the response body. + +The list might be empty. + +.. rest_status_code:: success ../http-status.yaml + + - 200 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - fields: fields + +Curl Example +------------ + +.. literalinclude:: examples/provider-list-curl + :language: bash + +Response Parameters +------------------- + +.. 
rest_parameters:: ../parameters.yaml + + - name: provider-name + - description: provider-description + +Response Example +---------------- + +.. literalinclude:: examples/provider-list-response.json + :language: javascript diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst index fb39ffcddf..0b8f8607a2 100644 --- a/doc/source/admin/index.rst +++ b/doc/source/admin/index.rst @@ -34,6 +34,7 @@ Operator Reference ../configuration/policy.rst Anchor.rst apache-httpd.rst + providers.rst Indices and Search ------------------ diff --git a/doc/source/admin/providers.rst b/doc/source/admin/providers.rst new file mode 100644 index 0000000000..d25d31e426 --- /dev/null +++ b/doc/source/admin/providers.rst @@ -0,0 +1,59 @@ +.. + Copyright 2018 Rackspace, US Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +========================== +Available Provider Drivers +========================== + +Octavia supports enabling multiple provider drivers via the Octavia v2 API. +Drivers, other than the reference Amphora driver, exist outside of the Octavia +repository and are not maintained by the Octavia team. This list is intended +to provide a place for operators to discover and find available load balancing +provider drivers. + +This list is a "best effort" to keep updated, so please check with your +favorite load balancer provider to see if they support OpenStack load +balancing. If they don't, make a request for support! + +.. 
Note:: The provider drivers listed here may not be maintained by the + OpenStack LBaaS team. Please submit bugs for these projects through + their respective bug tracking systems. + +Drivers are installed on all of your Octavia API instances using pip and +automatically integrated with Octavia using `setuptools entry points`_. Once +installed, operators can enable the provider by adding the provider to the +Octavia configuration file `enabled_provider_drivers`_ setting in the +[api_settings] section. Be sure to install and enable the provider on all of +your Octavia API instances. + +.. _setuptools entry points: http://setuptools.readthedocs.io/en/latest/pkg_resources.html?#entry-points +.. _enabled_provider_drivers: https://docs.openstack.org/octavia/latest/configuration/configref.html#api_settings.enabled_provider_drivers + +Amphora +======= + +This is the reference driver for Octavia, meaning it is used for testing the +Octavia code base. It is an open source, scalable, and highly available load +balancing provider. + +Default provider name: **amphora** + +The driver package: https://pypi.org/project/octavia/ + +The driver source: http://git.openstack.org/cgit/openstack/octavia/ + +The documentation: https://docs.openstack.org/octavia/latest/ + +Where to report issues with the driver: https://storyboard.openstack.org/#!/project/908 diff --git a/doc/source/contributor/guides/providers.rst b/doc/source/contributor/guides/providers.rst new file mode 100644 index 0000000000..2f4317dc60 --- /dev/null +++ b/doc/source/contributor/guides/providers.rst @@ -0,0 +1,1770 @@ +.. + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. 
You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +================================= +Provider Driver Development Guide +================================= +This document is intended as a guide for developers creating provider drivers +for the Octavia API. This guide is intended to be an up to date version of the +`provider driver specification`_ previously approved. + +.. _provider driver specification: ../specs/version1.1/enable-provider-driver.html + +How Provider Drivers Integrate +============================== +Available drivers will be enabled by entries in the Octavia configuration file. +Drivers will be loaded via stevedore and Octavia will communicate with drivers +through a standard class interface defined below. Most driver functions will be +asynchronous to Octavia, and Octavia will provide a library of functions +that give drivers a way to update status and statistics. Functions that are +synchronous are noted below. + +Octavia API functions not listed here will continue to be handled by the +Octavia API and will not call into the driver. Examples would be show, list, +and quota requests. + +Driver Entry Points +------------------- + +Provider drivers will be loaded via +`stevedore `_. Drivers will +have an entry point defined in their setup tools configuration using the +Octavia driver namespace "octavia.api.drivers". This entry point name will +be used to enable the driver in the Octavia configuration file and as the +"provider" parameter users specify when creating a load balancer. An example +for the octavia reference driver would be: + +.. 
code-block:: python + + amphora = octavia.api.drivers.amphora_driver.driver:AmphoraProviderDriver + +Stable Provider Driver Interface +================================ + +Provider drivers should only access the following Octavia APIs. All other +Octavia APIs are not considered stable or safe for provider driver use and +may change at any time. + +* octavia.api.drivers.data_models +* octavia.api.drivers.driver_lib +* octavia.api.drivers.exceptions +* octavia.api.drivers.provider_base + +Octavia Provider Driver API +=========================== + +Provider drivers will be expected to support the full interface described +by the Octavia API, currently v2.0. If a driver does not implement an API +function, drivers should fail a request by raising a ``NotImplementedError`` +exception. If a driver implements a function but does not support a particular +option passed in by the caller, the driver should raise an +``UnsupportedOptionError``. + +It is recommended that drivers use the +`jsonschema `_ package or +`voluptuous `_ to validate the +request against the current driver capabilities. + +See the `Exception Model`_ below for more details. + +.. note:: Driver developers should refer to the official + `Octavia API reference`_ document for details of the fields and + expected outcome of these calls. + +.. _Octavia API reference: https://developer.openstack.org/api-ref/load-balancer/v2/index.html + +Load balancer +------------- + +Create +^^^^^^ + +Creates a load balancer. + +Octavia will pass in the load balancer object with all requested settings. + +The load balancer will be in the ``PENDING_CREATE`` provisioning_status and +``OFFLINE`` operating_status when it is passed to the driver. The driver +will be responsible for updating the provisioning status of the load +balancer to either ``ACTIVE`` if successfully created, or ``ERROR`` if not +created. + +The Octavia API will accept and do basic API validation of the create +request from the user. 
The load balancer python object representing the +request body will be passed to the driver create method as it was received +and validated with the following exceptions: + +1. The provider will be removed as this is used for driver selection. +2. The flavor will be expanded from the provided ID to be the full + dictionary representing the flavor metadata. + +**Load balancer object** + +As of the writing of this specification the create load balancer object may +contain the following: + ++-----------------+--------+-----------------------------------------------+ +| Name | Type | Description | ++=================+========+===============================================+ +| admin_state_up | bool | Admin state: True if up, False if down. | ++-----------------+--------+-----------------------------------------------+ +| description | string | A human-readable description for the resource.| ++-----------------+--------+-----------------------------------------------+ +| flavor | dict | The flavor keys and values. | ++-----------------+--------+-----------------------------------------------+ +| listeners | list | A list of `Listener objects`_. | ++-----------------+--------+-----------------------------------------------+ +| loadbalancer_id | string | ID of load balancer to create. | ++-----------------+--------+-----------------------------------------------+ +| name | string | Human-readable name of the resource. | ++-----------------+--------+-----------------------------------------------+ +| pools | list | A list of `Pool object`_. | ++-----------------+--------+-----------------------------------------------+ +| project_id | string | ID of the project owning this resource. | ++-----------------+--------+-----------------------------------------------+ +| vip_address | string | The IP address of the Virtual IP (VIP). | ++-----------------+--------+-----------------------------------------------+ +| vip_network_id | string | The ID of the network for the VIP. 
| ++-----------------+--------+-----------------------------------------------+ +| vip_port_id | string | The ID of the VIP port. | ++-----------------+--------+-----------------------------------------------+ +|vip_qos_policy_id| string | The ID of the qos policy for the VIP. | ++-----------------+--------+-----------------------------------------------+ +| vip_subnet_id | string | The ID of the subnet for the VIP. | ++-----------------+--------+-----------------------------------------------+ + +The driver is expected to validate that the driver supports the request +and raise an exception if the request cannot be accepted. + +**VIP port creation** + +Some provider drivers will want to create the Neutron port for the VIP, and +others will want Octavia to create the port instead. In order to support both +use cases, the create_vip_port() method will ask provider drivers to create +a VIP port. If the driver expects Octavia to create the port, the driver +will raise a NotImplementedError exception. Octavia will call this function +before calling loadbalancer_create() in order to determine if it should +create the VIP port. Octavia will call create_vip_port() with a loadbalancer +ID and a partially defined VIP dictionary. Provider drivers that support +port creation will create the port and return a fully populated VIP +dictionary. + +**VIP dictionary** + ++-----------------+--------+-----------------------------------------------+ +| Name | Type | Description | ++=================+========+===============================================+ +| project_id | string | ID of the project owning this resource. | ++-----------------+--------+-----------------------------------------------+ +| vip_address | string | The IP address of the Virtual IP (VIP). | ++-----------------+--------+-----------------------------------------------+ +| vip_network_id | string | The ID of the network for the VIP. 
| ++-----------------+--------+-----------------------------------------------+ +| vip_port_id | string | The ID of the VIP port. | ++-----------------+--------+-----------------------------------------------+ +|vip_qos_policy_id| string | The ID of the qos policy for the VIP. | ++-----------------+--------+-----------------------------------------------+ +| vip_subnet_id | string | The ID of the subnet for the VIP. | ++-----------------+--------+-----------------------------------------------+ + +**Creating a Fully Populated Load Balancer** + +If the "listener" option is specified, the provider driver will iterate +through the list and create all of the child objects in addition to +creating the load balancer instance. + +Delete +^^^^^^ + +Removes an existing load balancer. + +Octavia will pass in the load balancer ID and cascade bollean as parameters. + +The load balancer will be in the ``PENDING_DELETE`` provisioning_status when +it is passed to the driver. The driver will notify Octavia that the delete +was successful by setting the provisioning_status to ``DELETED``. If the +delete failed, the driver will update the provisioning_status to ``ERROR``. + +The API includes an option for cascade delete. When cascade is set to +True, the provider driver will delete all child objects of the load balancer. + +Failover +^^^^^^^^ + +Performs a failover of a load balancer. + +Octavia will pass in the load balancer ID as a parameter. + +The load balancer will be in the ``PENDING_UPDATE`` provisioning_status when +it is passed to the driver. The driver will update the provisioning_status +of the load balancer to either ``ACTIVE`` if successfully failed over, or +``ERROR`` if not failed over. + +Failover can mean different things in the context of a provider driver. For +example, the Octavia driver replaces the current amphora(s) with another +amphora. For another provider driver, failover may mean failing over from +an active system to a standby system. 
+ +Update +^^^^^^ + +Modifies an existing load balancer using the values supplied in the load +balancer object. + +Octavia will pass in a load balancer object with the fields to be updated. +Fields not updated by the user will contain "Unset" as defined in the data +model. + +As of the writing of this specification the update load balancer object may +contain the following: + ++-----------------+--------+-----------------------------------------------+ +| Name | Type | Description | ++=================+========+===============================================+ +| admin_state_up | bool | Admin state: True if up, False if down. | ++-----------------+--------+-----------------------------------------------+ +| description | string | A human-readable description for the resource.| ++-----------------+--------+-----------------------------------------------+ +| loadbalancer_id | string | ID of load balancer to update. | ++-----------------+--------+-----------------------------------------------+ +| name | string | Human-readable name of the resource. | ++-----------------+--------+-----------------------------------------------+ +|vip_qos_policy_id| string | The ID of the qos policy for the VIP. | ++-----------------+--------+-----------------------------------------------+ + +The load balancer will be in the ``PENDING_UPDATE`` provisioning_status when +it is passed to the driver. The driver will update the provisioning_status +of the load balancer to either ``ACTIVE`` if successfully updated, or +``ERROR`` if the update was not successful. + +The driver is expected to validate that the driver supports the request. +The method will then return or raise an exception if the request cannot be +accepted. + +**Abstract class definition** + +.. code-block:: python + + class Driver(object): + + def create_vip_port(self, loadbalancer_id, vip_dictionary): + """Creates a port for a load balancer VIP. 
+ + If the driver supports creating VIP ports, the driver will create a + VIP port and return the vip_dictionary populated with the vip_port_id. + If the driver does not support port creation, the driver will raise + a NotImplementedError. + + :param: loadbalancer_id (string): ID of loadbalancer. + :param: vip_dictionary (dict): The VIP dictionary. + :returns: VIP dictionary with vip_port_id. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: The driver does not support creating + VIP ports. + """ + raise NotImplementedError() + + def loadbalancer_create(self, loadbalancer): + """Creates a new load balancer. + + :param loadbalancer (object): The load balancer object. + :return: Nothing if the create request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: The driver does not support create. + :raises UnsupportedOptionError: The driver does not + support one of the configuration options. + """ + raise NotImplementedError() + + def loadbalancer_delete(self, loadbalancer_id, cascade=False): + """Deletes a load balancer. + + :param loadbalancer_id (string): ID of the load balancer to delete. + :param cascade (bool): If True, deletes all child objects (listeners, + pools, etc.) in addition to the load balancer. + :return: Nothing if the delete request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: if driver does not support request. + """ + raise NotImplementedError() + + def loadbalancer_failover(self, loadbalancer_id): + """Performs a fail over of a load balancer. + + :param loadbalancer_id (string): ID of the load balancer to failover. + :return: Nothing if the failover request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises: NotImplementedError if driver does not support request. 
+ """ + raise NotImplementedError() + + def loadbalancer_update(self, loadbalancer): + """Updates a load balancer. + + :param loadbalancer (object): The load balancer object. + :return: Nothing if the update request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: The driver does not support request. + :raises UnsupportedOptionError: The driver does not + support one of the configuration options. + """ + raise NotImplementedError() + +Listener +-------- + +Create +^^^^^^ + +Creates a listener for a load balancer. + +Octavia will pass in the listener object with all requested settings. + +The listener will be in the ``PENDING_CREATE`` provisioning_status and +``OFFLINE`` operating_status when it is passed to the driver. The driver +will be responsible for updating the provisioning status of the listener +to either ``ACTIVE`` if successfully created, or ``ERROR`` if not created. + +The Octavia API will accept and do basic API validation of the create +request from the user. The listener python object representing the +request body will be passed to the driver create method as it was received +and validated with the following exceptions: + +1. The project_id will be removed, if present, as this field is now +deprecated. The listener will inherit the project_id from the parent +load balancer. +2. The default_tls_container_ref will be expanded and provided to the driver +in PEM format. +3. The sni_container_refs will be expanded and provided to the driver in +PEM format. + +.. _Listener objects: + +**Listener object** + +As of the writing of this specification the create listener object may +contain the following: + ++----------------------------+--------+-------------------------------------+ +| Name | Type | Description | ++============================+========+=====================================+ +| admin_state_up | bool | Admin state: True if up, False if | +| | | down. 
| ++----------------------------+--------+-------------------------------------+ +| connection_limit | int | The max number of connections | +| | | permitted for this listener. Default| +| | | is -1, which is infinite | +| | | connections. | ++----------------------------+--------+-------------------------------------+ +| default_pool | object | A `Pool object`_. | ++----------------------------+--------+-------------------------------------+ +| default_pool_id | string | The ID of the pool used by the | +| | | listener if no L7 policies match. | ++----------------------------+--------+-------------------------------------+ +| default_tls_container_data | dict | A `TLS container`_ dict. | ++----------------------------+--------+-------------------------------------+ +| default_tls_container_refs | string | The reference to the secrets | +| | | container. | ++----------------------------+--------+-------------------------------------+ +| description | string | A human-readable description for the| +| | | listener. | ++----------------------------+--------+-------------------------------------+ +| insert_headers | dict | A dictionary of optional headers to | +| | | insert into the request before it is| +| | | sent to the backend member. See | +| | | `Supported HTTP Header Insertions`_.| +| | | Keys and values are specified as | +| | | strings. | ++----------------------------+--------+-------------------------------------+ +| l7policies | list | A list of `L7policy objects`_. | ++----------------------------+--------+-------------------------------------+ +| listener_id | string | ID of listener to create. | ++----------------------------+--------+-------------------------------------+ +| loadbalancer_id | string | ID of load balancer. 
| ++----------------------------+--------+-------------------------------------+ +| name | string | Human-readable name of the listener.| ++----------------------------+--------+-------------------------------------+ +| protocol | string | Protocol type: One of HTTP, HTTPS, | +| | | TCP, or TERMINATED_HTTPS. | ++----------------------------+--------+-------------------------------------+ +| protocol_port | int | Protocol port number. | ++----------------------------+--------+-------------------------------------+ +| sni_container_data | list | A list of `TLS container`_ dict. | ++----------------------------+--------+-------------------------------------+ +| sni_container_refs | list | A list of references to the SNI | +| | | secrets containers. | ++----------------------------+--------+-------------------------------------+ +| timeout_client_data | int | Frontend client inactivity timeout | +| | | in milliseconds. | ++----------------------------+--------+-------------------------------------+ +| timeout_member_connect | int | Backend member connection timeout in| +| | | milliseconds. | ++----------------------------+--------+-------------------------------------+ +| timeout_member_data | int | Backend member inactivity timeout in| +| | | milliseconds. | ++----------------------------+--------+-------------------------------------+ +| timeout_tcp_inspect | int | Time, in milliseconds, to wait for | +| | | additional TCP packets for content | +| | | inspection. | ++----------------------------+--------+-------------------------------------+ + +.. _TLS container: + +As of the writing of this specification the TLS container dictionary +contains the following: + ++---------------+--------+------------------------------------------------+ +| Key | Type | Description | ++===============+========+================================================+ +| certificate | string | The PEM encoded certificate. 
| ++---------------+--------+------------------------------------------------+ +| intermediates | List | A list of intermediate PEM certificates. | ++---------------+--------+------------------------------------------------+ +| passphrase | string | The private_key passphrase. | ++---------------+--------+------------------------------------------------+ +| primary_cn | string | The primary common name of the certificate. | ++---------------+--------+------------------------------------------------+ +| private_key | string | The PEM encoded private key. | ++---------------+--------+------------------------------------------------+ + +.. _Supported HTTP Header Insertions: + +As of the writing of this specification the Supported HTTP Header Insertions +are: + ++-------------------+------+------------------------------------------------+ +| Key | Type | Description | ++===================+======+================================================+ +| X-Forwarded-For | bool | When True a X-Forwarded-For header is inserted | +| | | into the request to the backend member that | +| | | specifies the client IP address. | ++-------------------+------+------------------------------------------------+ +| X-Forwarded-Port | int | A X-Forwarded-Port header is inserted into the | +| | | request to the backend member that specifies | +| | | the integer provided. Typically this is used to| +| | | indicate the port the client connected to on | +| | | the load balancer. | ++-------------------+------+------------------------------------------------+ + +**Creating a Fully Populated Listener** + +If the "default_pool" or "l7policies" option is specified, the provider +driver will create all of the child objects in addition to creating the +listener instance. + +Delete +^^^^^^ + +Deletes an existing listener. + +Octavia will pass the listener ID as a parameter. + +The listener will be in the ``PENDING_DELETE`` provisioning_status when +it is passed to the driver. 
The driver will notify Octavia that the delete +was successful by setting the provisioning_status to ``DELETED``. If the +delete failed, the driver will update the provisioning_status to ``ERROR``. + +Update +^^^^^^ + +Modifies an existing listener using the values supplied in the listener +object. + +Octavia will pass in a listener object with the fields to be updated. +Fields not updated by the user will contain "Unset" as defined in the data +model. + +As of the writing of this specification the update listener object may +contain the following: + ++----------------------------+--------+-------------------------------------+ +| Name | Type | Description | ++============================+========+=====================================+ +| admin_state_up | bool | Admin state: True if up, False if | +| | | down. | ++----------------------------+--------+-------------------------------------+ +| connection_limit | int | The max number of connections | +| | | permitted for this listener. Default| +| | | is -1, which is infinite | +| | | connections. | ++----------------------------+--------+-------------------------------------+ +| default_pool_id | string | The ID of the pool used by the | +| | | listener if no L7 policies match. | ++----------------------------+--------+-------------------------------------+ +| default_tls_container_data | dict | A `TLS container`_ dict. | ++----------------------------+--------+-------------------------------------+ +| default_tls_container_refs | string | The reference to the secrets | +| | | container. | ++----------------------------+--------+-------------------------------------+ +| description | string | A human-readable description for | +| | | the listener. | ++----------------------------+--------+-------------------------------------+ +| insert_headers | dict | A dictionary of optional headers to | +| | | insert into the request before it is| +| | | sent to the backend member. 
See | +| | | `Supported HTTP Header Insertions`_.| +| | | Keys and values are specified as | +| | | strings. | ++----------------------------+--------+-------------------------------------+ +| listener_id | string | ID of listener to update. | ++----------------------------+--------+-------------------------------------+ +| name | string | Human-readable name of the listener.| ++----------------------------+--------+-------------------------------------+ +| sni_container_data | list | A list of `TLS container`_ dict. | ++----------------------------+--------+-------------------------------------+ +| sni_container_refs | list | A list of references to the SNI | +| | | secrets containers. | ++----------------------------+--------+-------------------------------------+ +| timeout_client_data | int | Frontend client inactivity timeout | +| | | in milliseconds. | ++----------------------------+--------+-------------------------------------+ +| timeout_member_connect | int | Backend member connection timeout in| +| | | milliseconds. | ++----------------------------+--------+-------------------------------------+ +| timeout_member_data | int | Backend member inactivity timeout in| +| | | milliseconds. | ++----------------------------+--------+-------------------------------------+ +| timeout_tcp_inspect | int | Time, in milliseconds, to wait for | +| | | additional TCP packets for content | +| | | inspection. | ++----------------------------+--------+-------------------------------------+ + +The listener will be in the ``PENDING_UPDATE`` provisioning_status when +it is passed to the driver. The driver will update the provisioning_status +of the listener to either ``ACTIVE`` if successfully updated, or ``ERROR`` +if the update was not successful. + +The driver is expected to validate that the driver supports the request. +The method will then return or raise an exception if the request cannot be +accepted. + +**Abstract class definition** + +.. 
code-block:: python + + class Driver(object): + def listener_create(self, listener): + """Creates a new listener. + + :param listener (object): The listener object. + :return: Nothing if the create request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: if driver does not support request. + :raises UnsupportedOptionError: if driver does not + support one of the configuration options. + """ + raise NotImplementedError() + + def listener_delete(self, listener_id): + """Deletes a listener. + + :param listener_id (string): ID of the listener to delete. + :return: Nothing if the delete request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: if driver does not support request. + """ + raise NotImplementedError() + + def listener_update(self, listener): + """Updates a listener. + + :param listener (object): The listener object. + :return: Nothing if the update request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: if driver does not support request. + :raises UnsupportedOptionError: if driver does not + support one of the configuration options. + """ + raise NotImplementedError() + +Pool +---- + +Create +^^^^^^ + +Creates a pool for a load balancer. + +Octavia will pass in the pool object with all requested settings. + +The pool will be in the ``PENDING_CREATE`` provisioning_status and +``OFFLINE`` operating_status when it is passed to the driver. The driver +will be responsible for updating the provisioning status of the pool +to either ``ACTIVE`` if successfully created, or ``ERROR`` if not created. + +The Octavia API will accept and do basic API validation of the create +request from the user. The pool python object representing the request +body will be passed to the driver create method as it was received and +validated with the following exceptions: + +1. 
The project_id will be removed, if present, as this field is now + deprecated. The pool will inherit the project_id from the parent + load balancer. + +.. _Pool object: + +**Pool object** + +As of the writing of this specification the create pool object may +contain the following: + ++-----------------------+--------+------------------------------------------+ +| Name | Type | Description | ++=======================+========+==========================================+ +| admin_state_up | bool | Admin state: True if up, False if down. | ++-----------------------+--------+------------------------------------------+ +| description | string | A human-readable description for the | +| | | pool. | ++-----------------------+--------+------------------------------------------+ +| healthmonitor | object | A `Healthmonitor object`_. | ++-----------------------+--------+------------------------------------------+ +| lb_algorithm | string | Load balancing algorithm: One of | +| | | ROUND_ROBIN, LEAST_CONNECTIONS, or | +| | | SOURCE_IP. | ++-----------------------+--------+------------------------------------------+ +| loadbalancer_id | string | ID of load balancer. | ++-----------------------+--------+------------------------------------------+ +| members | list | A list of `Member objects`_. | ++-----------------------+--------+------------------------------------------+ +| name | string | Human-readable name of the pool. | ++-----------------------+--------+------------------------------------------+ +| pool_id | string | ID of pool to create. | ++-----------------------+--------+------------------------------------------+ +| protocol | string | Protocol type: One of HTTP, HTTPS, | +| | | PROXY, or TCP. 
| ++-----------------------+--------+------------------------------------------+ +| session_persistence | dict | Defines session persistence as one of | +| | | {'type': <'HTTP_COOKIE' | 'SOURCE_IP'>} | +| | | OR | +| | | {'type': 'APP_COOKIE', | +| | | 'cookie_name': } | ++-----------------------+--------+------------------------------------------+ + +Delete +^^^^^^ + +Removes an existing pool and all of its members. + +Octavia will pass the pool ID as a parameter. + +The pool will be in the ``PENDING_DELETE`` provisioning_status when +it is passed to the driver. The driver will notify Octavia that the delete +was successful by setting the provisioning_status to ``DELETED``. If the +delete failed, the driver will update the provisioning_status to ``ERROR``. + +Update +^^^^^^ + +Modifies an existing pool using the values supplied in the pool object. + +Octavia will pass in a pool object with the fields to be updated. +Fields not updated by the user will contain "Unset" as defined in the data +model. + +As of the writing of this specification the update pool object may +contain the following: + ++-----------------------+--------+------------------------------------------+ +| Name | Type | Description | ++=======================+========+==========================================+ +| admin_state_up | bool | Admin state: True if up, False if down. | ++-----------------------+--------+------------------------------------------+ +| description | string | A human-readable description for the | +| | | pool. | ++-----------------------+--------+------------------------------------------+ +| lb_algorithm | string | Load balancing algorithm: One of | +| | | ROUND_ROBIN, LEAST_CONNECTIONS, or | +| | | SOURCE_IP. | ++-----------------------+--------+------------------------------------------+ +| name | string | Human-readable name of the pool. | ++-----------------------+--------+------------------------------------------+ +| pool_id | string | ID of pool to update. 
| ++-----------------------+--------+------------------------------------------+ +| session_persistence | dict | Defines session persistence as one of | +| | | {'type': <'HTTP_COOKIE' | 'SOURCE_IP'>} | +| | | OR | +| | | {'type': 'APP_COOKIE', | +| | | 'cookie_name': } | ++-----------------------+--------+------------------------------------------+ + +The pool will be in the ``PENDING_UPDATE`` provisioning_status when it is +passed to the driver. The driver will update the provisioning_status of the +pool to either ``ACTIVE`` if successfully updated, or ``ERROR`` if the +update was not successful. + +The driver is expected to validate that the driver supports the request. +The method will then return or raise an exception if the request cannot be +accepted. + +**Abstract class definition** + +.. code-block:: python + + class Driver(object): + def pool_create(self, pool): + """Creates a new pool. + + :param pool (object): The pool object. + :return: Nothing if the create request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: if driver does not support request. + :raises UnsupportedOptionError: if driver does not + support one of the configuration options. + """ + raise NotImplementedError() + + def pool_delete(self, pool_id): + """Deletes a pool and its members. + + :param pool_id (string): ID of the pool to delete. + :return: Nothing if the delete request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: if driver does not support request. + """ + raise NotImplementedError() + + def pool_update(self, pool): + """Updates a pool. + + :param pool (object): The pool object. + :return: Nothing if the update request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: if driver does not support request. 
+ :raises UnsupportedOptionError: if driver does not + support one of the configuration options. + """ + raise NotImplementedError() + +Member +------ + +Create +^^^^^^ + +Creates a member for a pool. + +Octavia will pass in the member object with all requested settings. + +The member will be in the ``PENDING_CREATE`` provisioning_status and +``OFFLINE`` operating_status when it is passed to the driver. The driver +will be responsible for updating the provisioning status of the member +to either ``ACTIVE`` if successfully created, or ``ERROR`` if not created. + +The Octavia API will accept and do basic API validation of the create +request from the user. The member python object representing the +request body will be passed to the driver create method as it was received +and validated with the following exceptions: + +1. The project_id will be removed, if present, as this field is now + deprecated. The member will inherit the project_id from the parent + load balancer. + +.. _Member objects: + +**Member object** + +As of the writing of this specification the create member object may +contain the following: + ++-----------------------+--------+------------------------------------------+ +| Name | Type | Description | ++=======================+========+==========================================+ +| address | string | The IP address of the backend member to | +| | | receive traffic from the load balancer. | ++-----------------------+--------+------------------------------------------+ +| admin_state_up | bool | Admin state: True if up, False if down. | ++-----------------------+--------+------------------------------------------+ +| backup | bool | Is the member a backup? Backup members | +| | | only receive traffic when all non-backup | +| | | members are down. | ++-----------------------+--------+------------------------------------------+ +| member_id | string | ID of member to create. 
| ++-----------------------+--------+------------------------------------------+ +| monitor_address | string | An alternate IP address used for health | +| | | monitoring a backend member. | ++-----------------------+--------+------------------------------------------+ +| monitor_port | int | An alternate protocol port used for | +| | | health monitoring a backend member. | ++-----------------------+--------+------------------------------------------+ +| name | string | Human-readable name of the member. | ++-----------------------+--------+------------------------------------------+ +| pool_id | string | ID of pool. | ++-----------------------+--------+------------------------------------------+ +| protocol_port | int | The port on which the backend member | +| | | listens for traffic. | ++-----------------------+--------+------------------------------------------+ +| subnet_id | string | Subnet ID. | ++-----------------------+--------+------------------------------------------+ +| weight | int | The weight of a member determines the | +| | | portion of requests or connections it | +| | | services compared to the other members of| +| | | the pool. For example, a member with a | +| | | weight of 10 receives five times as many | +| | | requests as a member with a weight of 2. | +| | | A value of 0 means the member does not | +| | | receive new connections but continues to | +| | | service existing connections. A valid | +| | | value is from 0 to 256. Default is 1. | ++-----------------------+--------+------------------------------------------+ + +Delete +^^^^^^ + +Removes a pool member. + +Octavia will pass the member ID as a parameter. + +The member will be in the ``PENDING_DELETE`` provisioning_status when +it is passed to the driver. The driver will notify Octavia that the delete +was successful by setting the provisioning_status to ``DELETED``. If the +delete failed, the driver will update the provisioning_status to ``ERROR``. 
+ +Update +^^^^^^ + +Modifies an existing member using the values supplied in the member object. + +Octavia will pass in a member object with the fields to be updated. +Fields not updated by the user will contain "Unset" as defined in the data +model. + +As of the writing of this specification the update member object may contain +the following: + ++-----------------------+--------+------------------------------------------+ +| Name | Type | Description | ++=======================+========+==========================================+ +| admin_state_up | bool | Admin state: True if up, False if down. | ++-----------------------+--------+------------------------------------------+ +| backup | bool | Is the member a backup? Backup members | +| | | only receive traffic when all non-backup | +| | | members are down. | ++-----------------------+--------+------------------------------------------+ +| member_id | string | ID of member to update. | ++-----------------------+--------+------------------------------------------+ +| monitor_address | string | An alternate IP address used for health | +| | | monitoring a backend member. | ++-----------------------+--------+------------------------------------------+ +| monitor_port | int | An alternate protocol port used for | +| | | health monitoring a backend member. | ++-----------------------+--------+------------------------------------------+ +| name | string | Human-readable name of the member. | ++-----------------------+--------+------------------------------------------+ +| weight | int | The weight of a member determines the | +| | | portion of requests or connections it | +| | | services compared to the other members of| +| | | the pool. For example, a member with a | +| | | weight of 10 receives five times as many | +| | | requests as a member with a weight of 2. | +| | | A value of 0 means the member does not | +| | | receive new connections but continues to | +| | | service existing connections. 
A valid | +| | | value is from 0 to 256. Default is 1. | ++-----------------------+--------+------------------------------------------+ + +The member will be in the ``PENDING_UPDATE`` provisioning_status when +it is passed to the driver. The driver will update the provisioning_status +of the member to either ``ACTIVE`` if successfully updated, or ``ERROR`` +if the update was not successful. + +The driver is expected to validate that the driver supports the request. +The method will then return or raise an exception if the request cannot be +accepted. + +Batch Update +^^^^^^^^^^^^ + +Set the state of members for a pool in one API call. This may include +creating new members, deleting old members, and updating existing members. +Existing members are matched based on address/port combination. + +For example, assume a pool currently has two members. These members have the +following address/port combinations: '192.0.2.15:80' and '192.0.2.16:80'. +Now assume a PUT request is made that includes members with address/port +combinations: '192.0.2.16:80' and '192.0.2.17:80'. The member '192.0.2.15:80' +will be deleted because it was not in the request. The member '192.0.2.16:80' +will be updated to match the request data for that member, because it was +matched. The member '192.0.2.17:80' will be created, because no such member +existed. + +The members will be in the ``PENDING_CREATE``, ``PENDING_UPDATE``, or +``PENDING_DELETE`` provisioning_status when it is passed to the driver. +The driver will update the provisioning_status of the members to either +``ACTIVE`` or ``DELETED`` if successfully updated, or ``ERROR`` +if the update was not successful. + +The batch update method will supply a list of `Member objects`_. +Existing members not in this list should be deleted, +existing members in the list should be updated, +and members in the list that do not already exist should be created. + +**Abstract class definition** + +.. 
code-block:: python + + class Driver(object): + def member_create(self, member): + """Creates a new member for a pool. + + :param member (object): The member object. + :return: Nothing if the create request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: if driver does not support request. + :raises UnsupportedOptionError: if driver does not + support one of the configuration options. + """ + raise NotImplementedError() + + def member_delete(self, member_id): + """Deletes a pool member. + + :param member_id (string): ID of the member to delete. + :return: Nothing if the delete request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: if driver does not support request. + """ + raise NotImplementedError() + + def member_update(self, member): + """Updates a pool member. + + :param member (object): The member object. + :return: Nothing if the update request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: if driver does not support request. + :raises UnsupportedOptionError: if driver does not + support one of the configuration options. + """ + raise NotImplementedError() + + def member_batch_update(self, members): + """Creates, updates, or deletes a set of pool members. + + :param members (list): List of member objects. + :return: Nothing if the update request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: if driver does not support request. + :raises UnsupportedOptionError: if driver does not + support one of the configuration options. + """ + raise NotImplementedError() + +Health Monitor +-------------- + +Create +^^^^^^ + +Creates a health monitor on a pool. + +Octavia will pass in the health monitor object with all requested settings. 
+ +The health monitor will be in the ``PENDING_CREATE`` provisioning_status and +``OFFLINE`` operating_status when it is passed to the driver. The driver +will be responsible for updating the provisioning status of the health +monitor to either ``ACTIVE`` if successfully created, or ``ERROR`` if not +created. + +The Octavia API will accept and do basic API validation of the create +request from the user. The healthmonitor python object representing the +request body will be passed to the driver create method as it was received +and validated with the following exceptions: + +1. The project_id will be removed, if present, as this field is now + deprecated. The listener will inherit the project_id from the parent + load balancer. + +.. _Healthmonitor object: + +**Healthmonitor object** + ++-----------------------+--------+------------------------------------------+ +| Name | Type | Description | ++=======================+========+==========================================+ +| admin_state_up | bool | Admin state: True if up, False if down. | ++-----------------------+--------+------------------------------------------+ +| delay | int | The interval, in seconds, between health | +| | | checks. | ++-----------------------+--------+------------------------------------------+ +| expected_codes | string | The expected HTTP status codes to get | +| | | from a successful health check. This may | +| | | be a single value, a list, or a range. | ++-----------------------+--------+------------------------------------------+ +| healthmonitor_id | string | ID of health monitor to create. | ++-----------------------+--------+------------------------------------------+ +| http_method | string | The HTTP method that the health monitor | +| | | uses for requests. One of CONNECT, | +| | | DELETE, GET, HEAD, OPTIONS, PATCH, POST, | +| | | PUT, or TRACE. 
| ++-----------------------+--------+------------------------------------------+ +| max_retries | int | The number of successful checks before | +| | | changing the operating status of the | +| | | member to ONLINE. | ++-----------------------+--------+------------------------------------------+ +| max_retries_down | int | The number of allowed check failures | +| | | before changing the operating status of | +| | | the member to ERROR. A valid value is | +| | | from 1 to 10. | ++-----------------------+--------+------------------------------------------+ +| name | string | Human-readable name of the monitor. | ++-----------------------+--------+------------------------------------------+ +| pool_id | string | The pool to monitor. | ++-----------------------+--------+------------------------------------------+ +| timeout | int | The time, in seconds, after which a | +| | | health check times out. This value must | +| | | be less than the delay value. | ++-----------------------+--------+------------------------------------------+ +| type | string | The type of health monitor. One of HTTP, | +| | | HTTPS, PING, TCP, or TLS-HELLO. | ++-----------------------+--------+------------------------------------------+ +| url_path | string | The HTTP URL path of the request sent by | +| | | the monitor to test the health of a | +| | | backend member. Must be a string that | +| | | begins with a forward slash (/). | ++-----------------------+--------+------------------------------------------+ + +Delete +^^^^^^ + +Deletes an existing health monitor. + +Octavia will pass in the health monitor ID as a parameter. + +The health monitor will be in the ``PENDING_DELETE`` provisioning_status +when it is passed to the driver. The driver will notify Octavia that the +delete was successful by setting the provisioning_status to ``DELETED``. +If the delete failed, the driver will update the provisioning_status to +``ERROR``. 
+ +Update +^^^^^^ + +Modifies an existing health monitor using the values supplied in the +health monitor object. + +Octavia will pass in a health monitor object with the fields to be updated. +Fields not updated by the user will contain "Unset" as defined in the data +model. + +As of the writing of this specification the update health monitor object may +contain the following: + ++-----------------------+--------+------------------------------------------+ +| Name | Type | Description | ++=======================+========+==========================================+ +| admin_state_up | bool | Admin state: True if up, False if down. | ++-----------------------+--------+------------------------------------------+ +| delay | int | The interval, in seconds, between health | +| | | checks. | ++-----------------------+--------+------------------------------------------+ +| expected_codes | string | The expected HTTP status codes to get | +| | | from a successful health check. This may | +| | | be a single value, a list, or a range. | ++-----------------------+--------+------------------------------------------+ +| healthmonitor_id | string | ID of health monitor to create. | ++-----------------------+--------+------------------------------------------+ +| http_method | string | The HTTP method that the health monitor | +| | | uses for requests. One of CONNECT, | +| | | DELETE, GET, HEAD, OPTIONS, PATCH, POST, | +| | | PUT, or TRACE. | ++-----------------------+--------+------------------------------------------+ +| max_retries | int | The number of successful checks before | +| | | changing the operating status of the | +| | | member to ONLINE. | ++-----------------------+--------+------------------------------------------+ +| max_retries_down | int | The number of allowed check failures | +| | | before changing the operating status of | +| | | the member to ERROR. A valid value is | +| | | from 1 to 10. 
| ++-----------------------+--------+------------------------------------------+ +| name | string | Human-readable name of the monitor. | ++-----------------------+--------+------------------------------------------+ +| timeout | int | The time, in seconds, after which a | +| | | health check times out. This value must | +| | | be less than the delay value. | ++-----------------------+--------+------------------------------------------+ +| url_path | string | The HTTP URL path of the request sent by | +| | | the monitor to test the health of a | +| | | backend member. Must be a string that | +| | | begins with a forward slash (/). | ++-----------------------+--------+------------------------------------------+ + +The health monitor will be in the ``PENDING_UPDATE`` provisioning_status +when it is passed to the driver. The driver will update the +provisioning_status of the health monitor to either ``ACTIVE`` if +successfully updated, or ``ERROR`` if the update was not successful. + +The driver is expected to validate that the driver supports the request. +The method will then return or raise an exception if the request cannot be +accepted. + +**Abstract class definition** + +.. code-block:: python + + class Driver(object): + def health_monitor_create(self, healthmonitor): + """Creates a new health monitor. + + :param healthmonitor (object): The health monitor object. + :return: Nothing if the create request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: if driver does not support request. + :raises UnsupportedOptionError: if driver does not + support one of the configuration options. + """ + raise NotImplementedError() + + def health_monitor_delete(self, healthmonitor_id): + """Deletes a healthmonitor_id. + + :param healthmonitor_id (string): ID of the monitor to delete. + :return: Nothing if the create request was accepted. + :raises DriverError: An unexpected error occurred in the driver. 
+ :raises NotImplementedError: if driver does not support request. + """ + raise NotImplementedError() + + def health_monitor_update(self, healthmonitor): + """Updates a health monitor. + + :param healthmonitor (object): The health monitor object. + :return: Nothing if the create request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: if driver does not support request. + :raises UnsupportedOptionError: if driver does not + support one of the configuration options. + """ + raise NotImplementedError() + +L7 Policy +--------- + +Create +^^^^^^ + +Creates an L7 policy. + +Octavia will pass in the L7 policy object with all requested settings. + +The L7 policy will be in the ``PENDING_CREATE`` provisioning_status and +``OFFLINE`` operating_status when it is passed to the driver. The driver +will be responsible for updating the provisioning status of the L7 policy +to either ``ACTIVE`` if successfully created, or ``ERROR`` if not created. + +The Octavia API will accept and do basic API validation of the create +request from the user. The l7policy python object representing the +request body will be passed to the driver create method as it was received +and validated with the following exceptions: + +1. The project_id will be removed, if present, as this field is now + deprecated. The l7policy will inherit the project_id from the parent + load balancer. + +.. _L7policy objects: + +**L7policy object** + +As of the writing of this specification the create l7policy object may +contain the following: + ++-----------------------+--------+------------------------------------------+ +| Name | Type | Description | ++=======================+========+==========================================+ +| action | string | The L7 policy action. One of | +| | | REDIRECT_TO_POOL, REDIRECT_TO_URL, or | +| | | REJECT. 
| ++-----------------------+--------+------------------------------------------+ +| admin_state_up | bool | Admin state: True if up, False if down. | ++-----------------------+--------+------------------------------------------+ +| description | string | A human-readable description for the | +| | | L7 policy. | ++-----------------------+--------+------------------------------------------+ +| l7policy_id | string | The ID of the L7 policy. | ++-----------------------+--------+------------------------------------------+ +| listener_id | string | The ID of the listener. | ++-----------------------+--------+------------------------------------------+ +| name | string | Human-readable name of the L7 policy. | ++-----------------------+--------+------------------------------------------+ +| position | int | The position of this policy on the | +| | | listener. Positions start at 1. | ++-----------------------+--------+------------------------------------------+ +| redirect_pool_id | string | Requests matching this policy will be | +| | | redirected to the pool with this ID. | +| | | Only valid if action is REDIRECT_TO_POOL.| ++-----------------------+--------+------------------------------------------+ +| redirect_url | string | Requests matching this policy will be | +| | | redirected to this URL. Only valid if | +| | | action is REDIRECT_TO_URL. | ++-----------------------+--------+------------------------------------------+ +| rules | list | A list of l7rule objects. | ++-----------------------+--------+------------------------------------------+ + +*Creating a Fully Populated L7 policy* + +If the "rules" option is specified, the provider driver will create all of +the child objects in addition to creating the L7 policy instance. + +Delete +^^^^^^ + +Deletes an existing L7 policy. + +Octavia will pass in the L7 policy ID as a parameter. + +The l7policy will be in the ``PENDING_DELETE`` provisioning_status when +it is passed to the driver. 
The driver will notify Octavia that the delete +was successful by setting the provisioning_status to ``DELETED``. If the +delete failed, the driver will update the provisioning_status to ``ERROR``. + +Update +^^^^^^ + +Modifies an existing L7 policy using the values supplied in the l7policy +object. + +Octavia will pass in an L7 policy object with the fields to be updated. +Fields not updated by the user will contain "Unset" as defined in the data +model. + +As of the writing of this specification the update L7 policy object may +contain the following: + ++-----------------------+--------+------------------------------------------+ +| Name | Type | Description | ++=======================+========+==========================================+ +| action | string | The L7 policy action. One of | +| | | REDIRECT_TO_POOL, REDIRECT_TO_URL, or | +| | | REJECT. | ++-----------------------+--------+------------------------------------------+ +| admin_state_up | bool | Admin state: True if up, False if down. | ++-----------------------+--------+------------------------------------------+ +| description | string | A human-readable description for the | +| | | L7 policy. | ++-----------------------+--------+------------------------------------------+ +| l7policy_id | string | The ID of the L7 policy. | ++-----------------------+--------+------------------------------------------+ +| name | string | Human-readable name of the L7 policy. | ++-----------------------+--------+------------------------------------------+ +| position | int | The position of this policy on the | +| | | listener. Positions start at 1. | ++-----------------------+--------+------------------------------------------+ +| redirect_pool_id | string | Requests matching this policy will be | +| | | redirected to the pool with this ID. 
| +| | | Only valid if action is REDIRECT_TO_POOL.| ++-----------------------+--------+------------------------------------------+ +| redirect_url | string | Requests matching this policy will be | +| | | redirected to this URL. Only valid if | +| | | action is REDIRECT_TO_URL. | ++-----------------------+--------+------------------------------------------+ + +The L7 policy will be in the ``PENDING_UPDATE`` provisioning_status when +it is passed to the driver. The driver will update the provisioning_status +of the L7 policy to either ``ACTIVE`` if successfully updated, or ``ERROR`` +if the update was not successful. + +The driver is expected to validate that the driver supports the request. +The method will then return or raise an exception if the request cannot be +accepted. + +**Abstract class definition** + +.. code-block:: python + + class Driver(object): + def l7policy_create(self, l7policy): + """Creates a new L7 policy. + + :param l7policy (object): The l7policy object. + :return: Nothing if the create request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: if driver does not support request. + :raises UnsupportedOptionError: if driver does not + support one of the configuration options. + """ + raise NotImplementedError() + + def l7policy_delete(self, l7policy_id): + """Deletes an L7 policy. + + :param l7policy_id (string): ID of the L7 policy to delete. + :return: Nothing if the delete request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: if driver does not support request. + """ + raise NotImplementedError() + + def l7policy_update(self, l7policy): + """Updates an L7 policy. + + :param l7policy (object): The l7policy object. + :return: Nothing if the update request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: if driver does not support request. 
+ :raises UnsupportedOptionError: if driver does not + support one of the configuration options. + """ + raise NotImplementedError() + +L7 Rule +------- + +Create +^^^^^^ + +Creates a new L7 rule for an existing L7 policy. + +Octavia will pass in the L7 rule object with all requested settings. + +The L7 rule will be in the ``PENDING_CREATE`` provisioning_status and +``OFFLINE`` operating_status when it is passed to the driver. The driver +will be responsible for updating the provisioning status of the L7 rule +to either ``ACTIVE`` if successfully created, or ``ERROR`` if not created. + +The Octavia API will accept and do basic API validation of the create +request from the user. The l7rule python object representing the +request body will be passed to the driver create method as it was received +and validated with the following exceptions: + +1. The project_id will be removed, if present, as this field is now + deprecated. The listener will inherit the project_id from the parent + load balancer. + +.. _L7rule objects: + +**L7rule object** + +As of the writing of this specification the create l7rule object may +contain the following: + ++-----------------------+--------+------------------------------------------+ +| Name | Type | Description | ++=======================+========+==========================================+ +| admin_state_up | bool | Admin state: True if up, False if down. | ++-----------------------+--------+------------------------------------------+ +| compare_type | string | The comparison type for the L7 rule. One | +| | | of CONTAINS, ENDS_WITH, EQUAL_TO, REGEX, | +| | | or STARTS_WITH. | ++-----------------------+--------+------------------------------------------+ +| invert | bool | When True the logic of the rule is | +| | | inverted. For example, with invert True, | +| | | equal to would become not equal to. | ++-----------------------+--------+------------------------------------------+ +| key | string | The key to use for the comparison. 
For | +| | | example, the name of the cookie to | +| | | evaluate. | ++-----------------------+--------+------------------------------------------+ +| l7policy_id | string | The ID of the L7 policy. | ++-----------------------+--------+------------------------------------------+ +| l7rule_id | string | The ID of the L7 rule. | ++-----------------------+--------+------------------------------------------+ +| type | string | The L7 rule type. One of COOKIE, | +| | | FILE_TYPE, HEADER, HOST_NAME, or PATH. | ++-----------------------+--------+------------------------------------------+ +| value | string | The value to use for the comparison. For | +| | | example, the file type to compare. | ++-----------------------+--------+------------------------------------------+ + +Delete +^^^^^^ + +Deletes an existing L7 rule. + +Octavia will pass in the L7 rule ID as a parameter. + +The L7 rule will be in the ``PENDING_DELETE`` provisioning_status when +it is passed to the driver. The driver will notify Octavia that the delete +was successful by setting the provisioning_status to ``DELETED``. If the +delete failed, the driver will update the provisioning_status to ``ERROR``. + +Update +^^^^^^ + +Modifies an existing L7 rule using the values supplied in the l7rule object. + +Octavia will pass in an L7 rule object with the fields to be updated. +Fields not updated by the user will contain "Unset" as defined in the data +model. + +As of the writing of this specification the update L7 rule object may +contain the following: + ++-----------------------+--------+------------------------------------------+ +| Name | Type | Description | ++=======================+========+==========================================+ +| admin_state_up | bool | Admin state: True if up, False if down. | ++-----------------------+--------+------------------------------------------+ +| compare_type | string | The comparison type for the L7 rule. 
One | +| | | of CONTAINS, ENDS_WITH, EQUAL_TO, REGEX, | +| | | or STARTS_WITH. | ++-----------------------+--------+------------------------------------------+ +| invert | bool | When True the logic of the rule is | +| | | inverted. For example, with invert True, | +| | | equal to would become not equal to. | ++-----------------------+--------+------------------------------------------+ +| key | string | The key to use for the comparison. For | +| | | example, the name of the cookie to | +| | | evaluate. | ++-----------------------+--------+------------------------------------------+ +| l7rule_id | string | The ID of the L7 rule. | ++-----------------------+--------+------------------------------------------+ +| type | string | The L7 rule type. One of COOKIE, | +| | | FILE_TYPE, HEADER, HOST_NAME, or PATH. | ++-----------------------+--------+------------------------------------------+ +| value | string | The value to use for the comparison. For | +| | | example, the file type to compare. | ++-----------------------+--------+------------------------------------------+ + +The L7 rule will be in the ``PENDING_UPDATE`` provisioning_status when +it is passed to the driver. The driver will update the provisioning_status +of the L7 rule to either ``ACTIVE`` if successfully updated, or ``ERROR`` +if the update was not successful. + +The driver is expected to validate that the driver supports the request. +The method will then return or raise an exception if the request cannot be +accepted. + +**Abstract class definition** + +.. code-block:: python + + class Driver(object): + def l7rule_create(self, l7rule): + + """Creates a new L7 rule. + + :param l7rule (object): The L7 rule object. + :return: Nothing if the create request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: if driver does not support request. + :raises UnsupportedOptionError: if driver does not + support one of the configuration options. 
+ """ + raise NotImplementedError() + + def l7rule_delete(self, l7rule_id): + + """Deletes an L7 rule. + + :param l7rule_id (string): ID of the L7 rule to delete. + :return: Nothing if the delete request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: if driver does not support request. + """ + raise NotImplementedError() + + def l7rule_update(self, l7rule): + + """Updates an L7 rule. + + :param l7rule (object): The L7 rule object. + :return: Nothing if the update request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: if driver does not support request. + :raises UnsupportedOptionError: if driver does not + support one of the configuration options. + """ + raise NotImplementedError() + +Flavor +------ + +Octavia flavors are defined in a separate `flavor specification`_. +Support for flavors will be provided through two provider driver interfaces, +one to query supported flavor metadata keys and another to validate that a +flavor is supported. Both functions are synchronous. + +.. _flavor specification: ../specs/version1.0/flavors.html + +get_supported_flavor_keys +^^^^^^^^^^^^^^^^^^^^^^^^^ + +Retrieves a dictionary of supported flavor keys and their description. + +.. code-block:: python + + {"topology": "The load balancer topology for the flavor. One of: SINGLE, ACTIVE_STANDBY", + "compute_flavor": "The compute driver flavor to use for the load balancer instances"} + +validate_flavor +^^^^^^^^^^^^^^^ + +Validates that the driver supports the flavor metadata dictionary. + +The validate_flavor method will be passed a flavor metadata dictionary that +the driver will validate. This is used when an operator uploads a new flavor +that applies to the driver. + +The validate_flavor method will either return or raise a +``UnsupportedOptionError`` exception. + +Following are interface definitions for flavor support: + +.. 
code-block:: python + + def get_supported_flavor_metadata(): + """Returns a dictionary of flavor metadata keys supported by this driver. + + The returned dictionary will include key/value pairs, 'name' and + 'description.' + + :returns: The flavor metadata dictionary + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: The driver does not support flavors. + """ + raise NotImplementedError() + +.. code-block:: python + + def validate_flavor(flavor_metadata): + """Validates if driver can support flavor as defined in flavor_metadata. + + :param flavor_metadata (dict): Dictionary with flavor metadata. + :return: Nothing if the flavor is valid and supported. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: The driver does not support flavors. + :raises UnsupportedOptionError: if driver does not + support one of the configuration options. + """ + raise NotImplementedError() + +Exception Model +--------------- + +DriverError +^^^^^^^^^^^ + +This is a catch all exception that drivers can return if there is an +unexpected error. An example might be a delete call for a load balancer the +driver does not recognize. This exception includes two strings: The user fault +string and the optional operator fault string. The user fault string, +"user_fault_string", will be provided to the API requester. The operator fault +string, "operator_fault_string", will be logged in the Octavia API log file +for the operator to use when debugging. + +.. 
code-block:: python + + + class DriverError(Exception): + user_fault_string = _("An unknown driver error occurred.") + operator_fault_string = _("An unknown driver error occurred.") + + def __init__(self, *args, **kwargs): + self.user_fault_string = kwargs.pop('user_fault_string', + self.user_fault_string) + self.operator_fault_string = kwargs.pop('operator_fault_string', + self.operator_fault_string) + + super(DriverError, self).__init__(*args, **kwargs) + +NotImplementedError +^^^^^^^^^^^^^^^^^^^ + +Driver implementations may not support all operations, and are free to reject +a request. If the driver does not implement an API function, the driver will +raise a NotImplementedError exception. + +.. code-block:: python + + class NotImplementedError(Exception): + user_fault_string = _("A feature is not implemented by this driver.") + operator_fault_string = _("A feature is not implemented by this driver.") + + def __init__(self, *args, **kwargs): + self.user_fault_string = kwargs.pop('user_fault_string', + self.user_fault_string) + self.operator_fault_string = kwargs.pop('operator_fault_string', + self.operator_fault_string) + + super(NotImplementedError, self).__init__(*args, **kwargs) + +UnsupportedOptionError +^^^^^^^^^^^^^^^^^^^^^^ + +Provider drivers will validate that they can complete the request -- that all +options are supported by the driver. If the request fails validation, drivers +will raise an UnsupportedOptionError exception. For example, if a driver does +not support a flavor passed as an option to load balancer create(), the driver +will raise an UnsupportedOptionError and include a message parameter providing +an explanation of the failure. + +.. 
code-block:: python + + class UnsupportedOptionError(Exception): + user_fault_string = _("A specified option is not supported by this driver.") + operator_fault_string = _("A specified option is not supported by this driver.") + + def __init__(self, *args, **kwargs): + self.user_fault_string = kwargs.pop('user_fault_string', + self.user_fault_string) + self.operator_fault_string = kwargs.pop('operator_fault_string', + self.operator_fault_string) + + super(UnsupportedOptionError, self).__init__(*args, **kwargs) + + +Driver Support Library +====================== + +Provider drivers need support for updating provisioning status, operating +status, and statistics. Drivers will not directly use database operations, +and instead will callback to Octavia using a new API. + +.. warning:: + + The methods listed here are the only callable methods for drivers. + All other interfaces are not considered stable or safe for drivers to + access. See `Stable Provider Driver Interface`_ for a list of acceptable + APIs for provider driver use. + +Update Provisioning and Operating Status API +-------------------------------------------- + +The update status API defined below can be used by provider drivers +to update the provisioning and/or operating status of Octavia resources +(load balancer, listener, pool, member, health monitor, L7 policy, or L7 +rule). + +For the following status API, valid values for provisioning status +and operating status parameters are as defined by Octavia status codes. If an +existing object is not included in the input parameter, the status remains +unchanged. + +provisioning_status: status associated with lifecycle of the +resource. See `Octavia Provisioning Status Codes `_. + +operating_status: the observed status of the resource. See `Octavia +Operating Status Codes `_. + +The dictionary takes this form: + +.. 
code-block:: python
+
+    { "loadbalancers": [{"id": "123",
+                         "provisioning_status": "ACTIVE",
+                         "operating_status": "ONLINE"},...],
+      "healthmonitors": [],
+      "l7policies": [],
+      "l7rules": [],
+      "listeners": [],
+      "members": [],
+      "pools": []
+    }
+
+.. code-block:: python
+
+  def update_loadbalancer_status(status):
+      """Update load balancer status.
+
+      :param status (dict): dictionary defining the provisioning status and
+          operating status for load balancer objects, including pools,
+          members, listeners, L7 policies, and L7 rules.
+      :raises: UpdateStatusError
+      :returns: None
+      """
+
+Update Statistics API
+---------------------
+
+Provider drivers can update statistics for load balancers and listeners using
+the following API. Similar to the status function above, a single dictionary
+with multiple load balancer and/or listener statistics is used to update
+statistics in a single call. If an existing load balancer or listener is not
+included, the statistics for those objects remain unchanged.
+
+The general form of the input dictionary is a list of load balancer and
+listener statistics:
+
+.. code-block:: python
+
+    { "loadbalancers": [{"id": "123",
+                         "active_connections": 12,
+                         "bytes_in": 238908,
+                         "bytes_out": 290234,
+                         "request_errors": 0,
+                         "total_connections": 3530},...],
+      "listeners": []
+    }
+
+.. code-block:: python
+
+  def update_loadbalancer_statistics(statistics):
+      """Update load balancer statistics.
+
+      :param statistics (dict): Statistics for loadbalancers and listeners:
+          id (string): ID for load balancer or listener.
+          active_connections (int): Number of currently active connections.
+          bytes_in (int): Total bytes received.
+          bytes_out (int): Total bytes sent.
+          request_errors (int): Total requests not fulfilled.
+          total_connections (int): The total connections handled.
+      :raises: UpdateStatisticsError
+      :returns: None
+      """
+
+Get Resource Support
+--------------------
+
+Provider drivers may need to get information about an Octavia resource.
+As an example of its use, a provider driver may need to sync with Octavia,
+and therefore need to fetch all of the Octavia resources it is responsible
+for managing. Provider drivers can use the existing Octavia API to get these
+resources. See the `Octavia API Reference `_.
+
+API Exception Model
+-------------------
+
+The driver support API will include two Exceptions, one for each of the
+two API groups:
+
+* UpdateStatusError
+* UpdateStatisticsError
+
+Each exception class will include a message field that describes the error and
+references to the failed record if available.
+
+.. code-block:: python
+
+    class UpdateStatusError(Exception):
+        fault_string = _("The status update had an unknown error.")
+        status_object = None
+        status_object_id = None
+        status_record = None
+
+        def __init__(self, *args, **kwargs):
+            self.fault_string = kwargs.pop('fault_string',
+                                           self.fault_string)
+            self.status_object = kwargs.pop('status_object', None)
+            self.status_object_id = kwargs.pop('status_object_id', None)
+            self.status_record = kwargs.pop('status_record', None)
+
+            super(UpdateStatusError, self).__init__(*args, **kwargs)
+
+    class UpdateStatisticsError(Exception):
+        fault_string = _("The statistics update had an unknown error.")
+        stats_object = None
+        stats_object_id = None
+        stats_record = None
+
+        def __init__(self, *args, **kwargs):
+            self.fault_string = kwargs.pop('fault_string',
+                                           self.fault_string)
+            self.stats_object = kwargs.pop('stats_object', None)
+            self.stats_object_id = kwargs.pop('stats_object_id', None)
+            self.stats_record = kwargs.pop('stats_record', None)
+
+            super(UpdateStatisticsError, self).__init__(*args, **kwargs)
diff --git a/doc/source/contributor/index.rst b/doc/source/contributor/index.rst
index bc1d41302e..d1bb63d09b 100644
--- a/doc/source/contributor/index.rst
+++ b/doc/source/contributor/index.rst
@@ -29,6 +29,12 @@ Internal APIs
 
    api/*
 
+.. 
Note:: The documents listed below are design documents and specifications + created and approved at a previous point in time. The code base and + current functionality may deviate from these original documents. + Please see the Octavia documentation for the current feature + details. + Design Documentation -------------------- diff --git a/etc/octavia.conf b/etc/octavia.conf index 3defb70edb..218115e459 100644 --- a/etc/octavia.conf +++ b/etc/octavia.conf @@ -43,8 +43,10 @@ # Enable/disable ability for users to create PING type Health Monitors # allow_ping_health_monitors = True -# List of enabled provider drivers -# enabled_provider_drivers = octavia, amphora +# Dictionary of enabled provider driver names and descriptions +# enabled_provider_drivers = {'amphora': 'The Octavia Amphora driver.', +# 'octavia': 'Deprecated alias of the Octavia ' +# 'Amphora driver.'} # Default provider driver # default_provider_driver = amphora diff --git a/octavia/api/drivers/amphora_driver/driver.py b/octavia/api/drivers/amphora_driver/driver.py index 71efc06c09..47aec7f351 100644 --- a/octavia/api/drivers/amphora_driver/driver.py +++ b/octavia/api/drivers/amphora_driver/driver.py @@ -143,22 +143,21 @@ class AmphoraProviderDriver(driver_base.ProviderDriver): def member_batch_update(self, members): # Get a list of existing members pool_id = members[0].pool_id + # The DB should not have updated yet, so we can still use the pool db_pool = self.repositories.pool.get(db_apis.get_session(), id=pool_id) old_members = db_pool.members - old_member_uniques = { - (m.ip_address, m.protocol_port): m.id for m in old_members} - new_member_uniques = [ - (m.address, m.protocol_port) for m in members] + old_member_ids = [m.id for m in old_members] + # The driver will always pass objects with IDs. 
+ new_member_ids = [m.member_id for m in members] # Find members that are brand new or updated new_members = [] updated_members = [] for m in members: - if (m.address, m.protocol_port) not in old_member_uniques: + if m.member_id not in old_member_ids: new_members.append(m) else: - m.id = old_member_uniques[(m.address, m.protocol_port)] member_dict = m.to_dict(render_unsets=False) member_dict['id'] = member_dict.pop('member_id') if 'address' in member_dict: @@ -170,7 +169,7 @@ class AmphoraProviderDriver(driver_base.ProviderDriver): # Find members that are deleted deleted_members = [] for m in old_members: - if (m.ip_address, m.protocol_port) not in new_member_uniques: + if m.id not in new_member_ids: deleted_members.append(m) payload = {'old_member_ids': [m.id for m in deleted_members], @@ -240,10 +239,3 @@ class AmphoraProviderDriver(driver_base.ProviderDriver): payload = {consts.L7RULE_ID: l7rule_id, consts.L7RULE_UPDATES: l7rule_dict} self.client.cast({}, 'update_l7rule', **payload) - - # Flavor - def get_supported_flavor_metadata(self): - pass - - def validate_flavor(self, flavor_metadata): - pass diff --git a/octavia/api/drivers/utils.py b/octavia/api/drivers/utils.py index 1f4fb24baf..82b4e5c0b3 100644 --- a/octavia/api/drivers/utils.py +++ b/octavia/api/drivers/utils.py @@ -52,7 +52,7 @@ def call_provider(provider, driver_method, *args, **kwargs): provider, e.operator_fault_string) raise exceptions.ProviderDriverError(prov=provider, user_msg=e.user_fault_string) - except driver_exceptions.NotImplementedError as e: + except (driver_exceptions.NotImplementedError, NotImplementedError) as e: LOG.info("Provider '%s' raised a not implemented error: %s", provider, e.operator_fault_string) raise exceptions.ProviderNotImplementedError( @@ -126,7 +126,7 @@ def db_listener_to_provider_listener(db_listener): provider_pool = db_pool_to_provider_pool(db_listener.default_pool) new_listener_dict['default_pool_id'] = provider_pool.pool_id new_listener_dict['default_pool'] = 
provider_pool - if 'l7policies' in new_listener_dict: + if new_listener_dict.get('l7policies', None): new_listener_dict['l7policies'] = ( db_l7policies_to_provider_l7policies(db_listener.l7policies)) provider_listener = driver_dm.Listener.from_dict(new_listener_dict) @@ -154,16 +154,15 @@ def listener_dict_to_provider_dict(listener_dict): if listener_obj.tls_certificate_id or listener_obj.sni_containers: SNI_objs = [] for sni in listener_obj.sni_containers: - if isinstance(sni, data_models.SNI): - SNI_objs.append(sni) - elif isinstance(sni, dict): + if isinstance(sni, dict): sni_obj = data_models.SNI(**sni) SNI_objs.append(sni_obj) elif isinstance(sni, six.string_types): sni_obj = data_models.SNI(tls_container_id=sni) SNI_objs.append(sni_obj) else: - raise Exception(_('Invalid SNI container on listener')) + raise exceptions.ValidationException( + detail=_('Invalid SNI container on listener')) listener_obj.sni_containers = SNI_objs cert_manager = stevedore_driver.DriverManager( namespace='octavia.cert_manager', @@ -221,7 +220,7 @@ def db_pool_to_provider_pool(db_pool): provider_healthmonitor = db_HM_to_provider_HM(db_pool.health_monitor) new_pool_dict['healthmonitor'] = provider_healthmonitor # Don't leave a 'members' None here, we want it to pass through to Unset - if 'members' in new_pool_dict: + if new_pool_dict.get('members', None): del new_pool_dict['members'] if db_pool.members: provider_members = db_members_to_provider_members(db_pool.members) diff --git a/octavia/api/v2/controllers/__init__.py b/octavia/api/v2/controllers/__init__.py index 36f018a164..3142a8def8 100644 --- a/octavia/api/v2/controllers/__init__.py +++ b/octavia/api/v2/controllers/__init__.py @@ -22,6 +22,7 @@ from octavia.api.v2.controllers import l7policy from octavia.api.v2.controllers import listener from octavia.api.v2.controllers import load_balancer from octavia.api.v2.controllers import pool +from octavia.api.v2.controllers import provider from octavia.api.v2.controllers import quotas 
@@ -41,6 +42,7 @@ class BaseV2Controller(base.BaseController): self.l7policies = l7policy.L7PolicyController() self.healthmonitors = health_monitor.HealthMonitorController() self.quotas = quotas.QuotasController() + self.providers = provider.ProviderController() @wsme_pecan.wsexpose(wtypes.text) def get(self): diff --git a/octavia/api/v2/controllers/amphora.py b/octavia/api/v2/controllers/amphora.py index d68b25c5b9..c67f11cbd6 100644 --- a/octavia/api/v2/controllers/amphora.py +++ b/octavia/api/v2/controllers/amphora.py @@ -16,6 +16,7 @@ import logging from oslo_config import cfg +import oslo_messaging as messaging from oslo_utils import excutils import pecan from wsme import types as wtypes @@ -35,7 +36,6 @@ class AmphoraController(base.BaseController): def __init__(self): super(AmphoraController, self).__init__() - self.handler = self.handler.amphora @wsme_pecan.wsexpose(amp_types.AmphoraRootResponse, wtypes.text, [wtypes.text], ignore_extra_args=True) @@ -92,7 +92,12 @@ class FailoverController(base.BaseController): def __init__(self, amp_id): super(FailoverController, self).__init__() - self.handler = self.handler.amphora + topic = cfg.CONF.oslo_messaging.topic + self.transport = messaging.get_rpc_transport(cfg.CONF) + self.target = messaging.Target( + namespace=constants.RPC_NAMESPACE_CONTROLLER_AGENT, + topic=topic, version="1.0", fanout=False) + self.client = messaging.RPCClient(self.transport, target=self.target) self.amp_id = amp_id @wsme_pecan.wsexpose(None, wtypes.text, status_code=202) @@ -117,9 +122,10 @@ class FailoverController(base.BaseController): context, context.project_id, constants.RBAC_PUT_FAILOVER) try: - LOG.info("Sending failover request for amphora %s to the handler", + LOG.info("Sending failover request for amphora %s to the queue", self.amp_id) - self.handler.failover(db_amp) + payload = {constants.AMPHORA_ID: db_amp.id} + self.client.cast({}, 'failover_amphora', **payload) except Exception: with 
excutils.save_and_reraise_exception(reraise=False): self.repositories.load_balancer.update( diff --git a/octavia/api/v2/controllers/base.py b/octavia/api/v2/controllers/base.py index 125db04d75..6c7e3506f4 100644 --- a/octavia/api/v2/controllers/base.py +++ b/octavia/api/v2/controllers/base.py @@ -16,7 +16,6 @@ import logging from oslo_config import cfg from pecan import rest -from stevedore import driver as stevedore_driver from wsme import types as wtypes from octavia.common import constants @@ -35,11 +34,6 @@ class BaseController(rest.RestController): def __init__(self): super(BaseController, self).__init__() self.repositories = repositories.Repositories() - self.handler = stevedore_driver.DriverManager( - namespace='octavia.api.handlers', - name=CONF.api_settings.api_handler, - invoke_on_load=True - ).driver @staticmethod def _convert_db_to_type(db_entity, to_type, children=False): diff --git a/octavia/api/v2/controllers/listener.py b/octavia/api/v2/controllers/listener.py index 58cb49977c..16d26d5b53 100644 --- a/octavia/api/v2/controllers/listener.py +++ b/octavia/api/v2/controllers/listener.py @@ -46,7 +46,6 @@ class ListenersController(base.BaseController): def __init__(self): super(ListenersController, self).__init__() - self.handler = self.handler.listener self.cert_manager = stevedore_driver.DriverManager( namespace='octavia.cert_manager', name=CONF.certificates.cert_manager, diff --git a/octavia/api/v2/controllers/load_balancer.py b/octavia/api/v2/controllers/load_balancer.py index 8621ceb7a5..b1de14cdc6 100644 --- a/octavia/api/v2/controllers/load_balancer.py +++ b/octavia/api/v2/controllers/load_balancer.py @@ -50,7 +50,6 @@ class LoadBalancersController(base.BaseController): def __init__(self): super(LoadBalancersController, self).__init__() - self.handler = self.handler.load_balancer @wsme_pecan.wsexpose(lb_types.LoadBalancerRootResponse, wtypes.text, [wtypes.text], ignore_extra_args=True) diff --git a/octavia/api/v2/controllers/pool.py 
b/octavia/api/v2/controllers/pool.py index c5889cd939..0c4e27ffdb 100644 --- a/octavia/api/v2/controllers/pool.py +++ b/octavia/api/v2/controllers/pool.py @@ -47,7 +47,6 @@ class PoolsController(base.BaseController): def __init__(self): super(PoolsController, self).__init__() - self.handler = self.handler.pool @wsme_pecan.wsexpose(pool_types.PoolRootResponse, wtypes.text, [wtypes.text], ignore_extra_args=True) diff --git a/octavia/api/v2/controllers/provider.py b/octavia/api/v2/controllers/provider.py new file mode 100644 index 0000000000..4ff3c133f7 --- /dev/null +++ b/octavia/api/v2/controllers/provider.py @@ -0,0 +1,50 @@ +# Copyright 2018 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo_config import cfg +import pecan +import six +from wsme import types as wtypes +from wsmeext import pecan as wsme_pecan + +from octavia.api.v2.controllers import base +from octavia.api.v2.types import provider as provider_types +from octavia.common import constants + +CONF = cfg.CONF + + +class ProviderController(base.BaseController): + RBAC_TYPE = constants.RBAC_PROVIDER + + def __init__(self): + super(ProviderController, self).__init__() + + @wsme_pecan.wsexpose(provider_types.ProvidersRootResponse, [wtypes.text], + ignore_extra_args=True) + def get_all(self, fields=None): + """List enabled provider drivers and their descriptions.""" + pcontext = pecan.request.context + context = pcontext.get('octavia_context') + + self._auth_validate_action(context, context.project_id, + constants.RBAC_GET_ALL) + + enabled_providers = CONF.api_settings.enabled_provider_drivers + response_list = [ + provider_types.ProviderResponse(name=key, description=value) for + key, value in six.iteritems(enabled_providers)] + if fields is not None: + response_list = self._filter_fields(response_list, fields) + return provider_types.ProvidersRootResponse(providers=response_list) diff --git a/octavia/api/v2/types/provider.py b/octavia/api/v2/types/provider.py new file mode 100644 index 0000000000..95afa7f6b9 --- /dev/null +++ b/octavia/api/v2/types/provider.py @@ -0,0 +1,26 @@ +# Copyright 2018 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from wsme import types as wtypes + +from octavia.api.common import types + + +class ProviderResponse(types.BaseType): + name = wtypes.wsattr(wtypes.StringType()) + description = wtypes.wsattr(wtypes.StringType()) + + +class ProvidersRootResponse(types.BaseType): + providers = wtypes.wsattr([ProviderResponse]) diff --git a/octavia/common/config.py b/octavia/common/config.py index 5edb2bee54..c2ae6368f6 100644 --- a/octavia/common/config.py +++ b/octavia/common/config.py @@ -102,10 +102,16 @@ api_opts = [ help=_("Allow users to create TLS Terminated listeners?")), cfg.BoolOpt('allow_ping_health_monitors', default=True, help=_("Allow users to create PING type Health Monitors?")), - cfg.ListOpt('enabled_provider_drivers', - help=_('List of enabled provider drivers. Must match the ' - 'driver name in the octavia.api.drivers entrypoint.'), - default=['amphora', 'octavia']), + cfg.DictOpt('enabled_provider_drivers', + help=_('List of enabled provider drivers and description ' + 'dictionaries. Must match the driver name in the ' + 'octavia.api.drivers entrypoint. 
Example: ' + '{\'amphora\': \'The Octavia Amphora driver.\', ' + '\'octavia\': \'Deprecated alias of the Octavia ' + 'Amphora driver.\'}'), + default={'amphora': 'The Octavia Amphora driver.', + 'octavia': 'Deprecated alias of the Octavia Amphora ' + 'driver.'}), cfg.StrOpt('default_provider_driver', default='amphora', help=_('Default provider driver.')), ] diff --git a/octavia/common/constants.py b/octavia/common/constants.py index 3688541f21..fed20fb014 100644 --- a/octavia/common/constants.py +++ b/octavia/common/constants.py @@ -487,6 +487,7 @@ RBAC_L7POLICY = '{}:l7policy:'.format(LOADBALANCER_API) RBAC_L7RULE = '{}:l7rule:'.format(LOADBALANCER_API) RBAC_QUOTA = '{}:quota:'.format(LOADBALANCER_API) RBAC_AMPHORA = '{}:amphora:'.format(LOADBALANCER_API) +RBAC_PROVIDER = '{}:provider:'.format(LOADBALANCER_API) RBAC_POST = 'post' RBAC_PUT = 'put' RBAC_PUT_FAILOVER = 'put_failover' diff --git a/octavia/policies/__init__.py b/octavia/policies/__init__.py index 719bb346c7..70c1f5e07b 100644 --- a/octavia/policies/__init__.py +++ b/octavia/policies/__init__.py @@ -22,6 +22,7 @@ from octavia.policies import listener from octavia.policies import loadbalancer from octavia.policies import member from octavia.policies import pool +from octavia.policies import provider from octavia.policies import quota @@ -35,6 +36,7 @@ def list_rules(): loadbalancer.list_rules(), member.list_rules(), pool.list_rules(), + provider.list_rules(), quota.list_rules(), amphora.list_rules(), ) diff --git a/octavia/policies/provider.py b/octavia/policies/provider.py new file mode 100644 index 0000000000..497b2d2514 --- /dev/null +++ b/octavia/policies/provider.py @@ -0,0 +1,29 @@ +# Copyright 2018 Rackspace, US Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from octavia.common import constants +from oslo_policy import policy + +rules = [ + policy.DocumentedRuleDefault( + '{rbac_obj}{action}'.format(rbac_obj=constants.RBAC_PROVIDER, + action=constants.RBAC_GET_ALL), + constants.RULE_API_READ, + "List enabled providers", + [{'method': 'GET', 'path': '/v2.0/lbaas/providers'}] + ), +] + + +def list_rules(): + return rules diff --git a/octavia/tests/functional/api/v2/base.py b/octavia/tests/functional/api/v2/base.py index 11ced3da26..202271ba20 100644 --- a/octavia/tests/functional/api/v2/base.py +++ b/octavia/tests/functional/api/v2/base.py @@ -68,6 +68,8 @@ class BaseAPITest(base_db_test.OctaviaDBTestBase): AMPHORA_PATH = AMPHORAE_PATH + '/{amphora_id}' AMPHORA_FAILOVER_PATH = AMPHORA_PATH + '/failover' + PROVIDERS_PATH = '/lbaas/providers' + NOT_AUTHORIZED_BODY = { 'debuginfo': None, 'faultcode': 'Client', 'faultstring': 'Policy does not allow this request to be performed.'} @@ -75,7 +77,6 @@ class BaseAPITest(base_db_test.OctaviaDBTestBase): def setUp(self): super(BaseAPITest, self).setUp() self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) - self.conf.config(group='api_settings', api_handler='simulated_handler') self.conf.config(group="controller_worker", network_driver='network_noop_driver') self.conf.config(group='api_settings', auth_strategy=constants.NOAUTH) @@ -83,8 +84,10 @@ class BaseAPITest(base_db_test.OctaviaDBTestBase): default_provider_driver='noop_driver') # We still need to test with the "octavia" alias self.conf.config(group='api_settings', - enabled_provider_drivers='amphora, 
noop_driver, ' - 'octavia') + enabled_provider_drivers={ + 'amphora': 'Amp driver.', + 'noop_driver': 'NoOp driver.', + 'octavia': 'Octavia driver.'}) self.lb_repo = repositories.LoadBalancerRepository() self.listener_repo = repositories.ListenerRepository() self.listener_stats_repo = repositories.ListenerStatisticsRepository() @@ -94,9 +97,6 @@ class BaseAPITest(base_db_test.OctaviaDBTestBase): self.l7rule_repo = repositories.L7RuleRepository() self.health_monitor_repo = repositories.HealthMonitorRepository() self.amphora_repo = repositories.AmphoraRepository() - patcher = mock.patch('octavia.api.handlers.controller_simulator.' - 'handler.SimulatedControllerHandler') - self.handler_mock = patcher.start() patcher2 = mock.patch('octavia.certificates.manager.barbican.' 'BarbicanCertManager') self.cert_manager_mock = patcher2.start() @@ -104,7 +104,6 @@ class BaseAPITest(base_db_test.OctaviaDBTestBase): self.project_id = uuidutils.generate_uuid() def reset_pecan(): - patcher.stop() pecan.set_config({}, overwrite=True) self.addCleanup(reset_pecan) diff --git a/octavia/tests/functional/api/v2/test_amphora.py b/octavia/tests/functional/api/v2/test_amphora.py index ca8bf08863..dc85fd98b0 100644 --- a/octavia/tests/functional/api/v2/test_amphora.py +++ b/octavia/tests/functional/api/v2/test_amphora.py @@ -96,14 +96,15 @@ class TestAmphora(base.BaseAPITest): amphora_id=self.amp_id)).json.get(self.root_tag) self._assert_amp_equal(self.amp_args, response) - def test_failover(self): + @mock.patch('oslo_messaging.RPCClient.cast') + def test_failover(self, mock_cast): self.put(self.AMPHORA_FAILOVER_PATH.format( amphora_id=self.amp_id), body={}, status=202) - self.handler_mock().amphora.failover.assert_has_calls( - [mock.call(self.amp)] - ) + payload = {constants.AMPHORA_ID: self.amp_id} + mock_cast.assert_called_with({}, 'failover_amphora', **payload) - def test_failover_spare(self): + @mock.patch('oslo_messaging.RPCClient.cast') + def test_failover_spare(self, mock_cast): 
amp_args = { 'compute_id': uuidutils.generate_uuid(), 'status': constants.AMPHORA_READY, @@ -118,8 +119,8 @@ class TestAmphora(base.BaseAPITest): amp = self.amphora_repo.create(self.session, **amp_args) self.put(self.AMPHORA_FAILOVER_PATH.format( amphora_id=amp.id), body={}, status=202) - self.handler_mock().amphora.failover.assert_has_calls( - [mock.call(amp)]) + payload = {constants.AMPHORA_ID: amp.id} + mock_cast.assert_called_once_with({}, 'failover_amphora', **payload) def test_failover_deleted(self): new_amp = self._create_additional_amp() @@ -175,7 +176,8 @@ class TestAmphora(base.BaseAPITest): self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json) - def test_failover_authorized(self): + @mock.patch('oslo_messaging.RPCClient.cast') + def test_failover_authorized(self, mock_cast): self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) @@ -202,10 +204,11 @@ class TestAmphora(base.BaseAPITest): # Reset api auth setting self.conf.config(group='api_settings', auth_strategy=auth_strategy) - self.handler_mock().amphora.failover.assert_has_calls( - [mock.call(self.amp)]) + payload = {constants.AMPHORA_ID: self.amp_id} + mock_cast.assert_called_once_with({}, 'failover_amphora', **payload) - def test_failover_not_authorized(self): + @mock.patch('oslo_messaging.RPCClient.cast') + def test_failover_not_authorized(self, mock_cast): self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') self.conf.config(group='api_settings', auth_strategy=constants.TESTING) @@ -216,7 +219,7 @@ class TestAmphora(base.BaseAPITest): # Reset api auth setting self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json) - 
self.handler_mock().amphora.failover.assert_not_called() + mock_cast.assert_not_called() def test_get_deleted_gives_404(self): new_amp = self._create_additional_amp() diff --git a/octavia/tests/functional/api/v2/test_provider.py b/octavia/tests/functional/api/v2/test_provider.py new file mode 100644 index 0000000000..35465013d1 --- /dev/null +++ b/octavia/tests/functional/api/v2/test_provider.py @@ -0,0 +1,45 @@ +# Copyright 2018 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from octavia.tests.functional.api.v2 import base + + +class TestProvider(base.BaseAPITest): + + root_tag_list = 'providers' + + def setUp(self): + super(TestProvider, self).setUp() + + def test_get_all_providers(self): + octavia_dict = {u'description': u'Octavia driver.', + u'name': u'octavia'} + amphora_dict = {u'description': u'Amp driver.', u'name': u'amphora'} + noop_dict = {u'description': u'NoOp driver.', u'name': u'noop_driver'} + providers = self.get(self.PROVIDERS_PATH).json.get(self.root_tag_list) + self.assertEqual(3, len(providers)) + self.assertTrue(octavia_dict in providers) + self.assertTrue(amphora_dict in providers) + self.assertTrue(noop_dict in providers) + + def test_get_all_providers_fields(self): + octavia_dict = {u'name': u'octavia'} + amphora_dict = {u'name': u'amphora'} + noop_dict = {u'name': u'noop_driver'} + providers = self.get(self.PROVIDERS_PATH, params={'fields': ['name']}) + providers_list = providers.json.get(self.root_tag_list) + self.assertEqual(3, len(providers_list)) + self.assertTrue(octavia_dict in providers_list) + self.assertTrue(amphora_dict in providers_list) + self.assertTrue(noop_dict in providers_list) diff --git a/octavia/tests/unit/api/drivers/amphora_driver/__init__.py b/octavia/tests/unit/api/drivers/amphora_driver/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/api/drivers/amphora_driver/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
diff --git a/octavia/tests/unit/api/drivers/amphora_driver/test_amphora_driver.py b/octavia/tests/unit/api/drivers/amphora_driver/test_amphora_driver.py new file mode 100644 index 0000000000..cf1198e75c --- /dev/null +++ b/octavia/tests/unit/api/drivers/amphora_driver/test_amphora_driver.py @@ -0,0 +1,376 @@ +# Copyright 2018 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from octavia.api.drivers.amphora_driver import driver +from octavia.api.drivers import data_models as driver_dm +from octavia.api.drivers import exceptions +from octavia.common import constants as consts +from octavia.network import base as network_base +from octavia.tests.unit.api.drivers import sample_data_models +from octavia.tests.unit import base + + +class TestAmphoraDriver(base.TestCase): + def setUp(self): + super(TestAmphoraDriver, self).setUp() + self.amp_driver = driver.AmphoraProviderDriver() + self.sample_data = sample_data_models.SampleDriverDataModels() + + @mock.patch('octavia.common.utils.get_network_driver') + def test_create_vip_port(self, mock_get_net_driver): + mock_net_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_net_driver + mock_net_driver.allocate_vip.return_value = self.sample_data.db_vip + + provider_vip_dict = self.amp_driver.create_vip_port( + self.sample_data.lb_id, self.sample_data.project_id, + self.sample_data.provider_vip_dict) + + self.assertEqual(self.sample_data.provider_vip_dict, provider_vip_dict) + + 
@mock.patch('octavia.common.utils.get_network_driver') + def test_create_vip_port_failed(self, mock_get_net_driver): + mock_net_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_net_driver + mock_net_driver.allocate_vip.side_effect = ( + network_base.AllocateVIPException()) + + self.assertRaises(exceptions.DriverError, + self.amp_driver.create_vip_port, + self.sample_data.lb_id, self.sample_data.project_id, + self.sample_data.provider_vip_dict) + + # Load Balancer + @mock.patch('oslo_messaging.RPCClient.cast') + def test_loadbalancer_create(self, mock_cast): + provider_lb = driver_dm.LoadBalancer( + loadbalancer_id=self.sample_data.lb_id) + self.amp_driver.loadbalancer_create(provider_lb) + payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id} + mock_cast.assert_called_with({}, 'create_load_balancer', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_loadbalancer_delete(self, mock_cast): + self.amp_driver.loadbalancer_delete(self.sample_data.lb_id) + payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id, + 'cascade': False} + mock_cast.assert_called_with({}, 'delete_load_balancer', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_loadbalancer_failover(self, mock_cast): + self.amp_driver.loadbalancer_failover(self.sample_data.lb_id) + payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id} + mock_cast.assert_called_with({}, 'failover_load_balancer', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_loadbalancer_update(self, mock_cast): + provider_lb = driver_dm.LoadBalancer( + loadbalancer_id=self.sample_data.lb_id, admin_state_up=True) + lb_dict = {'enabled': True} + self.amp_driver.loadbalancer_update(provider_lb) + payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id, + consts.LOAD_BALANCER_UPDATES: lb_dict} + mock_cast.assert_called_with({}, 'update_load_balancer', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def 
test_loadbalancer_update_name(self, mock_cast): + provider_lb = driver_dm.LoadBalancer( + loadbalancer_id=self.sample_data.lb_id, name='Great LB') + lb_dict = {'name': 'Great LB'} + self.amp_driver.loadbalancer_update(provider_lb) + payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id, + consts.LOAD_BALANCER_UPDATES: lb_dict} + mock_cast.assert_called_with({}, 'update_load_balancer', **payload) + + # Listener + @mock.patch('oslo_messaging.RPCClient.cast') + def test_listener_create(self, mock_cast): + provider_listener = driver_dm.Listener( + listener_id=self.sample_data.listener1_id) + self.amp_driver.listener_create(provider_listener) + payload = {consts.LISTENER_ID: self.sample_data.listener1_id} + mock_cast.assert_called_with({}, 'create_listener', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_listener_delete(self, mock_cast): + self.amp_driver.listener_delete(self.sample_data.listener1_id) + payload = {consts.LISTENER_ID: self.sample_data.listener1_id} + mock_cast.assert_called_with({}, 'delete_listener', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_listener_update(self, mock_cast): + provider_listener = driver_dm.Listener( + listener_id=self.sample_data.listener1_id, admin_state_up=False) + listener_dict = {'enabled': False} + self.amp_driver.listener_update(provider_listener) + payload = {consts.LISTENER_ID: self.sample_data.listener1_id, + consts.LISTENER_UPDATES: listener_dict} + mock_cast.assert_called_with({}, 'update_listener', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_listener_update_name(self, mock_cast): + provider_listener = driver_dm.Listener( + listener_id=self.sample_data.listener1_id, name='Great Listener') + listener_dict = {'name': 'Great Listener'} + self.amp_driver.listener_update(provider_listener) + payload = {consts.LISTENER_ID: self.sample_data.listener1_id, + consts.LISTENER_UPDATES: listener_dict} + mock_cast.assert_called_with({}, 'update_listener', 
**payload) + + # Pool + @mock.patch('oslo_messaging.RPCClient.cast') + def test_pool_create(self, mock_cast): + provider_pool = driver_dm.Pool( + pool_id=self.sample_data.pool1_id) + self.amp_driver.pool_create(provider_pool) + payload = {consts.POOL_ID: self.sample_data.pool1_id} + mock_cast.assert_called_with({}, 'create_pool', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_pool_delete(self, mock_cast): + self.amp_driver.pool_delete(self.sample_data.pool1_id) + payload = {consts.POOL_ID: self.sample_data.pool1_id} + mock_cast.assert_called_with({}, 'delete_pool', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_pool_update(self, mock_cast): + provider_pool = driver_dm.Pool( + pool_id=self.sample_data.pool1_id, admin_state_up=True) + pool_dict = {'enabled': True} + self.amp_driver.pool_update(provider_pool) + payload = {consts.POOL_ID: self.sample_data.pool1_id, + consts.POOL_UPDATES: pool_dict} + mock_cast.assert_called_with({}, 'update_pool', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_pool_update_name(self, mock_cast): + provider_pool = driver_dm.Pool( + pool_id=self.sample_data.pool1_id, name='Great pool') + pool_dict = {'name': 'Great pool'} + self.amp_driver.pool_update(provider_pool) + payload = {consts.POOL_ID: self.sample_data.pool1_id, + consts.POOL_UPDATES: pool_dict} + mock_cast.assert_called_with({}, 'update_pool', **payload) + + # Member + @mock.patch('oslo_messaging.RPCClient.cast') + def test_member_create(self, mock_cast): + provider_member = driver_dm.Member( + member_id=self.sample_data.member1_id) + self.amp_driver.member_create(provider_member) + payload = {consts.MEMBER_ID: self.sample_data.member1_id} + mock_cast.assert_called_with({}, 'create_member', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_member_delete(self, mock_cast): + self.amp_driver.member_delete(self.sample_data.member1_id) + payload = {consts.MEMBER_ID: 
self.sample_data.member1_id} + mock_cast.assert_called_with({}, 'delete_member', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_member_update(self, mock_cast): + provider_member = driver_dm.Member( + member_id=self.sample_data.member1_id, admin_state_up=True) + member_dict = {'enabled': True} + self.amp_driver.member_update(provider_member) + payload = {consts.MEMBER_ID: self.sample_data.member1_id, + consts.MEMBER_UPDATES: member_dict} + mock_cast.assert_called_with({}, 'update_member', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_member_update_name(self, mock_cast): + provider_member = driver_dm.Member( + member_id=self.sample_data.member1_id, name='Great member') + member_dict = {'name': 'Great member'} + self.amp_driver.member_update(provider_member) + payload = {consts.MEMBER_ID: self.sample_data.member1_id, + consts.MEMBER_UPDATES: member_dict} + mock_cast.assert_called_with({}, 'update_member', **payload) + + @mock.patch('octavia.db.api.get_session') + @mock.patch('octavia.db.repositories.PoolRepository.get') + @mock.patch('oslo_messaging.RPCClient.cast') + def test_member_batch_update(self, mock_cast, mock_pool_get, mock_session): + mock_pool = mock.MagicMock() + mock_pool.members = self.sample_data.db_pool1_members + mock_pool_get.return_value = mock_pool + + prov_mem_update = driver_dm.Member( + member_id=self.sample_data.member2_id, + pool_id=self.sample_data.pool1_id, admin_state_up=False, + address='192.0.2.17', monitor_address='192.0.2.77', + protocol_port=80, name='updated-member2') + prov_new_member = driver_dm.Member( + member_id=self.sample_data.member3_id, + pool_id=self.sample_data.pool1_id, + address='192.0.2.18', monitor_address='192.0.2.28', + protocol_port=80, name='member3') + prov_members = [prov_mem_update, prov_new_member] + + update_mem_dict = {'ip_address': '192.0.2.17', + 'name': 'updated-member2', + 'monitor_address': '192.0.2.77', + 'id': self.sample_data.member2_id, + 'enabled': 
False, + 'protocol_port': 80, + 'pool_id': self.sample_data.pool1_id} + + self.amp_driver.member_batch_update(prov_members) + + payload = {'old_member_ids': [self.sample_data.member1_id], + 'new_member_ids': [self.sample_data.member3_id], + 'updated_members': [update_mem_dict]} + mock_cast.assert_called_with({}, 'batch_update_members', **payload) + + @mock.patch('octavia.db.api.get_session') + @mock.patch('octavia.db.repositories.PoolRepository.get') + @mock.patch('oslo_messaging.RPCClient.cast') + def test_member_batch_update_no_admin_addr(self, mock_cast, + mock_pool_get, mock_session): + mock_pool = mock.MagicMock() + mock_pool.members = self.sample_data.db_pool1_members + mock_pool_get.return_value = mock_pool + + prov_mem_update = driver_dm.Member( + member_id=self.sample_data.member2_id, + pool_id=self.sample_data.pool1_id, + monitor_address='192.0.2.77', + protocol_port=80, name='updated-member2') + prov_new_member = driver_dm.Member( + member_id=self.sample_data.member3_id, + pool_id=self.sample_data.pool1_id, + address='192.0.2.18', monitor_address='192.0.2.28', + protocol_port=80, name='member3') + prov_members = [prov_mem_update, prov_new_member] + + update_mem_dict = {'name': 'updated-member2', + 'monitor_address': '192.0.2.77', + 'id': self.sample_data.member2_id, + 'protocol_port': 80, + 'pool_id': self.sample_data.pool1_id} + + self.amp_driver.member_batch_update(prov_members) + + payload = {'old_member_ids': [self.sample_data.member1_id], + 'new_member_ids': [self.sample_data.member3_id], + 'updated_members': [update_mem_dict]} + mock_cast.assert_called_with({}, 'batch_update_members', **payload) + + # Health Monitor + @mock.patch('oslo_messaging.RPCClient.cast') + def test_health_monitor_create(self, mock_cast): + provider_HM = driver_dm.HealthMonitor( + healthmonitor_id=self.sample_data.hm1_id) + self.amp_driver.health_monitor_create(provider_HM) + payload = {consts.HEALTH_MONITOR_ID: self.sample_data.hm1_id} + mock_cast.assert_called_with({}, 
'create_health_monitor', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_health_monitor_delete(self, mock_cast): + self.amp_driver.health_monitor_delete(self.sample_data.hm1_id) + payload = {consts.HEALTH_MONITOR_ID: self.sample_data.hm1_id} + mock_cast.assert_called_with({}, 'delete_health_monitor', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_health_monitor_update(self, mock_cast): + provider_hm = driver_dm.HealthMonitor( + healthmonitor_id=self.sample_data.hm1_id, admin_state_up=True, + max_retries=1, max_retries_down=2) + hm_dict = {'enabled': True, 'rise_threshold': 1, 'fall_threshold': 2} + self.amp_driver.health_monitor_update(provider_hm) + payload = {consts.HEALTH_MONITOR_ID: self.sample_data.hm1_id, + consts.HEALTH_MONITOR_UPDATES: hm_dict} + mock_cast.assert_called_with({}, 'update_health_monitor', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_health_monitor_update_name(self, mock_cast): + provider_hm = driver_dm.HealthMonitor( + healthmonitor_id=self.sample_data.hm1_id, name='Great HM') + hm_dict = {'name': 'Great HM'} + self.amp_driver.health_monitor_update(provider_hm) + payload = {consts.HEALTH_MONITOR_ID: self.sample_data.hm1_id, + consts.HEALTH_MONITOR_UPDATES: hm_dict} + mock_cast.assert_called_with({}, 'update_health_monitor', **payload) + + # L7 Policy + @mock.patch('oslo_messaging.RPCClient.cast') + def test_l7policy_create(self, mock_cast): + provider_l7policy = driver_dm.L7Policy( + l7policy_id=self.sample_data.l7policy1_id) + self.amp_driver.l7policy_create(provider_l7policy) + payload = {consts.L7POLICY_ID: self.sample_data.l7policy1_id} + mock_cast.assert_called_with({}, 'create_l7policy', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_l7policy_delete(self, mock_cast): + self.amp_driver.l7policy_delete(self.sample_data.l7policy1_id) + payload = {consts.L7POLICY_ID: self.sample_data.l7policy1_id} + mock_cast.assert_called_with({}, 
'delete_l7policy', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_l7policy_update(self, mock_cast): + provider_l7policy = driver_dm.L7Policy( + l7policy_id=self.sample_data.l7policy1_id, admin_state_up=True) + l7policy_dict = {'enabled': True} + self.amp_driver.l7policy_update(provider_l7policy) + payload = {consts.L7POLICY_ID: self.sample_data.l7policy1_id, + consts.L7POLICY_UPDATES: l7policy_dict} + mock_cast.assert_called_with({}, 'update_l7policy', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_l7policy_update_name(self, mock_cast): + provider_l7policy = driver_dm.L7Policy( + l7policy_id=self.sample_data.l7policy1_id, name='Great L7Policy') + l7policy_dict = {'name': 'Great L7Policy'} + self.amp_driver.l7policy_update(provider_l7policy) + payload = {consts.L7POLICY_ID: self.sample_data.l7policy1_id, + consts.L7POLICY_UPDATES: l7policy_dict} + mock_cast.assert_called_with({}, 'update_l7policy', **payload) + + # L7 Rules + @mock.patch('oslo_messaging.RPCClient.cast') + def test_l7rule_create(self, mock_cast): + provider_l7rule = driver_dm.L7Rule( + l7rule_id=self.sample_data.l7rule1_id) + self.amp_driver.l7rule_create(provider_l7rule) + payload = {consts.L7RULE_ID: self.sample_data.l7rule1_id} + mock_cast.assert_called_with({}, 'create_l7rule', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_l7rule_delete(self, mock_cast): + self.amp_driver.l7rule_delete(self.sample_data.l7rule1_id) + payload = {consts.L7RULE_ID: self.sample_data.l7rule1_id} + mock_cast.assert_called_with({}, 'delete_l7rule', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_l7rule_update(self, mock_cast): + provider_l7rule = driver_dm.L7Rule( + l7rule_id=self.sample_data.l7rule1_id, admin_state_up=True) + l7rule_dict = {'enabled': True} + self.amp_driver.l7rule_update(provider_l7rule) + payload = {consts.L7RULE_ID: self.sample_data.l7rule1_id, + consts.L7RULE_UPDATES: l7rule_dict} + 
mock_cast.assert_called_with({}, 'update_l7rule', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_l7rule_update_invert(self, mock_cast): + provider_l7rule = driver_dm.L7Rule( + l7rule_id=self.sample_data.l7rule1_id, invert=True) + l7rule_dict = {'invert': True} + self.amp_driver.l7rule_update(provider_l7rule) + payload = {consts.L7RULE_ID: self.sample_data.l7rule1_id, + consts.L7RULE_UPDATES: l7rule_dict} + mock_cast.assert_called_with({}, 'update_l7rule', **payload) diff --git a/octavia/tests/unit/api/drivers/sample_data_models.py b/octavia/tests/unit/api/drivers/sample_data_models.py new file mode 100644 index 0000000000..cbb685648b --- /dev/null +++ b/octavia/tests/unit/api/drivers/sample_data_models.py @@ -0,0 +1,471 @@ +# Copyright 2018 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import copy + +from oslo_utils import uuidutils + +from octavia.api.drivers import data_models as driver_dm +from octavia.common import constants +from octavia.common import data_models + + +class SampleDriverDataModels(object): + + def __init__(self): + self.project_id = uuidutils.generate_uuid() + self.lb_id = uuidutils.generate_uuid() + self.ip_address = '192.0.2.30' + self.port_id = uuidutils.generate_uuid() + self.network_id = uuidutils.generate_uuid() + self.subnet_id = uuidutils.generate_uuid() + self.qos_policy_id = uuidutils.generate_uuid() + + self.listener1_id = uuidutils.generate_uuid() + self.listener2_id = uuidutils.generate_uuid() + self.default_tls_container_ref = uuidutils.generate_uuid() + self.sni_container_ref_1 = uuidutils.generate_uuid() + self.sni_container_ref_2 = uuidutils.generate_uuid() + + self.pool1_id = uuidutils.generate_uuid() + self.pool2_id = uuidutils.generate_uuid() + + self.hm1_id = uuidutils.generate_uuid() + self.hm2_id = uuidutils.generate_uuid() + + self.member1_id = uuidutils.generate_uuid() + self.member2_id = uuidutils.generate_uuid() + self.member3_id = uuidutils.generate_uuid() + self.member4_id = uuidutils.generate_uuid() + + self.l7policy1_id = uuidutils.generate_uuid() + self.l7policy2_id = uuidutils.generate_uuid() + + self.l7rule1_id = uuidutils.generate_uuid() + self.l7rule2_id = uuidutils.generate_uuid() + + self._common_test_dict = {'provisioning_status': constants.ACTIVE, + 'operating_status': constants.ONLINE, + 'project_id': self.project_id, + 'created_at': 'then', + 'updated_at': 'now', + 'enabled': True} + + # Setup Health Monitors + self.test_hm1_dict = {'id': self.hm1_id, + 'type': constants.HEALTH_MONITOR_PING, + 'delay': 1, 'timeout': 3, 'fall_threshold': 1, + 'rise_threshold': 2, 'http_method': 'GET', + 'url_path': '/', 'expected_codes': '200', + 'name': 'hm1', 'pool_id': self.pool1_id} + + self.test_hm1_dict.update(self._common_test_dict) + + self.test_hm2_dict = copy.deepcopy(self.test_hm1_dict) + 
self.test_hm2_dict['id'] = self.hm2_id + self.test_hm2_dict['name'] = 'hm2' + + self.db_hm1 = data_models.HealthMonitor(**self.test_hm1_dict) + self.db_hm2 = data_models.HealthMonitor(**self.test_hm2_dict) + + self.provider_hm1_dict = {'admin_state_up': True, + 'delay': 1, 'expected_codes': '200', + 'healthmonitor_id': self.hm1_id, + 'http_method': 'GET', + 'max_retries': 2, + 'max_retries_down': 1, + 'name': 'hm1', + 'pool_id': self.pool1_id, + 'timeout': 3, + 'type': constants.HEALTH_MONITOR_PING, + 'url_path': '/'} + + self.provider_hm2_dict = copy.deepcopy(self.provider_hm1_dict) + self.provider_hm2_dict['healthmonitor_id'] = self.hm2_id + self.provider_hm2_dict['name'] = 'hm2' + + self.provider_hm1 = driver_dm.HealthMonitor(**self.provider_hm1_dict) + self.provider_hm2 = driver_dm.HealthMonitor(**self.provider_hm2_dict) + + # Setup Members + self.test_member1_dict = {'id': self.member1_id, + 'pool_id': self.pool1_id, + 'ip_address': '192.0.2.16', + 'protocol_port': 80, 'weight': 0, + 'backup': False, + 'subnet_id': self.subnet_id, + 'pool': None, + 'name': 'member1', + 'monitor_address': '192.0.2.26', + 'monitor_port': 81} + + self.test_member1_dict.update(self._common_test_dict) + + self.test_member2_dict = copy.deepcopy(self.test_member1_dict) + self.test_member2_dict['id'] = self.member2_id + self.test_member2_dict['ip_address'] = '192.0.2.17' + self.test_member2_dict['monitor_address'] = '192.0.2.27' + self.test_member2_dict['name'] = 'member2' + + self.test_member3_dict = copy.deepcopy(self.test_member1_dict) + self.test_member3_dict['id'] = self.member3_id + self.test_member3_dict['ip_address'] = '192.0.2.18' + self.test_member3_dict['monitor_address'] = '192.0.2.28' + self.test_member3_dict['name'] = 'member3' + self.test_member3_dict['pool_id'] = self.pool2_id + + self.test_member4_dict = copy.deepcopy(self.test_member1_dict) + self.test_member4_dict['id'] = self.member4_id + self.test_member4_dict['ip_address'] = '192.0.2.19' + 
self.test_member4_dict['monitor_address'] = '192.0.2.29' + self.test_member4_dict['name'] = 'member4' + self.test_member4_dict['pool_id'] = self.pool2_id + + self.test_pool1_members_dict = [self.test_member1_dict, + self.test_member2_dict] + self.test_pool2_members_dict = [self.test_member3_dict, + self.test_member4_dict] + + self.db_member1 = data_models.Member(**self.test_member1_dict) + self.db_member2 = data_models.Member(**self.test_member2_dict) + self.db_member3 = data_models.Member(**self.test_member3_dict) + self.db_member4 = data_models.Member(**self.test_member4_dict) + + self.db_pool1_members = [self.db_member1, self.db_member2] + self.db_pool2_members = [self.db_member3, self.db_member4] + + self.provider_member1_dict = {'address': '192.0.2.16', + 'admin_state_up': True, + 'member_id': self.member1_id, + 'monitor_address': '192.0.2.26', + 'monitor_port': 81, + 'name': 'member1', + 'pool_id': self.pool1_id, + 'protocol_port': 80, + 'subnet_id': self.subnet_id, + 'weight': 0, + 'backup': False} + + self.provider_member2_dict = copy.deepcopy(self.provider_member1_dict) + self.provider_member2_dict['member_id'] = self.member2_id + self.provider_member2_dict['address'] = '192.0.2.17' + self.provider_member2_dict['monitor_address'] = '192.0.2.27' + self.provider_member2_dict['name'] = 'member2' + + self.provider_member3_dict = copy.deepcopy(self.provider_member1_dict) + self.provider_member3_dict['member_id'] = self.member3_id + self.provider_member3_dict['address'] = '192.0.2.18' + self.provider_member3_dict['monitor_address'] = '192.0.2.28' + self.provider_member3_dict['name'] = 'member3' + self.provider_member3_dict['pool_id'] = self.pool2_id + + self.provider_member4_dict = copy.deepcopy(self.provider_member1_dict) + self.provider_member4_dict['member_id'] = self.member4_id + self.provider_member4_dict['address'] = '192.0.2.19' + self.provider_member4_dict['monitor_address'] = '192.0.2.29' + self.provider_member4_dict['name'] = 'member4' + 
self.provider_member4_dict['pool_id'] = self.pool2_id + + self.provider_pool1_members_dict = [self.provider_member1_dict, + self.provider_member2_dict] + + self.provider_pool2_members_dict = [self.provider_member3_dict, + self.provider_member4_dict] + + self.provider_member1 = driver_dm.Member(**self.provider_member1_dict) + self.provider_member2 = driver_dm.Member(**self.provider_member2_dict) + self.provider_member3 = driver_dm.Member(**self.provider_member3_dict) + self.provider_member4 = driver_dm.Member(**self.provider_member4_dict) + + self.provider_pool1_members = [self.provider_member1, + self.provider_member2] + self.provider_pool2_members = [self.provider_member3, + self.provider_member4] + + # Setup test pools + self.test_pool1_dict = {'id': self.pool1_id, + 'name': 'pool1', 'description': 'Pool 1', + 'load_balancer_id': self.lb_id, + 'protocol': 'avian', + 'lb_algorithm': 'round_robin', + 'members': self.test_pool1_members_dict, + 'health_monitor': self.test_hm1_dict, + 'session_persistence': {'type': 'SOURCE'}, + 'listeners': [], + 'l7policies': []} + + self.test_pool1_dict.update(self._common_test_dict) + + self.test_pool2_dict = copy.deepcopy(self.test_pool1_dict) + self.test_pool2_dict['id'] = self.pool2_id + self.test_pool2_dict['name'] = 'pool2' + self.test_pool2_dict['description'] = 'Pool 2' + self.test_pool2_dict['members'] = self.test_pool2_members_dict + + self.test_pools = [self.test_pool1_dict, self.test_pool2_dict] + + self.db_pool1 = data_models.Pool(**self.test_pool1_dict) + self.db_pool1.health_monitor = self.db_hm1 + self.db_pool1.members = self.db_pool1_members + self.db_pool2 = data_models.Pool(**self.test_pool2_dict) + self.db_pool2.health_monitor = self.db_hm2 + self.db_pool2.members = self.db_pool2_members + + self.test_db_pools = [self.db_pool1, self.db_pool2] + + self.provider_pool1_dict = { + 'admin_state_up': True, + 'description': 'Pool 1', + 'healthmonitor': self.provider_hm1_dict, + 'lb_algorithm': 'round_robin', + 
'loadbalancer_id': self.lb_id, + 'members': self.provider_pool1_members_dict, + 'name': 'pool1', + 'pool_id': self.pool1_id, + 'protocol': 'avian', + 'session_persistence': {'type': 'SOURCE'}} + + self.provider_pool2_dict = copy.deepcopy(self.provider_pool1_dict) + self.provider_pool2_dict['pool_id'] = self.pool2_id + self.provider_pool2_dict['name'] = 'pool2' + self.provider_pool2_dict['description'] = 'Pool 2' + self.provider_pool2_dict['members'] = self.provider_pool2_members_dict + self.provider_pool2_dict['healthmonitor'] = self.provider_hm2_dict + + self.provider_pool1 = driver_dm.Pool(**self.provider_pool1_dict) + self.provider_pool1.members = self.provider_pool1_members + self.provider_pool1.healthmonitor = self.provider_hm1 + self.provider_pool2 = driver_dm.Pool(**self.provider_pool2_dict) + self.provider_pool2.members = self.provider_pool2_members + self.provider_pool2.healthmonitor = self.provider_hm2 + + self.provider_pools = [self.provider_pool1, self.provider_pool2] + + # Setup L7Rules + self.test_l7rule1_dict = {'id': self.l7rule1_id, + 'l7policy_id': self.l7policy1_id, + 'type': 'o', + 'compare_type': 'fake_type', + 'key': 'fake_key', + 'value': 'fake_value', + 'l7policy': None, + 'invert': False} + + self.test_l7rule1_dict.update(self._common_test_dict) + + self.test_l7rule2_dict = copy.deepcopy(self.test_l7rule1_dict) + self.test_l7rule2_dict['id'] = self.l7rule2_id + + self.test_l7rules = [self.test_l7rule1_dict, self.test_l7rule2_dict] + + self.db_l7Rule1 = data_models.L7Rule(**self.test_l7rule1_dict) + self.db_l7Rule2 = data_models.L7Rule(**self.test_l7rule2_dict) + + self.db_l7Rules = [self.db_l7Rule1, self.db_l7Rule2] + + self.provider_l7rule1_dict = {'admin_state_up': True, + 'compare_type': 'fake_type', + 'invert': False, + 'key': 'fake_key', + 'l7policy_id': self.l7policy1_id, + 'l7rule_id': self.l7rule1_id, + 'type': 'o', + 'value': 'fake_value'} + + self.provider_l7rule2_dict = copy.deepcopy(self.provider_l7rule1_dict) + 
self.provider_l7rule2_dict['l7rule_id'] = self.l7rule2_id + self.provider_l7rules_dicts = [self.provider_l7rule1_dict, + self.provider_l7rule2_dict] + + self.provider_l7rule1 = driver_dm.L7Rule(**self.provider_l7rule1_dict) + self.provider_l7rule2 = driver_dm.L7Rule(**self.provider_l7rule2_dict) + + self.provider_rules = [self.provider_l7rule1, self.provider_l7rule2] + + # Setup L7Policies + self.test_l7policy1_dict = {'id': self.l7policy1_id, + 'name': 'l7policy_1', + 'description': 'L7policy 1', + 'listener_id': self.listener1_id, + 'action': 'go', + 'redirect_pool_id': self.pool1_id, + 'redirect_url': '/index.html', + 'position': 1, + 'listener': None, + 'redirect_pool': None, + 'l7rules': self.test_l7rules} + + self.test_l7policy1_dict.update(self._common_test_dict) + + self.test_l7policy2_dict = copy.deepcopy(self.test_l7policy1_dict) + self.test_l7policy2_dict['id'] = self.l7policy2_id + self.test_l7policy2_dict['name'] = 'l7policy_2' + self.test_l7policy2_dict['description'] = 'L7policy 2' + + self.test_l7policies = [self.test_l7policy1_dict, + self.test_l7policy2_dict] + + self.db_l7policy1 = data_models.L7Policy(**self.test_l7policy1_dict) + self.db_l7policy2 = data_models.L7Policy(**self.test_l7policy2_dict) + self.db_l7policy1.l7rules = self.db_l7Rules + self.db_l7policy2.l7rules = self.db_l7Rules + + self.db_l7policies = [self.db_l7policy1, self.db_l7policy2] + + self.provider_l7policy1_dict = {'action': 'go', + 'admin_state_up': True, + 'description': 'L7policy 1', + 'l7policy_id': self.l7policy1_id, + 'listener_id': self.listener1_id, + 'name': 'l7policy_1', + 'position': 1, + 'redirect_pool_id': self.pool1_id, + 'redirect_url': '/index.html', + 'rules': self.provider_l7rules_dicts} + + self.provider_l7policy2_dict = copy.deepcopy( + self.provider_l7policy1_dict) + self.provider_l7policy2_dict['l7policy_id'] = self.l7policy2_id + self.provider_l7policy2_dict['name'] = 'l7policy_2' + self.provider_l7policy2_dict['description'] = 'L7policy 2' + + 
self.provider_l7policies_dict = [self.provider_l7policy1_dict, + self.provider_l7policy2_dict] + + self.provider_l7policy1 = driver_dm.L7Policy( + **self.provider_l7policy1_dict) + self.provider_l7policy1.rules = self.provider_rules + self.provider_l7policy2 = driver_dm.L7Policy( + **self.provider_l7policy2_dict) + self.provider_l7policy2.rules = self.provider_rules + + self.provider_l7policies = [self.provider_l7policy1, + self.provider_l7policy2] + + # Setup Listeners + self.test_listener1_dict = { + 'id': self.listener1_id, + 'name': 'listener_1', + 'description': 'Listener 1', + 'default_pool_id': self.pool1_id, + 'load_balancer_id': self.lb_id, + 'protocol': 'avian', + 'protocol_port': 90, + 'connection_limit': 10000, + 'tls_certificate_id': self.default_tls_container_ref, + 'stats': None, + 'default_pool': self.test_pool1_dict, + 'load_balancer': None, + 'sni_containers': [self.sni_container_ref_1, + self.sni_container_ref_2], + 'peer_port': 55, + 'l7policies': self.test_l7policies, + 'insert_headers': {}, + 'pools': None, + 'timeout_client_data': 1000, + 'timeout_member_connect': 2000, + 'timeout_member_data': 3000, + 'timeout_tcp_inspect': 4000} + + self.test_listener1_dict.update(self._common_test_dict) + + self.test_listener2_dict = copy.deepcopy(self.test_listener1_dict) + self.test_listener2_dict['id'] = self.listener2_id + self.test_listener2_dict['name'] = 'listener_2' + self.test_listener2_dict['description'] = 'Listener 1' + self.test_listener2_dict['default_pool_id'] = self.pool2_id + self.test_listener2_dict['default_pool'] = self.test_pool2_dict + del self.test_listener2_dict['l7policies'] + del self.test_listener2_dict['sni_containers'] + + self.test_listeners = [self.test_listener1_dict, + self.test_listener2_dict] + + self.db_listener1 = data_models.Listener(**self.test_listener1_dict) + self.db_listener2 = data_models.Listener(**self.test_listener2_dict) + self.db_listener1.default_pool = self.db_pool1 + self.db_listener2.default_pool = 
self.db_pool2 + self.db_listener1.l7policies = self.db_l7policies + self.db_listener1.sni_containers = [ + data_models.SNI(tls_container_id='2'), + data_models.SNI(tls_container_id='3')] + + self.test_db_listeners = [self.db_listener1, self.db_listener2] + + cert1 = data_models.TLSContainer(certificate='cert 1') + cert2 = data_models.TLSContainer(certificate='cert 2') + cert3 = data_models.TLSContainer(certificate='cert 3') + + self.provider_listener1_dict = { + 'admin_state_up': True, + 'connection_limit': 10000, + 'default_pool': self.provider_pool1_dict, + 'default_pool_id': self.pool1_id, + 'default_tls_container_data': cert1.to_dict(), + 'default_tls_container_ref': self.default_tls_container_ref, + 'description': 'Listener 1', + 'insert_headers': {}, + 'l7policies': self.provider_l7policies_dict, + 'listener_id': self.listener1_id, + 'loadbalancer_id': self.lb_id, + 'name': 'listener_1', + 'protocol': 'avian', + 'protocol_port': 90, + 'sni_container_data': [cert2.to_dict(), cert3.to_dict()], + 'sni_container_refs': [self.sni_container_ref_1, + self.sni_container_ref_2], + 'timeout_client_data': 1000, + 'timeout_member_connect': 2000, + 'timeout_member_data': 3000, + 'timeout_tcp_inspect': 4000} + + self.provider_listener2_dict = copy.deepcopy( + self.provider_listener1_dict) + self.provider_listener2_dict['listener_id'] = self.listener2_id + self.provider_listener2_dict['name'] = 'listener_2' + self.provider_listener2_dict['description'] = 'Listener 1' + self.provider_listener2_dict['default_pool_id'] = self.pool2_id + self.provider_listener2_dict['default_pool'] = self.provider_pool2_dict + del self.provider_listener2_dict['l7policies'] + + self.provider_listener1 = driver_dm.Listener( + **self.provider_listener1_dict) + self.provider_listener2 = driver_dm.Listener( + **self.provider_listener2_dict) + self.provider_listener1.default_pool = self.provider_pool1 + self.provider_listener2.default_pool = self.provider_pool2 + self.provider_listener1.l7policies = 
self.provider_l7policies + + self.provider_listeners = [self.provider_listener1, + self.provider_listener2] + + self.test_vip_dict = {'ip_address': self.ip_address, + 'network_id': self.network_id, + 'port_id': self.port_id, + 'subnet_id': self.subnet_id, + 'qos_policy_id': self.qos_policy_id} + + self.provider_vip_dict = { + 'vip_address': self.ip_address, + 'vip_network_id': self.network_id, + 'vip_port_id': self.port_id, + 'vip_subnet_id': self.subnet_id, + 'vip_qos_policy_id': self.qos_policy_id} + + self.db_vip = data_models.Vip( + ip_address=self.ip_address, + network_id=self.network_id, + port_id=self.port_id, + subnet_id=self.subnet_id, + qos_policy_id=self.qos_policy_id) diff --git a/octavia/tests/unit/api/drivers/test_data_models.py b/octavia/tests/unit/api/drivers/test_data_models.py index 6f541fce3a..f23271d649 100644 --- a/octavia/tests/unit/api/drivers/test_data_models.py +++ b/octavia/tests/unit/api/drivers/test_data_models.py @@ -43,7 +43,7 @@ class TestProviderDataModels(base.TestCase): default_pool_id=None, default_tls_container_data='default_cert_data', default_tls_container_ref=self.default_tls_container_ref, - description='The listener', + description=data_models.Unset, insert_headers={'X-Forwarded-For': 'true'}, l7policies=[], listener_id=self.listener_id, @@ -87,7 +87,6 @@ class TestProviderDataModels(base.TestCase): 'default_pool_id': None, 'default_tls_container_data': 'default_cert_data', 'default_tls_container_ref': self.default_tls_container_ref, - 'description': 'The listener', 'insert_headers': {'X-Forwarded-For': 'true'}, 'listener_id': self.listener_id, 'l7policies': [], @@ -140,10 +139,15 @@ class TestProviderDataModels(base.TestCase): self.assertEqual(self.ref_lb_dict, ref_lb_converted_to_dict) + def test_to_dict_private_attrs(self): + private_dict = {'_test': 'foo'} + ref_lb_converted_to_dict = self.ref_lb.to_dict(**private_dict) + + self.assertEqual(self.ref_lb_dict, ref_lb_converted_to_dict) + def test_to_dict_partial(self): 
ref_lb = data_models.LoadBalancer(loadbalancer_id=self.loadbalancer_id) ref_lb_dict = {'loadbalancer_id': self.loadbalancer_id} - ref_lb_converted_to_dict = ref_lb.to_dict() self.assertEqual(ref_lb_dict, ref_lb_converted_to_dict) @@ -205,3 +209,9 @@ class TestProviderDataModels(base.TestCase): lb_object = data_models.LoadBalancer.from_dict(self.ref_lb_dict) self.assertEqual(self.ref_lb, lb_object) + + def test_unset_bool(self): + self.assertFalse(data_models.Unset) + + def test_unset_repr(self): + self.assertEqual('Unset', repr(data_models.Unset)) diff --git a/octavia/tests/unit/api/drivers/test_driver_factory.py b/octavia/tests/unit/api/drivers/test_driver_factory.py new file mode 100644 index 0000000000..215307fceb --- /dev/null +++ b/octavia/tests/unit/api/drivers/test_driver_factory.py @@ -0,0 +1,48 @@ +# Copyright 2018 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import mock + +from octavia.api.drivers import driver_factory +from octavia.common import exceptions +import octavia.tests.unit.base as base + + +class TestDriverFactory(base.TestCase): + + def setUp(self): + super(TestDriverFactory, self).setUp() + + @mock.patch('stevedore.driver.DriverManager') + def test_driver_factory_no_provider(self, mock_drivermgr): + mock_mgr = mock.MagicMock() + mock_drivermgr.return_value = mock_mgr + + driver = driver_factory.get_driver(None) + + self.assertEqual(mock_mgr.driver, driver) + + @mock.patch('stevedore.driver.DriverManager') + def test_driver_factory_failed_to_load_driver(self, mock_drivermgr): + mock_drivermgr.side_effect = Exception('boom') + + self.assertRaises(exceptions.ProviderNotFound, + driver_factory.get_driver, None) + + @mock.patch('stevedore.driver.DriverManager') + def test_driver_factory_not_enabled(self, mock_drivermgr): + + self.assertRaises(exceptions.ProviderNotEnabled, + driver_factory.get_driver, + 'dont-enable-this-fake-driver-name') diff --git a/octavia/tests/unit/api/drivers/test_utils.py b/octavia/tests/unit/api/drivers/test_utils.py index 385c19396f..b396bbff2c 100644 --- a/octavia/tests/unit/api/drivers/test_utils.py +++ b/octavia/tests/unit/api/drivers/test_utils.py @@ -12,446 +12,24 @@ # License for the specific language governing permissions and limitations # under the License. 
- import copy import mock -from oslo_utils import uuidutils - from octavia.api.drivers import data_models as driver_dm from octavia.api.drivers import exceptions as driver_exceptions from octavia.api.drivers import utils from octavia.common import constants from octavia.common import data_models from octavia.common import exceptions +from octavia.tests.unit.api.drivers import sample_data_models from octavia.tests.unit import base class TestUtils(base.TestCase): def setUp(self): super(TestUtils, self).setUp() - - hm1_id = uuidutils.generate_uuid() - hm2_id = uuidutils.generate_uuid() - l7policy1_id = uuidutils.generate_uuid() - l7policy2_id = uuidutils.generate_uuid() - l7rule1_id = uuidutils.generate_uuid() - l7rule2_id = uuidutils.generate_uuid() - listener1_id = uuidutils.generate_uuid() - listener2_id = uuidutils.generate_uuid() - member1_id = uuidutils.generate_uuid() - member2_id = uuidutils.generate_uuid() - member3_id = uuidutils.generate_uuid() - member4_id = uuidutils.generate_uuid() - pool1_id = uuidutils.generate_uuid() - pool2_id = uuidutils.generate_uuid() - self.lb_id = uuidutils.generate_uuid() - self.project_id = uuidutils.generate_uuid() - self.ip_address = '192.0.2.30' - self.port_id = uuidutils.generate_uuid() - self.network_id = uuidutils.generate_uuid() - self.subnet_id = uuidutils.generate_uuid() - self.qos_policy_id = uuidutils.generate_uuid() - self.default_tls_container_ref = uuidutils.generate_uuid() - self.sni_container_ref_1 = uuidutils.generate_uuid() - self.sni_container_ref_2 = uuidutils.generate_uuid() - - _common_test_dict = {'provisioning_status': constants.ACTIVE, - 'operating_status': constants.ONLINE, - 'project_id': self.project_id, - 'created_at': 'then', - 'updated_at': 'now', - 'enabled': True} - - # Setup Health Monitors - self.test_hm1_dict = {'id': hm1_id, - 'type': constants.HEALTH_MONITOR_PING, - 'delay': 1, 'timeout': 3, 'fall_threshold': 1, - 'rise_threshold': 2, 'http_method': 'GET', - 'url_path': '/', 
'expected_codes': '200', - 'name': 'hm1', 'pool_id': pool1_id} - - self.test_hm1_dict.update(_common_test_dict) - - self.test_hm2_dict = copy.deepcopy(self.test_hm1_dict) - self.test_hm2_dict['id'] = hm2_id - self.test_hm2_dict['name'] = 'hm2' - - self.db_hm1 = data_models.HealthMonitor(**self.test_hm1_dict) - self.db_hm2 = data_models.HealthMonitor(**self.test_hm2_dict) - - self.provider_hm1_dict = {'admin_state_up': True, - 'delay': 1, 'expected_codes': '200', - 'healthmonitor_id': hm1_id, - 'http_method': 'GET', - 'max_retries': 2, - 'max_retries_down': 1, - 'name': 'hm1', - 'pool_id': pool1_id, - 'timeout': 3, - 'type': constants.HEALTH_MONITOR_PING, - 'url_path': '/'} - - self.provider_hm2_dict = copy.deepcopy(self.provider_hm1_dict) - self.provider_hm2_dict['healthmonitor_id'] = hm2_id - self.provider_hm2_dict['name'] = 'hm2' - - self.provider_hm1 = driver_dm.HealthMonitor(**self.provider_hm1_dict) - self.provider_hm2 = driver_dm.HealthMonitor(**self.provider_hm2_dict) - - # Setup Members - self.test_member1_dict = {'id': member1_id, - 'pool_id': pool1_id, - 'ip_address': '192.0.2.16', - 'protocol_port': 80, 'weight': 0, - 'backup': False, - 'subnet_id': self.subnet_id, - 'pool': None, - 'name': 'member1', - 'monitor_address': '192.0.2.26', - 'monitor_port': 81} - - self.test_member1_dict.update(_common_test_dict) - - self.test_member2_dict = copy.deepcopy(self.test_member1_dict) - self.test_member2_dict['id'] = member2_id - self.test_member2_dict['ip_address'] = '192.0.2.17' - self.test_member2_dict['monitor_address'] = '192.0.2.27' - self.test_member2_dict['name'] = 'member2' - - self.test_member3_dict = copy.deepcopy(self.test_member1_dict) - self.test_member3_dict['id'] = member3_id - self.test_member3_dict['ip_address'] = '192.0.2.18' - self.test_member3_dict['monitor_address'] = '192.0.2.28' - self.test_member3_dict['name'] = 'member3' - self.test_member3_dict['pool_id'] = pool2_id - - self.test_member4_dict = copy.deepcopy(self.test_member1_dict) - 
self.test_member4_dict['id'] = member4_id - self.test_member4_dict['ip_address'] = '192.0.2.19' - self.test_member4_dict['monitor_address'] = '192.0.2.29' - self.test_member4_dict['name'] = 'member4' - self.test_member4_dict['pool_id'] = pool2_id - - self.test_pool1_members_dict = [self.test_member1_dict, - self.test_member2_dict] - self.test_pool2_members_dict = [self.test_member3_dict, - self.test_member4_dict] - - self.db_member1 = data_models.Member(**self.test_member1_dict) - self.db_member2 = data_models.Member(**self.test_member2_dict) - self.db_member3 = data_models.Member(**self.test_member3_dict) - self.db_member4 = data_models.Member(**self.test_member4_dict) - - self.db_pool1_members = [self.db_member1, self.db_member2] - self.db_pool2_members = [self.db_member3, self.db_member4] - - self.provider_member1_dict = {'address': '192.0.2.16', - 'admin_state_up': True, - 'member_id': member1_id, - 'monitor_address': '192.0.2.26', - 'monitor_port': 81, - 'name': 'member1', - 'pool_id': pool1_id, - 'protocol_port': 80, - 'subnet_id': self.subnet_id, - 'weight': 0, - 'backup': False} - - self.provider_member2_dict = copy.deepcopy(self.provider_member1_dict) - self.provider_member2_dict['member_id'] = member2_id - self.provider_member2_dict['address'] = '192.0.2.17' - self.provider_member2_dict['monitor_address'] = '192.0.2.27' - self.provider_member2_dict['name'] = 'member2' - - self.provider_member3_dict = copy.deepcopy(self.provider_member1_dict) - self.provider_member3_dict['member_id'] = member3_id - self.provider_member3_dict['address'] = '192.0.2.18' - self.provider_member3_dict['monitor_address'] = '192.0.2.28' - self.provider_member3_dict['name'] = 'member3' - self.provider_member3_dict['pool_id'] = pool2_id - - self.provider_member4_dict = copy.deepcopy(self.provider_member1_dict) - self.provider_member4_dict['member_id'] = member4_id - self.provider_member4_dict['address'] = '192.0.2.19' - self.provider_member4_dict['monitor_address'] = '192.0.2.29' - 
self.provider_member4_dict['name'] = 'member4' - self.provider_member4_dict['pool_id'] = pool2_id - - self.provider_pool1_members_dict = [self.provider_member1_dict, - self.provider_member2_dict] - - self.provider_pool2_members_dict = [self.provider_member3_dict, - self.provider_member4_dict] - - self.provider_member1 = driver_dm.Member(**self.provider_member1_dict) - self.provider_member2 = driver_dm.Member(**self.provider_member2_dict) - self.provider_member3 = driver_dm.Member(**self.provider_member3_dict) - self.provider_member4 = driver_dm.Member(**self.provider_member4_dict) - - self.provider_pool1_members = [self.provider_member1, - self.provider_member2] - self.provider_pool2_members = [self.provider_member3, - self.provider_member4] - - # Setup test pools - self.test_pool1_dict = {'id': pool1_id, - 'name': 'pool1', 'description': 'Pool 1', - 'load_balancer_id': self.lb_id, - 'protocol': 'avian', - 'lb_algorithm': 'round_robin', - 'members': self.test_pool1_members_dict, - 'health_monitor': self.test_hm1_dict, - 'session_persistence': {'type': 'SOURCE'}, - 'listeners': [], - 'l7policies': []} - - self.test_pool1_dict.update(_common_test_dict) - - self.test_pool2_dict = copy.deepcopy(self.test_pool1_dict) - self.test_pool2_dict['id'] = pool2_id - self.test_pool2_dict['name'] = 'pool2' - self.test_pool2_dict['description'] = 'Pool 2' - self.test_pool2_dict['members'] = self.test_pool2_members_dict - - self.test_pools = [self.test_pool1_dict, self.test_pool2_dict] - - self.db_pool1 = data_models.Pool(**self.test_pool1_dict) - self.db_pool1.health_monitor = self.db_hm1 - self.db_pool1.members = self.db_pool1_members - self.db_pool2 = data_models.Pool(**self.test_pool2_dict) - self.db_pool2.health_monitor = self.db_hm2 - self.db_pool2.members = self.db_pool2_members - - self.test_db_pools = [self.db_pool1, self.db_pool2] - - self.provider_pool1_dict = { - 'admin_state_up': True, - 'description': 'Pool 1', - 'healthmonitor': self.provider_hm1_dict, - 
'lb_algorithm': 'round_robin', - 'loadbalancer_id': self.lb_id, - 'members': self.provider_pool1_members_dict, - 'name': 'pool1', - 'pool_id': pool1_id, - 'protocol': 'avian', - 'session_persistence': {'type': 'SOURCE'}} - - self.provider_pool2_dict = copy.deepcopy(self.provider_pool1_dict) - self.provider_pool2_dict['pool_id'] = pool2_id - self.provider_pool2_dict['name'] = 'pool2' - self.provider_pool2_dict['description'] = 'Pool 2' - self.provider_pool2_dict['members'] = self.provider_pool2_members_dict - self.provider_pool2_dict['healthmonitor'] = self.provider_hm2_dict - - self.provider_pool1 = driver_dm.Pool(**self.provider_pool1_dict) - self.provider_pool1.members = self.provider_pool1_members - self.provider_pool1.healthmonitor = self.provider_hm1 - self.provider_pool2 = driver_dm.Pool(**self.provider_pool2_dict) - self.provider_pool2.members = self.provider_pool2_members - self.provider_pool2.healthmonitor = self.provider_hm2 - - self.provider_pools = [self.provider_pool1, self.provider_pool2] - - # Setup L7Rules - self.test_l7rule1_dict = {'id': l7rule1_id, - 'l7policy_id': l7policy1_id, - 'type': 'o', - 'compare_type': 'fake_type', - 'key': 'fake_key', - 'value': 'fake_value', - 'l7policy': None, - 'invert': False} - - self.test_l7rule1_dict.update(_common_test_dict) - - self.test_l7rule2_dict = copy.deepcopy(self.test_l7rule1_dict) - self.test_l7rule2_dict['id'] = l7rule2_id - - self.test_l7rules = [self.test_l7rule1_dict, self.test_l7rule2_dict] - - self.db_l7Rule1 = data_models.L7Rule(**self.test_l7rule1_dict) - self.db_l7Rule2 = data_models.L7Rule(**self.test_l7rule2_dict) - - self.db_l7Rules = [self.db_l7Rule1, self.db_l7Rule2] - - self.provider_l7rule1_dict = {'admin_state_up': True, - 'compare_type': 'fake_type', - 'invert': False, - 'key': 'fake_key', - 'l7policy_id': l7policy1_id, - 'l7rule_id': l7rule1_id, - 'type': 'o', - 'value': 'fake_value'} - - self.provider_l7rule2_dict = copy.deepcopy(self.provider_l7rule1_dict) - 
self.provider_l7rule2_dict['l7rule_id'] = l7rule2_id - - self.provider_l7rules_dicts = [self.provider_l7rule1_dict, - self.provider_l7rule2_dict] - - self.provider_l7rule1 = driver_dm.L7Rule(**self.provider_l7rule1_dict) - self.provider_l7rule2 = driver_dm.L7Rule(**self.provider_l7rule2_dict) - - self.provider_rules = [self.provider_l7rule1, self.provider_l7rule2] - - # Setup L7Policies - self.test_l7policy1_dict = {'id': l7policy1_id, - 'name': 'l7policy_1', - 'description': 'L7policy 1', - 'listener_id': listener1_id, - 'action': 'go', - 'redirect_pool_id': pool1_id, - 'redirect_url': '/index.html', - 'position': 1, - 'listener': None, - 'redirect_pool': None, - 'l7rules': self.test_l7rules} - - self.test_l7policy1_dict.update(_common_test_dict) - - self.test_l7policy2_dict = copy.deepcopy(self.test_l7policy1_dict) - self.test_l7policy2_dict['id'] = l7policy2_id - self.test_l7policy2_dict['name'] = 'l7policy_2' - self.test_l7policy2_dict['description'] = 'L7policy 2' - - self.test_l7policies = [self.test_l7policy1_dict, - self.test_l7policy2_dict] - - self.db_l7policy1 = data_models.L7Policy(**self.test_l7policy1_dict) - self.db_l7policy2 = data_models.L7Policy(**self.test_l7policy2_dict) - self.db_l7policy1.l7rules = self.db_l7Rules - self.db_l7policy2.l7rules = self.db_l7Rules - - self.db_l7policies = [self.db_l7policy1, self.db_l7policy2] - - self.provider_l7policy1_dict = {'action': 'go', - 'admin_state_up': True, - 'description': 'L7policy 1', - 'l7policy_id': l7policy1_id, - 'listener_id': listener1_id, - 'name': 'l7policy_1', - 'position': 1, - 'redirect_pool_id': pool1_id, - 'redirect_url': '/index.html', - 'rules': self.provider_l7rules_dicts} - - self.provider_l7policy2_dict = copy.deepcopy( - self.provider_l7policy1_dict) - self.provider_l7policy2_dict['l7policy_id'] = l7policy2_id - self.provider_l7policy2_dict['name'] = 'l7policy_2' - self.provider_l7policy2_dict['description'] = 'L7policy 2' - - self.provider_l7policies_dict = 
[self.provider_l7policy1_dict, - self.provider_l7policy2_dict] - - self.provider_l7policy1 = driver_dm.L7Policy( - **self.provider_l7policy1_dict) - self.provider_l7policy1.rules = self.provider_rules - self.provider_l7policy2 = driver_dm.L7Policy( - **self.provider_l7policy2_dict) - self.provider_l7policy2.rules = self.provider_rules - - self.provider_l7policies = [self.provider_l7policy1, - self.provider_l7policy2] - - # Setup Listeners - self.test_listener1_dict = { - 'id': listener1_id, - 'name': 'listener_1', - 'description': 'Listener 1', - 'default_pool_id': pool1_id, - 'load_balancer_id': self.lb_id, - 'protocol': 'avian', - 'protocol_port': 90, - 'connection_limit': 10000, - 'tls_certificate_id': self.default_tls_container_ref, - 'stats': None, - 'default_pool': self.test_pool1_dict, - 'load_balancer': None, - 'sni_containers': [self.sni_container_ref_1, - self.sni_container_ref_2], - 'peer_port': 55, - 'l7policies': self.test_l7policies, - 'insert_headers': {}, - 'pools': None, - 'timeout_client_data': 1000, - 'timeout_member_connect': 2000, - 'timeout_member_data': 3000, - 'timeout_tcp_inspect': 4000} - - self.test_listener1_dict.update(_common_test_dict) - - self.test_listener2_dict = copy.deepcopy(self.test_listener1_dict) - self.test_listener2_dict['id'] = listener2_id - self.test_listener2_dict['name'] = 'listener_2' - self.test_listener2_dict['description'] = 'Listener 1' - self.test_listener2_dict['default_pool_id'] = pool2_id - self.test_listener2_dict['default_pool'] = self.test_pool2_dict - del self.test_listener2_dict['l7policies'] - del self.test_listener2_dict['sni_containers'] - - self.test_listeners = [self.test_listener1_dict, - self.test_listener2_dict] - - self.db_listener1 = data_models.Listener(**self.test_listener1_dict) - self.db_listener2 = data_models.Listener(**self.test_listener2_dict) - self.db_listener1.default_pool = self.db_pool1 - self.db_listener2.default_pool = self.db_pool2 - self.db_listener1.l7policies = 
self.db_l7policies - self.db_listener1.sni_containers = [ - data_models.SNI(tls_container_id='2'), - data_models.SNI(tls_container_id='3')] - - self.test_db_listeners = [self.db_listener1, self.db_listener2] - - cert1 = data_models.TLSContainer(certificate='cert 1') - cert2 = data_models.TLSContainer(certificate='cert 2') - cert3 = data_models.TLSContainer(certificate='cert 3') - - self.provider_listener1_dict = { - 'admin_state_up': True, - 'connection_limit': 10000, - 'default_pool': self.provider_pool1_dict, - 'default_pool_id': pool1_id, - 'default_tls_container_data': cert1.to_dict(), - 'default_tls_container_ref': self.default_tls_container_ref, - 'description': 'Listener 1', - 'insert_headers': {}, - 'l7policies': self.provider_l7policies_dict, - 'listener_id': listener1_id, - 'loadbalancer_id': self.lb_id, - 'name': 'listener_1', - 'protocol': 'avian', - 'protocol_port': 90, - 'sni_container_data': [cert2.to_dict(), cert3.to_dict()], - 'sni_container_refs': [self.sni_container_ref_1, - self.sni_container_ref_2], - 'timeout_client_data': 1000, - 'timeout_member_connect': 2000, - 'timeout_member_data': 3000, - 'timeout_tcp_inspect': 4000} - - self.provider_listener2_dict = copy.deepcopy( - self.provider_listener1_dict) - self.provider_listener2_dict['listener_id'] = listener2_id - self.provider_listener2_dict['name'] = 'listener_2' - self.provider_listener2_dict['description'] = 'Listener 1' - self.provider_listener2_dict['default_pool_id'] = pool2_id - self.provider_listener2_dict['default_pool'] = self.provider_pool2_dict - del self.provider_listener2_dict['l7policies'] - - self.provider_listener1 = driver_dm.Listener( - **self.provider_listener1_dict) - self.provider_listener2 = driver_dm.Listener( - **self.provider_listener2_dict) - self.provider_listener1.default_pool = self.provider_pool1 - self.provider_listener2.default_pool = self.provider_pool2 - self.provider_listener1.l7policies = self.provider_l7policies - - self.provider_listeners = 
[self.provider_listener1, - self.provider_listener2] + self.sample_data = sample_data_models.SampleDriverDataModels() def test_call_provider(self): mock_driver_method = mock.MagicMock() @@ -513,14 +91,15 @@ class TestUtils(base.TestCase): cert3 = data_models.TLSContainer(certificate='cert 3') mock_load_cert.return_value = {'tls_cert': cert1, 'sni_certs': [cert2, cert3]} - test_lb_dict = {'name': 'lb1', 'project_id': self.project_id, - 'vip_subnet_id': self.subnet_id, - 'vip_port_id': self.port_id, - 'vip_address': self.ip_address, - 'vip_network_id': self.network_id, - 'vip_qos_policy_id': self.qos_policy_id, + test_lb_dict = {'name': 'lb1', + 'project_id': self.sample_data.project_id, + 'vip_subnet_id': self.sample_data.subnet_id, + 'vip_port_id': self.sample_data.port_id, + 'vip_address': self.sample_data.ip_address, + 'vip_network_id': self.sample_data.network_id, + 'vip_qos_policy_id': self.sample_data.qos_policy_id, 'provider': 'noop_driver', - 'id': self.lb_id, + 'id': self.sample_data.lb_id, 'listeners': [], 'pools': [], 'description': '', 'admin_state_up': True, @@ -528,30 +107,40 @@ class TestUtils(base.TestCase): 'operating_status': constants.OFFLINE, 'flavor_id': '', 'provider': 'noop_driver'} - ref_prov_lb_dict = {'vip_address': self.ip_address, - 'admin_state_up': True, - 'loadbalancer_id': self.lb_id, - 'vip_subnet_id': self.subnet_id, - 'listeners': self.provider_listeners, - 'description': '', - 'project_id': self.project_id, - 'flavor_id': '', - 'vip_port_id': self.port_id, - 'vip_qos_policy_id': self.qos_policy_id, - 'vip_network_id': self.network_id, - 'pools': self.provider_pools, - 'name': 'lb1'} - vip = data_models.Vip(ip_address=self.ip_address, - network_id=self.network_id, - port_id=self.port_id, subnet_id=self.subnet_id, - qos_policy_id=self.qos_policy_id) + ref_prov_lb_dict = { + 'vip_address': self.sample_data.ip_address, + 'admin_state_up': True, + 'loadbalancer_id': self.sample_data.lb_id, + 'vip_subnet_id': self.sample_data.subnet_id, 
+ 'listeners': self.sample_data.provider_listeners, + 'description': '', + 'project_id': self.sample_data.project_id, + 'flavor_id': '', + 'vip_port_id': self.sample_data.port_id, + 'vip_qos_policy_id': self.sample_data.qos_policy_id, + 'vip_network_id': self.sample_data.network_id, + 'pools': self.sample_data.provider_pools, + 'name': 'lb1'} + vip = data_models.Vip(ip_address=self.sample_data.ip_address, + network_id=self.sample_data.network_id, + port_id=self.sample_data.port_id, + subnet_id=self.sample_data.subnet_id, + qos_policy_id=self.sample_data.qos_policy_id) provider_lb_dict = utils.lb_dict_to_provider_dict( - test_lb_dict, vip=vip, db_pools=self.test_db_pools, - db_listeners=self.test_db_listeners) + test_lb_dict, vip=vip, db_pools=self.sample_data.test_db_pools, + db_listeners=self.sample_data.test_db_listeners) self.assertEqual(ref_prov_lb_dict, provider_lb_dict) + def test_db_listener_to_provider_listener(self): + test_db_list = data_models.Listener(id=1) + provider_list = utils.db_listener_to_provider_listener(test_db_list) + ref_provider_list = driver_dm.Listener(listener_id=1, + insert_headers={}) + self.assertEqual(ref_provider_list.to_dict(render_unsets=True), + provider_list.to_dict(render_unsets=True)) + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_db_listeners_to_provider_listeners(self, mock_load_cert): cert1 = data_models.TLSContainer(certificate='cert 1') @@ -560,8 +149,9 @@ class TestUtils(base.TestCase): mock_load_cert.return_value = {'tls_cert': cert1, 'sni_certs': [cert2, cert3]} provider_listeners = utils.db_listeners_to_provider_listeners( - self.test_db_listeners) - self.assertEqual(self.provider_listeners, provider_listeners) + self.sample_data.test_db_listeners) + self.assertEqual(self.sample_data.provider_listeners, + provider_listeners) @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') def test_listener_dict_to_provider_dict(self, mock_load_cert): @@ -571,88 +161,104 
@@ class TestUtils(base.TestCase): mock_load_cert.return_value = {'tls_cert': cert1, 'sni_certs': [cert2, cert3]} provider_listener = utils.listener_dict_to_provider_dict( - self.test_listener1_dict) - self.assertEqual(self.provider_listener1_dict, provider_listener) + self.sample_data.test_listener1_dict) + self.assertEqual(self.sample_data.provider_listener1_dict, + provider_listener) + + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_listener_dict_to_provider_dict_SNI(self, mock_load_cert): + cert1 = data_models.TLSContainer(certificate='cert 1') + cert2 = data_models.TLSContainer(certificate='cert 2') + cert3 = data_models.TLSContainer(certificate='cert 3') + mock_load_cert.return_value = {'tls_cert': cert1, + 'sni_certs': [cert2, cert3]} + # Test with bad SNI content + test_listener = copy.deepcopy(self.sample_data.test_listener1_dict) + test_listener['sni_containers'] = [[]] + self.assertRaises(exceptions.ValidationException, + utils.listener_dict_to_provider_dict, + test_listener) def test_db_pool_to_provider_pool(self): - provider_pool = utils.db_pool_to_provider_pool(self.db_pool1) - self.assertEqual(self.provider_pool1, provider_pool) + provider_pool = utils.db_pool_to_provider_pool( + self.sample_data.db_pool1) + self.assertEqual(self.sample_data.provider_pool1, provider_pool) + + def test_db_pool_to_provider_pool_partial(self): + test_db_pool = self.sample_data.db_pool1 + test_db_pool.members = [self.sample_data.db_member1] + provider_pool = utils.db_pool_to_provider_pool(test_db_pool) + self.assertEqual(self.sample_data.provider_pool1, provider_pool) def test_db_pools_to_provider_pools(self): - provider_pools = utils.db_pools_to_provider_pools(self.test_db_pools) - self.assertEqual(self.provider_pools, provider_pools) + provider_pools = utils.db_pools_to_provider_pools( + self.sample_data.test_db_pools) + self.assertEqual(self.sample_data.provider_pools, provider_pools) def test_pool_dict_to_provider_dict(self): 
provider_pool_dict = utils.pool_dict_to_provider_dict( - self.test_pool1_dict) - self.assertEqual(self.provider_pool1_dict, provider_pool_dict) + self.sample_data.test_pool1_dict) + self.assertEqual(self.sample_data.provider_pool1_dict, + provider_pool_dict) def test_db_HM_to_provider_HM(self): - provider_hm = utils.db_HM_to_provider_HM(self.db_hm1) - self.assertEqual(self.provider_hm1, provider_hm) + provider_hm = utils.db_HM_to_provider_HM(self.sample_data.db_hm1) + self.assertEqual(self.sample_data.provider_hm1, provider_hm) def test_hm_dict_to_provider_dict(self): - provider_hm_dict = utils.hm_dict_to_provider_dict(self.test_hm1_dict) - self.assertEqual(self.provider_hm1_dict, provider_hm_dict) + provider_hm_dict = utils.hm_dict_to_provider_dict( + self.sample_data.test_hm1_dict) + self.assertEqual(self.sample_data.provider_hm1_dict, provider_hm_dict) + + def test_hm_dict_to_provider_dict_partial(self): + provider_hm_dict = utils.hm_dict_to_provider_dict({'id': 1}) + self.assertEqual({'healthmonitor_id': 1}, provider_hm_dict) def test_db_members_to_provider_members(self): provider_members = utils.db_members_to_provider_members( - self.db_pool1_members) - self.assertEqual(self.provider_pool1_members, provider_members) + self.sample_data.db_pool1_members) + self.assertEqual(self.sample_data.provider_pool1_members, + provider_members) def test_member_dict_to_provider_dict(self): provider_member_dict = utils.member_dict_to_provider_dict( - self.test_member1_dict) - self.assertEqual(self.provider_member1_dict, provider_member_dict) + self.sample_data.test_member1_dict) + self.assertEqual(self.sample_data.provider_member1_dict, + provider_member_dict) def test_db_l7policies_to_provider_l7policies(self): provider_rules = utils.db_l7policies_to_provider_l7policies( - self.db_l7policies) - self.assertEqual(self.provider_l7policies, provider_rules) + self.sample_data.db_l7policies) + self.assertEqual(self.sample_data.provider_l7policies, provider_rules) def 
test_l7policy_dict_to_provider_dict(self): provider_l7policy_dict = utils.l7policy_dict_to_provider_dict( - self.test_l7policy1_dict) - self.assertEqual(self.provider_l7policy1_dict, provider_l7policy_dict) + self.sample_data.test_l7policy1_dict) + self.assertEqual(self.sample_data.provider_l7policy1_dict, + provider_l7policy_dict) def test_db_l7rules_to_provider_l7rules(self): - provider_rules = utils.db_l7rules_to_provider_l7rules(self.db_l7Rules) - self.assertEqual(self.provider_rules, provider_rules) + provider_rules = utils.db_l7rules_to_provider_l7rules( + self.sample_data.db_l7Rules) + self.assertEqual(self.sample_data.provider_rules, provider_rules) def test_l7rule_dict_to_provider_dict(self): provider_rules_dict = utils.l7rule_dict_to_provider_dict( - self.test_l7rule1_dict) - self.assertEqual(self.provider_l7rule1_dict, provider_rules_dict) + self.sample_data.test_l7rule1_dict) + self.assertEqual(self.sample_data.provider_l7rule1_dict, + provider_rules_dict) def test_vip_dict_to_provider_dict(self): - test_vip_dict = {'ip_address': self.ip_address, - 'network_id': self.network_id, - 'port_id': self.port_id, - 'subnet_id': self.subnet_id, - 'qos_policy_id': self.qos_policy_id} + new_vip_dict = utils.vip_dict_to_provider_dict( + self.sample_data.test_vip_dict) + self.assertEqual(self.sample_data.provider_vip_dict, new_vip_dict) - provider_vip_dict = {'vip_address': self.ip_address, - 'vip_network_id': self.network_id, - 'vip_port_id': self.port_id, - 'vip_subnet_id': self.subnet_id, - 'vip_qos_policy_id': self.qos_policy_id} - - new_vip_dict = utils.vip_dict_to_provider_dict(test_vip_dict) - self.assertEqual(provider_vip_dict, new_vip_dict) + def test_vip_dict_to_provider_dict_partial(self): + new_vip_dict = utils.vip_dict_to_provider_dict( + {'ip_address': '192.0.2.44'}) + self.assertEqual({'vip_address': '192.0.2.44'}, new_vip_dict) def test_provider_vip_dict_to_vip_obj(self): - provider_vip_dict = {'vip_address': self.ip_address, - 'vip_network_id': 
self.network_id, - 'vip_port_id': self.port_id, - 'vip_subnet_id': self.subnet_id, - 'vip_qos_policy_id': self.qos_policy_id} - - ref_vip = data_models.Vip(ip_address=self.ip_address, - network_id=self.network_id, - port_id=self.port_id, - subnet_id=self.subnet_id, - qos_policy_id=self.qos_policy_id) - new_provider_vip = utils.provider_vip_dict_to_vip_obj( - provider_vip_dict) - self.assertEqual(ref_vip, new_provider_vip) + self.sample_data.provider_vip_dict) + self.assertEqual(self.sample_data.db_vip, new_provider_vip) diff --git a/releasenotes/notes/add_provider_driver_support-7523f130dd5025af.yaml b/releasenotes/notes/add_provider_driver_support-7523f130dd5025af.yaml new file mode 100644 index 0000000000..50efedb09e --- /dev/null +++ b/releasenotes/notes/add_provider_driver_support-7523f130dd5025af.yaml @@ -0,0 +1,44 @@ +--- +features: + - | + Octavia now supports provider drivers. This allows third party load + balancing drivers to be integrated with the Octavia v2 API. Users select + the "provider" for a load balancer at creation time. + - | + There is now an API available to list enabled provider drivers. +upgrade: + - | + Two new options are included with provider driver support. The + enabled_provider_drivers option defaults to "amphora, octavia" to support + existing Octavia load balancers. The default_provider_driver option + defaults to "amphora" for all new load balancers that do not specify a + provider at creation time. These defaults should cover most existing + deployments. + - | + The provider driver support requires a database migration and follows + Octavia standard rolling upgrade procedures; database migration followed + by rolling control plane upgrades. Existing load balancers with no + provider specified will be assigned "amphora" as part of the database + migration. +deprecations: + - | + The Octavia API handlers are now deprecated and replaced by the new + provider driver support. 
Octavia API handlers will remain in the code to + support the Octavia v1 API (used for neutron-lbaas). + - | + The provider "octavia" has been deprecated in favor of "amphora" to clarify + the provider driver supporting the load balancer. +other: + - | + A provider driver developer guide has been added to the documentation to + aid provider driver developers. + - | + An operator documentation page has been added to list known Octavia + provider drivers and provide links to those drivers. + Non-reference drivers, drivers other than the "amphora" driver, will be + outside of the octavia code repository but are dynamically loadable via + a well-defined interface described in the provider driver developer + guide. + - | + Installed drivers need to be enabled for use in the Octavia + configuration file once you are ready to expose the driver to users.