From 0ab16921ae3a0380dd18b6bd6e7f0d72511b80d0 Mon Sep 17 00:00:00 2001 From: Michael Johnson Date: Thu, 16 May 2019 16:30:05 -0700 Subject: [PATCH] Create Amphora V2 provider driver This patch creates an Amphora v2 provider driver as well as a V2 controller worker. This is in preparation for having the amphora driver use the new provider driver data models and rely less on native Octavia database access. It is also a preparation step for enabling TaskFlow JobBoard as this work will move to storing dictionaries in the flows instead of database models. Change-Id: Ia65539a8c39560e2276750d8e79a637be4c0f265 Story: 2005072 Task: 30806 --- .../drivers/amphora_driver/v1}/__init__.py | 0 .../drivers/amphora_driver/{ => v1}/driver.py | 0 .../drivers/amphora_driver/v2}/__init__.py | 0 .../api/drivers/amphora_driver/v2/driver.py | 344 +++ octavia/api/v2/controllers/listener.py | 4 +- octavia/cmd/octavia_worker.py | 7 +- octavia/common/constants.py | 2 + .../healthmanager/health_manager.py | 2 +- .../controller/housekeeping/house_keeping.py | 2 +- .../flows => controller/queue/v1}/__init__.py | 0 octavia/controller/queue/{ => v1}/consumer.py | 4 +- .../queue/{endpoint.py => v1/endpoints.py} | 2 +- .../tasks => controller/queue/v2}/__init__.py | 0 octavia/controller/queue/v2/consumer.py | 65 + octavia/controller/queue/v2/endpoints.py | 156 + octavia/controller/worker/v1/__init__.py | 11 + .../worker/{ => v1}/controller_worker.py | 16 +- .../controller/worker/v1/flows/__init__.py | 11 + .../worker/{ => v1}/flows/amphora_flows.py | 12 +- .../{ => v1}/flows/health_monitor_flows.py | 8 +- .../worker/{ => v1}/flows/l7policy_flows.py | 8 +- .../worker/{ => v1}/flows/l7rule_flows.py | 8 +- .../worker/{ => v1}/flows/listener_flows.py | 8 +- .../{ => v1}/flows/load_balancer_flows.py | 18 +- .../worker/{ => v1}/flows/member_flows.py | 10 +- .../worker/{ => v1}/flows/pool_flows.py | 8 +- .../controller/worker/v1/tasks/__init__.py | 11 + .../{ => v1}/tasks/amphora_driver_tasks.py | 0 .../worker/{ => v1}/tasks/cert_task.py | 0 .../worker/{ => v1}/tasks/compute_tasks.py | 0 .../worker/{ => v1}/tasks/database_tasks.py | 0 .../worker/{ => v1}/tasks/lifecycle_tasks.py | 0 .../worker/{ => v1}/tasks/model_tasks.py | 0 .../worker/{ => v1}/tasks/network_tasks.py | 0 octavia/controller/worker/v2/__init__.py | 11 + .../controller/worker/v2/controller_worker.py | 964 ++++++ .../controller/worker/v2/flows/__init__.py | 11 + .../worker/v2/flows/amphora_flows.py | 599 ++++ .../worker/v2/flows/health_monitor_flows.py | 105 + .../worker/v2/flows/l7policy_flows.py | 92 + .../worker/v2/flows/l7rule_flows.py | 98 + .../worker/v2/flows/listener_flows.py | 126 + .../worker/v2/flows/load_balancer_flows.py | 341 +++ .../worker/v2/flows/member_flows.py | 209 ++ .../controller/worker/v2/flows/pool_flows.py | 127 + .../controller/worker/v2/tasks/__init__.py | 11 + .../worker/v2/tasks/amphora_driver_tasks.py | 397 +++ .../controller/worker/v2/tasks/cert_task.py | 51 + .../worker/v2/tasks/compute_tasks.py | 251 ++ .../worker/v2/tasks/database_tasks.py | 2707 ++++++++++++++++ .../worker/v2/tasks/lifecycle_tasks.py | 173 ++ .../controller/worker/v2/tasks/model_tasks.py | 41 + .../worker/v2/tasks/network_tasks.py | 659 ++++ .../api/drivers/amphora_driver/v1/__init__.py | 11 + .../{ => v1}/test_amphora_driver.py | 2 +- .../api/drivers/amphora_driver/v2/__init__.py | 11 + .../amphora_driver/v2/test_amphora_driver.py | 486 +++ .../healthmanager/test_health_manager.py | 6 +- .../housekeeping/test_house_keeping.py | 6 +-
.../unit/controller/queue/v1/__init__.py | 11 + .../queue/{ => v1}/test_consumer.py | 6 +- .../test_endpoints.py} | 14 +- .../unit/controller/queue/v2/__init__.py | 11 + .../unit/controller/queue/v2/test_consumer.py | 72 + .../controller/queue/v2/test_endpoints.py | 182 ++ .../unit/controller/worker/v1/__init__.py | 11 + .../controller/worker/v1/flows/__init__.py | 11 + .../{ => v1}/flows/test_amphora_flows.py | 2 +- .../flows/test_health_monitor_flows.py | 2 +- .../{ => v1}/flows/test_l7policy_flows.py | 2 +- .../{ => v1}/flows/test_l7rule_flows.py | 2 +- .../{ => v1}/flows/test_listener_flows.py | 2 +- .../flows/test_load_balancer_flows.py | 2 +- .../{ => v1}/flows/test_member_flows.py | 2 +- .../worker/{ => v1}/flows/test_pool_flows.py | 2 +- .../controller/worker/v1/tasks/__init__.py | 11 + .../tasks/test_amphora_driver_tasks.py | 4 +- .../worker/{ => v1}/tasks/test_cert_task.py | 2 +- .../{ => v1}/tasks/test_compute_tasks.py | 2 +- .../{ => v1}/tasks/test_database_tasks.py | 4 +- .../tasks/test_database_tasks_quota.py | 2 +- .../{ => v1}/tasks/test_lifecycle_tasks.py | 2 +- .../worker/{ => v1}/tasks/test_model_tasks.py | 2 +- .../{ => v1}/tasks/test_network_tasks.py | 2 +- .../worker/{ => v1}/test_controller_worker.py | 84 +- .../unit/controller/worker/v2/__init__.py | 11 + .../controller/worker/v2/flows/__init__.py | 11 + .../worker/v2/flows/test_amphora_flows.py | 422 +++ .../v2/flows/test_health_monitor_flows.py | 72 + .../worker/v2/flows/test_l7policy_flows.py | 67 + .../worker/v2/flows/test_l7rule_flows.py | 67 + .../worker/v2/flows/test_listener_flows.py | 90 + .../v2/flows/test_load_balancer_flows.py | 227 ++ .../worker/v2/flows/test_member_flows.py | 88 + .../worker/v2/flows/test_pool_flows.py | 77 + .../controller/worker/v2/tasks/__init__.py | 11 + .../v2/tasks/test_amphora_driver_tasks.py | 672 ++++ .../worker/v2/tasks/test_cert_task.py | 47 + .../worker/v2/tasks/test_compute_tasks.py | 466 +++ .../worker/v2/tasks/test_database_tasks.py | 2727 +++++++++++++++++ .../v2/tasks/test_database_tasks_quota.py | 322 ++ .../worker/v2/tasks/test_lifecycle_tasks.py | 401 +++ .../worker/v2/tasks/test_model_tasks.py | 44 + .../worker/v2/tasks/test_network_tasks.py | 801 +++++ .../worker/v2/test_controller_worker.py | 1465 +++++++++ setup.cfg | 7 +- tools/flow-list.txt | 54 +- zuul.d/jobs.yaml | 16 + zuul.d/projects.yaml | 8 + 109 files changed, 16671 insertions(+), 163 deletions(-) rename octavia/{controller/worker/flows => api/drivers/amphora_driver/v1}/__init__.py (100%) rename octavia/api/drivers/amphora_driver/{ => v1}/driver.py (100%) rename octavia/{controller/worker/tasks => api/drivers/amphora_driver/v2}/__init__.py (100%) create mode 100644 octavia/api/drivers/amphora_driver/v2/driver.py rename octavia/{tests/unit/controller/worker/flows => controller/queue/v1}/__init__.py (100%) rename octavia/controller/queue/{ => v1}/consumer.py (95%) rename octavia/controller/queue/{endpoint.py => v1/endpoints.py} (99%) rename octavia/{tests/unit/controller/worker/tasks => controller/queue/v2}/__init__.py (100%) create mode 100644 octavia/controller/queue/v2/consumer.py create mode 100644 octavia/controller/queue/v2/endpoints.py create mode 100644 octavia/controller/worker/v1/__init__.py rename octavia/controller/worker/{ => v1}/controller_worker.py (98%) create mode 100644 octavia/controller/worker/v1/flows/__init__.py rename octavia/controller/worker/{ => v1}/flows/amphora_flows.py (98%) rename octavia/controller/worker/{ => v1}/flows/health_monitor_flows.py (94%) rename 
octavia/controller/worker/{ => v1}/flows/l7policy_flows.py (94%) rename octavia/controller/worker/{ => v1}/flows/l7rule_flows.py (94%) rename octavia/controller/worker/{ => v1}/flows/listener_flows.py (95%) rename octavia/controller/worker/{ => v1}/flows/load_balancer_flows.py (96%) rename octavia/controller/worker/{ => v1}/flows/member_flows.py (96%) rename octavia/controller/worker/{ => v1}/flows/pool_flows.py (95%) create mode 100644 octavia/controller/worker/v1/tasks/__init__.py rename octavia/controller/worker/{ => v1}/tasks/amphora_driver_tasks.py (100%) rename octavia/controller/worker/{ => v1}/tasks/cert_task.py (100%) rename octavia/controller/worker/{ => v1}/tasks/compute_tasks.py (100%) rename octavia/controller/worker/{ => v1}/tasks/database_tasks.py (100%) rename octavia/controller/worker/{ => v1}/tasks/lifecycle_tasks.py (100%) rename octavia/controller/worker/{ => v1}/tasks/model_tasks.py (100%) rename octavia/controller/worker/{ => v1}/tasks/network_tasks.py (100%) create mode 100644 octavia/controller/worker/v2/__init__.py create mode 100644 octavia/controller/worker/v2/controller_worker.py create mode 100644 octavia/controller/worker/v2/flows/__init__.py create mode 100644 octavia/controller/worker/v2/flows/amphora_flows.py create mode 100644 octavia/controller/worker/v2/flows/health_monitor_flows.py create mode 100644 octavia/controller/worker/v2/flows/l7policy_flows.py create mode 100644 octavia/controller/worker/v2/flows/l7rule_flows.py create mode 100644 octavia/controller/worker/v2/flows/listener_flows.py create mode 100644 octavia/controller/worker/v2/flows/load_balancer_flows.py create mode 100644 octavia/controller/worker/v2/flows/member_flows.py create mode 100644 octavia/controller/worker/v2/flows/pool_flows.py create mode 100644 octavia/controller/worker/v2/tasks/__init__.py create mode 100644 octavia/controller/worker/v2/tasks/amphora_driver_tasks.py create mode 100644 octavia/controller/worker/v2/tasks/cert_task.py create mode 100644 octavia/controller/worker/v2/tasks/compute_tasks.py create mode 100644 octavia/controller/worker/v2/tasks/database_tasks.py create mode 100644 octavia/controller/worker/v2/tasks/lifecycle_tasks.py create mode 100644 octavia/controller/worker/v2/tasks/model_tasks.py create mode 100644 octavia/controller/worker/v2/tasks/network_tasks.py create mode 100644 octavia/tests/unit/api/drivers/amphora_driver/v1/__init__.py rename octavia/tests/unit/api/drivers/amphora_driver/{ => v1}/test_amphora_driver.py (99%) create mode 100644 octavia/tests/unit/api/drivers/amphora_driver/v2/__init__.py create mode 100644 octavia/tests/unit/api/drivers/amphora_driver/v2/test_amphora_driver.py create mode 100644 octavia/tests/unit/controller/queue/v1/__init__.py rename octavia/tests/unit/controller/queue/{ => v1}/test_consumer.py (94%) rename octavia/tests/unit/controller/queue/{test_endpoint.py => v1/test_endpoints.py} (95%) create mode 100644 octavia/tests/unit/controller/queue/v2/__init__.py create mode 100644 octavia/tests/unit/controller/queue/v2/test_consumer.py create mode 100644 octavia/tests/unit/controller/queue/v2/test_endpoints.py create mode 100644 octavia/tests/unit/controller/worker/v1/__init__.py create mode 100644 octavia/tests/unit/controller/worker/v1/flows/__init__.py rename octavia/tests/unit/controller/worker/{ => v1}/flows/test_amphora_flows.py (99%) rename octavia/tests/unit/controller/worker/{ => v1}/flows/test_health_monitor_flows.py (97%) rename octavia/tests/unit/controller/worker/{ => v1}/flows/test_l7policy_flows.py (97%) 
rename octavia/tests/unit/controller/worker/{ => v1}/flows/test_l7rule_flows.py (97%) rename octavia/tests/unit/controller/worker/{ => v1}/flows/test_listener_flows.py (98%) rename octavia/tests/unit/controller/worker/{ => v1}/flows/test_load_balancer_flows.py (99%) rename octavia/tests/unit/controller/worker/{ => v1}/flows/test_member_flows.py (98%) rename octavia/tests/unit/controller/worker/{ => v1}/flows/test_pool_flows.py (97%) create mode 100644 octavia/tests/unit/controller/worker/v1/tasks/__init__.py rename octavia/tests/unit/controller/worker/{ => v1}/tasks/test_amphora_driver_tasks.py (99%) rename octavia/tests/unit/controller/worker/{ => v1}/tasks/test_cert_task.py (96%) rename octavia/tests/unit/controller/worker/{ => v1}/tasks/test_compute_tasks.py (99%) rename octavia/tests/unit/controller/worker/{ => v1}/tasks/test_database_tasks.py (99%) rename octavia/tests/unit/controller/worker/{ => v1}/tasks/test_database_tasks_quota.py (99%) rename octavia/tests/unit/controller/worker/{ => v1}/tasks/test_lifecycle_tasks.py (99%) rename octavia/tests/unit/controller/worker/{ => v1}/tasks/test_model_tasks.py (95%) rename octavia/tests/unit/controller/worker/{ => v1}/tasks/test_network_tasks.py (99%) rename octavia/tests/unit/controller/worker/{ => v1}/test_controller_worker.py (96%) create mode 100644 octavia/tests/unit/controller/worker/v2/__init__.py create mode 100644 octavia/tests/unit/controller/worker/v2/flows/__init__.py create mode 100644 octavia/tests/unit/controller/worker/v2/flows/test_amphora_flows.py create mode 100644 octavia/tests/unit/controller/worker/v2/flows/test_health_monitor_flows.py create mode 100644 octavia/tests/unit/controller/worker/v2/flows/test_l7policy_flows.py create mode 100644 octavia/tests/unit/controller/worker/v2/flows/test_l7rule_flows.py create mode 100644 octavia/tests/unit/controller/worker/v2/flows/test_listener_flows.py create mode 100644 octavia/tests/unit/controller/worker/v2/flows/test_load_balancer_flows.py create mode 100644 octavia/tests/unit/controller/worker/v2/flows/test_member_flows.py create mode 100644 octavia/tests/unit/controller/worker/v2/flows/test_pool_flows.py create mode 100644 octavia/tests/unit/controller/worker/v2/tasks/__init__.py create mode 100644 octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py create mode 100644 octavia/tests/unit/controller/worker/v2/tasks/test_cert_task.py create mode 100644 octavia/tests/unit/controller/worker/v2/tasks/test_compute_tasks.py create mode 100644 octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks.py create mode 100644 octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks_quota.py create mode 100644 octavia/tests/unit/controller/worker/v2/tasks/test_lifecycle_tasks.py create mode 100644 octavia/tests/unit/controller/worker/v2/tasks/test_model_tasks.py create mode 100644 octavia/tests/unit/controller/worker/v2/tasks/test_network_tasks.py create mode 100644 octavia/tests/unit/controller/worker/v2/test_controller_worker.py diff --git a/octavia/controller/worker/flows/__init__.py b/octavia/api/drivers/amphora_driver/v1/__init__.py similarity index 100% rename from octavia/controller/worker/flows/__init__.py rename to octavia/api/drivers/amphora_driver/v1/__init__.py diff --git a/octavia/api/drivers/amphora_driver/driver.py b/octavia/api/drivers/amphora_driver/v1/driver.py similarity index 100% rename from octavia/api/drivers/amphora_driver/driver.py rename to octavia/api/drivers/amphora_driver/v1/driver.py diff --git 
a/octavia/controller/worker/tasks/__init__.py b/octavia/api/drivers/amphora_driver/v2/__init__.py similarity index 100% rename from octavia/controller/worker/tasks/__init__.py rename to octavia/api/drivers/amphora_driver/v2/__init__.py diff --git a/octavia/api/drivers/amphora_driver/v2/driver.py b/octavia/api/drivers/amphora_driver/v2/driver.py new file mode 100644 index 0000000000..a1fdcd9d41 --- /dev/null +++ b/octavia/api/drivers/amphora_driver/v2/driver.py @@ -0,0 +1,344 @@ +# Copyright 2018 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from jsonschema import exceptions as js_exceptions +from jsonschema import validate + +from oslo_config import cfg +from oslo_log import log as logging +import oslo_messaging as messaging +from stevedore import driver as stevedore_driver + +from octavia_lib.api.drivers import data_models as driver_dm +from octavia_lib.api.drivers import exceptions +from octavia_lib.api.drivers import provider_base as driver_base + +from octavia.api.drivers.amphora_driver import flavor_schema +from octavia.api.drivers import utils as driver_utils +from octavia.common import constants as consts +from octavia.common import data_models +from octavia.common import rpc +from octavia.common import utils +from octavia.db import api as db_apis +from octavia.db import repositories +from octavia.network import base as network_base + +CONF = cfg.CONF +CONF.import_group('oslo_messaging', 'octavia.common.config') +LOG = logging.getLogger(__name__) + + +class AmphoraProviderDriver(driver_base.ProviderDriver): + def __init__(self): + super(AmphoraProviderDriver, self).__init__() + self.target = messaging.Target( + namespace=consts.RPC_NAMESPACE_CONTROLLER_AGENT, + topic=consts.TOPIC_AMPHORA_V2, version="2.0", fanout=False) + self.client = rpc.get_client(self.target) + self.repositories = repositories.Repositories() + + # Load Balancer + def create_vip_port(self, loadbalancer_id, project_id, vip_dictionary): + vip_obj = driver_utils.provider_vip_dict_to_vip_obj(vip_dictionary) + lb_obj = data_models.LoadBalancer(id=loadbalancer_id, + project_id=project_id, vip=vip_obj) + + network_driver = utils.get_network_driver() + try: + vip = network_driver.allocate_vip(lb_obj) + except network_base.AllocateVIPException as e: + raise exceptions.DriverError(user_fault_string=e.orig_msg, + operator_fault_string=e.orig_msg) + + LOG.info('Amphora provider created VIP port %s for load balancer %s.', + vip.port_id, loadbalancer_id) + return driver_utils.vip_dict_to_provider_dict(vip.to_dict()) + + # TODO(johnsom) convert this to octavia_lib constant flavor + # once octavia is transitioned to use octavia_lib + def loadbalancer_create(self, loadbalancer): + if loadbalancer.flavor == driver_dm.Unset: + loadbalancer.flavor = None + payload = {consts.LOAD_BALANCER_ID: loadbalancer.loadbalancer_id, + consts.FLAVOR: loadbalancer.flavor} + self.client.cast({}, 'create_load_balancer', **payload) + + def loadbalancer_delete(self, loadbalancer, cascade=False): + loadbalancer_id = 
loadbalancer.loadbalancer_id + payload = {consts.LOAD_BALANCER_ID: loadbalancer_id, + 'cascade': cascade} + self.client.cast({}, 'delete_load_balancer', **payload) + + def loadbalancer_failover(self, loadbalancer_id): + payload = {consts.LOAD_BALANCER_ID: loadbalancer_id} + self.client.cast({}, 'failover_load_balancer', **payload) + + def loadbalancer_update(self, old_loadbalancer, new_loadbalancer): + # Adapt the provider data model to the queue schema + lb_dict = new_loadbalancer.to_dict() + if 'admin_state_up' in lb_dict: + lb_dict['enabled'] = lb_dict.pop('admin_state_up') + lb_id = lb_dict.pop('loadbalancer_id') + # Put the qos_policy_id back under the vip element the controller + # expects + vip_qos_policy_id = lb_dict.pop('vip_qos_policy_id', None) + if vip_qos_policy_id: + vip_dict = {"qos_policy_id": vip_qos_policy_id} + lb_dict["vip"] = vip_dict + + payload = {consts.LOAD_BALANCER_ID: lb_id, + consts.LOAD_BALANCER_UPDATES: lb_dict} + self.client.cast({}, 'update_load_balancer', **payload) + + # Listener + def listener_create(self, listener): + payload = {consts.LISTENER_ID: listener.listener_id} + self.client.cast({}, 'create_listener', **payload) + + def listener_delete(self, listener): + listener_id = listener.listener_id + payload = {consts.LISTENER_ID: listener_id} + self.client.cast({}, 'delete_listener', **payload) + + def listener_update(self, old_listener, new_listener): + listener_dict = new_listener.to_dict() + if 'admin_state_up' in listener_dict: + listener_dict['enabled'] = listener_dict.pop('admin_state_up') + listener_id = listener_dict.pop('listener_id') + if 'client_ca_tls_container_ref' in listener_dict: + listener_dict['client_ca_tls_container_id'] = listener_dict.pop( + 'client_ca_tls_container_ref') + listener_dict.pop('client_ca_tls_container_data', None) + if 'client_crl_container_ref' in listener_dict: + listener_dict['client_crl_container_id'] = listener_dict.pop( + 'client_crl_container_ref') + listener_dict.pop('client_crl_container_data', None) + + payload = {consts.LISTENER_ID: listener_id, + consts.LISTENER_UPDATES: listener_dict} + self.client.cast({}, 'update_listener', **payload) + + # Pool + def pool_create(self, pool): + payload = {consts.POOL_ID: pool.pool_id} + self.client.cast({}, 'create_pool', **payload) + + def pool_delete(self, pool): + pool_id = pool.pool_id + payload = {consts.POOL_ID: pool_id} + self.client.cast({}, 'delete_pool', **payload) + + def pool_update(self, old_pool, new_pool): + pool_dict = new_pool.to_dict() + if 'admin_state_up' in pool_dict: + pool_dict['enabled'] = pool_dict.pop('admin_state_up') + pool_id = pool_dict.pop('pool_id') + if 'tls_container_ref' in pool_dict: + pool_dict['tls_container_id'] = pool_dict.pop('tls_container_ref') + pool_dict.pop('tls_container_data', None) + if 'ca_tls_container_ref' in pool_dict: + pool_dict['ca_tls_certificate_id'] = pool_dict.pop( + 'ca_tls_container_ref') + pool_dict.pop('ca_tls_container_data', None) + if 'client_crl_container_ref' in pool_dict: + pool_dict['client_crl_container_id'] = pool_dict.pop( + 'client_crl_container_ref') + pool_dict.pop('client_crl_container_data', None) + + payload = {consts.POOL_ID: pool_id, + consts.POOL_UPDATES: pool_dict} + self.client.cast({}, 'update_pool', **payload) + + # Member + def member_create(self, member): + payload = {consts.MEMBER_ID: member.member_id} + self.client.cast({}, 'create_member', **payload) + + def member_delete(self, member): + member_id = member.member_id + payload = {consts.MEMBER_ID: member_id} + 
self.client.cast({}, 'delete_member', **payload) + + def member_update(self, old_member, new_member): + member_dict = new_member.to_dict() + if 'admin_state_up' in member_dict: + member_dict['enabled'] = member_dict.pop('admin_state_up') + member_id = member_dict.pop('member_id') + + payload = {consts.MEMBER_ID: member_id, + consts.MEMBER_UPDATES: member_dict} + self.client.cast({}, 'update_member', **payload) + + def member_batch_update(self, members): + # Get a list of existing members + pool_id = members[0].pool_id + # The DB should not have updated yet, so we can still use the pool + db_pool = self.repositories.pool.get(db_apis.get_session(), id=pool_id) + old_members = db_pool.members + + old_member_ids = [m.id for m in old_members] + # The driver will always pass objects with IDs. + new_member_ids = [m.member_id for m in members] + + # Find members that are brand new or updated + new_members = [] + updated_members = [] + for m in members: + if m.member_id not in old_member_ids: + new_members.append(m) + else: + member_dict = m.to_dict(render_unsets=False) + member_dict['id'] = member_dict.pop('member_id') + if 'address' in member_dict: + member_dict['ip_address'] = member_dict.pop('address') + if 'admin_state_up' in member_dict: + member_dict['enabled'] = member_dict.pop('admin_state_up') + updated_members.append(member_dict) + + # Find members that are deleted + deleted_members = [] + for m in old_members: + if m.id not in new_member_ids: + deleted_members.append(m) + + payload = {'old_member_ids': [m.id for m in deleted_members], + 'new_member_ids': [m.member_id for m in new_members], + 'updated_members': updated_members} + self.client.cast({}, 'batch_update_members', **payload) + + # Health Monitor + def health_monitor_create(self, healthmonitor): + payload = {consts.HEALTH_MONITOR_ID: healthmonitor.healthmonitor_id} + self.client.cast({}, 'create_health_monitor', **payload) + + def health_monitor_delete(self, healthmonitor): + healthmonitor_id = healthmonitor.healthmonitor_id + payload = {consts.HEALTH_MONITOR_ID: healthmonitor_id} + self.client.cast({}, 'delete_health_monitor', **payload) + + def health_monitor_update(self, old_healthmonitor, new_healthmonitor): + healthmon_dict = new_healthmonitor.to_dict() + if 'admin_state_up' in healthmon_dict: + healthmon_dict['enabled'] = healthmon_dict.pop('admin_state_up') + if 'max_retries_down' in healthmon_dict: + healthmon_dict['fall_threshold'] = healthmon_dict.pop( + 'max_retries_down') + if 'max_retries' in healthmon_dict: + healthmon_dict['rise_threshold'] = healthmon_dict.pop( + 'max_retries') + healthmon_id = healthmon_dict.pop('healthmonitor_id') + + payload = {consts.HEALTH_MONITOR_ID: healthmon_id, + consts.HEALTH_MONITOR_UPDATES: healthmon_dict} + self.client.cast({}, 'update_health_monitor', **payload) + + # L7 Policy + def l7policy_create(self, l7policy): + payload = {consts.L7POLICY_ID: l7policy.l7policy_id} + self.client.cast({}, 'create_l7policy', **payload) + + def l7policy_delete(self, l7policy): + l7policy_id = l7policy.l7policy_id + payload = {consts.L7POLICY_ID: l7policy_id} + self.client.cast({}, 'delete_l7policy', **payload) + + def l7policy_update(self, old_l7policy, new_l7policy): + l7policy_dict = new_l7policy.to_dict() + if 'admin_state_up' in l7policy_dict: + l7policy_dict['enabled'] = l7policy_dict.pop('admin_state_up') + l7policy_id = l7policy_dict.pop('l7policy_id') + + payload = {consts.L7POLICY_ID: l7policy_id, + consts.L7POLICY_UPDATES: l7policy_dict} + self.client.cast({}, 'update_l7policy', 
**payload) + + # L7 Rule + def l7rule_create(self, l7rule): + payload = {consts.L7RULE_ID: l7rule.l7rule_id} + self.client.cast({}, 'create_l7rule', **payload) + + def l7rule_delete(self, l7rule): + l7rule_id = l7rule.l7rule_id + payload = {consts.L7RULE_ID: l7rule_id} + self.client.cast({}, 'delete_l7rule', **payload) + + def l7rule_update(self, old_l7rule, new_l7rule): + l7rule_dict = new_l7rule.to_dict() + if 'admin_state_up' in l7rule_dict: + l7rule_dict['enabled'] = l7rule_dict.pop('admin_state_up') + l7rule_id = l7rule_dict.pop('l7rule_id') + + payload = {consts.L7RULE_ID: l7rule_id, + consts.L7RULE_UPDATES: l7rule_dict} + self.client.cast({}, 'update_l7rule', **payload) + + # Flavor + def get_supported_flavor_metadata(self): + """Returns the valid flavor metadata keys and descriptions. + + This extracts the valid flavor metadata keys and descriptions + from the JSON validation schema and returns it as a dictionary. + + :return: Dictionary of flavor metadata keys and descriptions. + :raises DriverError: An unexpected error occurred. + """ + try: + props = flavor_schema.SUPPORTED_FLAVOR_SCHEMA['properties'] + return {k: v.get('description', '') for k, v in props.items()} + except Exception as e: + raise exceptions.DriverError( + user_fault_string='Failed to get the supported flavor ' + 'metadata due to: {}'.format(str(e)), + operator_fault_string='Failed to get the supported flavor ' + 'metadata due to: {}'.format(str(e))) + + def validate_flavor(self, flavor_dict): + """Validates flavor profile data. + + This will validate a flavor profile dataset against the flavor + settings the amphora driver supports. + + :param flavor_dict: The flavor dictionary to validate. + :type flavor: dict + :return: None + :raises DriverError: An unexpected error occurred. + :raises UnsupportedOptionError: If the driver does not support + one of the flavor settings. + """ + try: + validate(flavor_dict, flavor_schema.SUPPORTED_FLAVOR_SCHEMA) + except js_exceptions.ValidationError as e: + error_object = '' + if e.relative_path: + error_object = '{} '.format(e.relative_path[0]) + raise exceptions.UnsupportedOptionError( + user_fault_string='{0}{1}'.format(error_object, e.message), + operator_fault_string=str(e)) + except Exception as e: + raise exceptions.DriverError( + user_fault_string='Failed to validate the flavor metadata ' + 'due to: {}'.format(str(e)), + operator_fault_string='Failed to validate the flavor metadata ' + 'due to: {}'.format(str(e))) + compute_flavor = flavor_dict.get(consts.COMPUTE_FLAVOR, None) + if compute_flavor: + compute_driver = stevedore_driver.DriverManager( + namespace='octavia.compute.drivers', + name=CONF.controller_worker.compute_driver, + invoke_on_load=True + ).driver + + # TODO(johnsom) Fix this to raise a NotFound error + # when the octavia-lib supports it. 
+ compute_driver.validate_flavor(compute_flavor) diff --git a/octavia/api/v2/controllers/listener.py b/octavia/api/v2/controllers/listener.py index cfad3c75ff..ea8763e648 100644 --- a/octavia/api/v2/controllers/listener.py +++ b/octavia/api/v2/controllers/listener.py @@ -301,7 +301,9 @@ class ListenersController(base.BaseController): # re-inject the sni container references lost due to SNI # being a separate table in the DB - provider_listener.sni_container_refs = listener.sni_container_refs + if listener.sni_container_refs != wtypes.Unset: + provider_listener.sni_container_refs = ( + listener.sni_container_refs) # Dispatch to the driver LOG.info("Sending create Listener %s to provider %s", diff --git a/octavia/cmd/octavia_worker.py b/octavia/cmd/octavia_worker.py index 70ce26d14a..03ec52efd1 100644 --- a/octavia/cmd/octavia_worker.py +++ b/octavia/cmd/octavia_worker.py @@ -20,7 +20,8 @@ from oslo_config import cfg from oslo_reports import guru_meditation_report as gmr from octavia.common import service as octavia_service -from octavia.controller.queue import consumer +from octavia.controller.queue.v1 import consumer as consumer_v1 +from octavia.controller.queue.v2 import consumer as consumer_v2 from octavia import version CONF = cfg.CONF @@ -32,7 +33,9 @@ def main(): gmr.TextGuruMeditation.setup_autorun(version) sm = cotyledon.ServiceManager() - sm.add(consumer.ConsumerService, workers=CONF.controller_worker.workers, + sm.add(consumer_v1.ConsumerService, workers=CONF.controller_worker.workers, args=(CONF,)) + sm.add(consumer_v2.ConsumerService, + workers=CONF.controller_worker.workers, args=(CONF,)) oslo_config_glue.setup(sm, CONF, reload_method="mutate") sm.run() diff --git a/octavia/common/constants.py b/octavia/common/constants.py index f86480496e..917ddcea86 100644 --- a/octavia/common/constants.py +++ b/octavia/common/constants.py @@ -656,3 +656,5 @@ CLIENT_AUTH_OPTIONAL = 'OPTIONAL' CLIENT_AUTH_MANDATORY = 'MANDATORY' SUPPORTED_CLIENT_AUTH_MODES = [CLIENT_AUTH_NONE, CLIENT_AUTH_OPTIONAL, CLIENT_AUTH_MANDATORY] + +TOPIC_AMPHORA_V2 = 'octavia_provisioning_v2' diff --git a/octavia/controller/healthmanager/health_manager.py b/octavia/controller/healthmanager/health_manager.py index 31182e26b1..f824596b15 100644 --- a/octavia/controller/healthmanager/health_manager.py +++ b/octavia/controller/healthmanager/health_manager.py @@ -23,7 +23,7 @@ from oslo_log import log as logging from oslo_utils import excutils from octavia.common import constants -from octavia.controller.worker import controller_worker as cw +from octavia.controller.worker.v1 import controller_worker as cw from octavia.db import api as db_api from octavia.db import repositories as repo diff --git a/octavia/controller/housekeeping/house_keeping.py b/octavia/controller/housekeeping/house_keeping.py index 102f4423bf..4536027afa 100644 --- a/octavia/controller/housekeeping/house_keeping.py +++ b/octavia/controller/housekeeping/house_keeping.py @@ -20,7 +20,7 @@ from oslo_log import log as logging from oslo_utils import timeutils from sqlalchemy.orm import exc as sqlalchemy_exceptions -from octavia.controller.worker import controller_worker as cw +from octavia.controller.worker.v1 import controller_worker as cw from octavia.db import api as db_api from octavia.db import repositories as repo diff --git a/octavia/tests/unit/controller/worker/flows/__init__.py b/octavia/controller/queue/v1/__init__.py similarity index 100% rename from octavia/tests/unit/controller/worker/flows/__init__.py rename to 
octavia/controller/queue/v1/__init__.py diff --git a/octavia/controller/queue/consumer.py b/octavia/controller/queue/v1/consumer.py similarity index 95% rename from octavia/controller/queue/consumer.py rename to octavia/controller/queue/v1/consumer.py index 5cf8766dfa..15763927ed 100644 --- a/octavia/controller/queue/consumer.py +++ b/octavia/controller/queue/v1/consumer.py @@ -18,7 +18,7 @@ import oslo_messaging as messaging from oslo_messaging.rpc import dispatcher from octavia.common import rpc -from octavia.controller.queue import endpoint +from octavia.controller.queue.v1 import endpoints LOG = logging.getLogger(__name__) @@ -38,7 +38,7 @@ class ConsumerService(cotyledon.Service): LOG.info('Starting consumer...') target = messaging.Target(topic=self.topic, server=self.server, fanout=False) - self.endpoints = [endpoint.Endpoint()] + self.endpoints = [endpoints.Endpoints()] self.message_listener = rpc.get_server( target, self.endpoints, executor='threading', diff --git a/octavia/controller/queue/endpoint.py b/octavia/controller/queue/v1/endpoints.py similarity index 99% rename from octavia/controller/queue/endpoint.py rename to octavia/controller/queue/v1/endpoints.py index 72af65fbe6..3355da7d90 100644 --- a/octavia/controller/queue/endpoint.py +++ b/octavia/controller/queue/v1/endpoints.py @@ -24,7 +24,7 @@ CONF = cfg.CONF LOG = logging.getLogger(__name__) -class Endpoint(object): +class Endpoints(object): # API version history: # 1.0 - Initial version. diff --git a/octavia/tests/unit/controller/worker/tasks/__init__.py b/octavia/controller/queue/v2/__init__.py similarity index 100% rename from octavia/tests/unit/controller/worker/tasks/__init__.py rename to octavia/controller/queue/v2/__init__.py diff --git a/octavia/controller/queue/v2/consumer.py b/octavia/controller/queue/v2/consumer.py new file mode 100644 index 0000000000..9143dca15f --- /dev/null +++ b/octavia/controller/queue/v2/consumer.py @@ -0,0 +1,65 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import cotyledon +from oslo_log import log as logging +import oslo_messaging as messaging +from oslo_messaging.rpc import dispatcher + +from octavia.common import constants +from octavia.common import rpc +from octavia.controller.queue.v2 import endpoints + +LOG = logging.getLogger(__name__) + + +class ConsumerService(cotyledon.Service): + + def __init__(self, worker_id, conf): + super(ConsumerService, self).__init__(worker_id) + self.conf = conf + self.topic = constants.TOPIC_AMPHORA_V2 + self.server = conf.host + self.endpoints = [] + self.access_policy = dispatcher.DefaultRPCAccessPolicy + self.message_listener = None + + def run(self): + LOG.info('Starting V2 consumer...') + target = messaging.Target(topic=self.topic, server=self.server, + fanout=False) + self.endpoints = [endpoints.Endpoints()] + self.message_listener = rpc.get_server( + target, self.endpoints, + executor='threading', + access_policy=self.access_policy + ) + self.message_listener.start() + + def terminate(self, graceful=False): + if self.message_listener: + LOG.info('Stopping V2 consumer...') + self.message_listener.stop() + if graceful: + LOG.info('V2 Consumer successfully stopped. Waiting for ' + 'final messages to be processed...') + self.message_listener.wait() + if self.endpoints: + LOG.info('Shutting down V2 endpoint worker executors...') + for e in self.endpoints: + try: + e.worker.executor.shutdown() + except AttributeError: + pass + super(ConsumerService, self).terminate() diff --git a/octavia/controller/queue/v2/endpoints.py b/octavia/controller/queue/v2/endpoints.py new file mode 100644 index 0000000000..00eaef7487 --- /dev/null +++ b/octavia/controller/queue/v2/endpoints.py @@ -0,0 +1,156 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg +from oslo_log import log as logging +import oslo_messaging as messaging +from stevedore import driver as stevedore_driver + +from octavia.common import constants + +CONF = cfg.CONF + +LOG = logging.getLogger(__name__) + + +class Endpoints(object): + + # API version history: + # 1.0 - Initial version. 
+ # 2.0 - Provider driver format + target = messaging.Target( + namespace=constants.RPC_NAMESPACE_CONTROLLER_AGENT, + version='2.0') + + def __init__(self): + self.worker = stevedore_driver.DriverManager( + namespace='octavia.plugins', + name=CONF.octavia_plugins, + invoke_on_load=True + ).driver + + def create_load_balancer(self, context, load_balancer_id, + flavor=None): + LOG.info('Creating load balancer \'%s\'...', load_balancer_id) + self.worker.create_load_balancer(load_balancer_id, flavor) + + def update_load_balancer(self, context, load_balancer_id, + load_balancer_updates): + LOG.info('Updating load balancer \'%s\'...', load_balancer_id) + self.worker.update_load_balancer(load_balancer_id, + load_balancer_updates) + + def delete_load_balancer(self, context, load_balancer_id, cascade=False): + LOG.info('Deleting load balancer \'%s\'...', load_balancer_id) + self.worker.delete_load_balancer(load_balancer_id, cascade) + + def failover_load_balancer(self, context, load_balancer_id): + LOG.info('Failing over amphora in load balancer \'%s\'...', + load_balancer_id) + self.worker.failover_loadbalancer(load_balancer_id) + + def failover_amphora(self, context, amphora_id): + LOG.info('Failing over amphora \'%s\'...', + amphora_id) + self.worker.failover_amphora(amphora_id) + + def create_listener(self, context, listener_id): + LOG.info('Creating listener \'%s\'...', listener_id) + self.worker.create_listener(listener_id) + + def update_listener(self, context, listener_id, listener_updates): + LOG.info('Updating listener \'%s\'...', listener_id) + self.worker.update_listener(listener_id, listener_updates) + + def delete_listener(self, context, listener_id): + LOG.info('Deleting listener \'%s\'...', listener_id) + self.worker.delete_listener(listener_id) + + def create_pool(self, context, pool_id): + LOG.info('Creating pool \'%s\'...', pool_id) + self.worker.create_pool(pool_id) + + def update_pool(self, context, pool_id, pool_updates): + LOG.info('Updating pool \'%s\'...', pool_id) + self.worker.update_pool(pool_id, pool_updates) + + def delete_pool(self, context, pool_id): + LOG.info('Deleting pool \'%s\'...', pool_id) + self.worker.delete_pool(pool_id) + + def create_health_monitor(self, context, health_monitor_id): + LOG.info('Creating health monitor \'%s\'...', health_monitor_id) + self.worker.create_health_monitor(health_monitor_id) + + def update_health_monitor(self, context, health_monitor_id, + health_monitor_updates): + LOG.info('Updating health monitor \'%s\'...', health_monitor_id) + self.worker.update_health_monitor(health_monitor_id, + health_monitor_updates) + + def delete_health_monitor(self, context, health_monitor_id): + LOG.info('Deleting health monitor \'%s\'...', health_monitor_id) + self.worker.delete_health_monitor(health_monitor_id) + + def create_member(self, context, member_id): + LOG.info('Creating member \'%s\'...', member_id) + self.worker.create_member(member_id) + + def update_member(self, context, member_id, member_updates): + LOG.info('Updating member \'%s\'...', member_id) + self.worker.update_member(member_id, member_updates) + + def batch_update_members(self, context, old_member_ids, new_member_ids, + updated_members): + updated_member_ids = [m.get('id') for m in updated_members] + LOG.info( + 'Batch updating members: old=\'%(old)s\', new=\'%(new)s\', ' + 'updated=\'%(updated)s\'...', + {'old': old_member_ids, 'new': new_member_ids, + 'updated': updated_member_ids}) + self.worker.batch_update_members( + old_member_ids, new_member_ids, updated_members) + + 
def delete_member(self, context, member_id): + LOG.info('Deleting member \'%s\'...', member_id) + self.worker.delete_member(member_id) + + def create_l7policy(self, context, l7policy_id): + LOG.info('Creating l7policy \'%s\'...', l7policy_id) + self.worker.create_l7policy(l7policy_id) + + def update_l7policy(self, context, l7policy_id, l7policy_updates): + LOG.info('Updating l7policy \'%s\'...', l7policy_id) + self.worker.update_l7policy(l7policy_id, l7policy_updates) + + def delete_l7policy(self, context, l7policy_id): + LOG.info('Deleting l7policy \'%s\'...', l7policy_id) + self.worker.delete_l7policy(l7policy_id) + + def create_l7rule(self, context, l7rule_id): + LOG.info('Creating l7rule \'%s\'...', l7rule_id) + self.worker.create_l7rule(l7rule_id) + + def update_l7rule(self, context, l7rule_id, l7rule_updates): + LOG.info('Updating l7rule \'%s\'...', l7rule_id) + self.worker.update_l7rule(l7rule_id, l7rule_updates) + + def delete_l7rule(self, context, l7rule_id): + LOG.info('Deleting l7rule \'%s\'...', l7rule_id) + self.worker.delete_l7rule(l7rule_id) + + def update_amphora_agent_config(self, context, amphora_id): + LOG.info('Updating amphora \'%s\' agent configuration...', + amphora_id) + self.worker.update_amphora_agent_config(amphora_id) diff --git a/octavia/controller/worker/v1/__init__.py b/octavia/controller/worker/v1/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/controller/worker/v1/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
diff --git a/octavia/controller/worker/controller_worker.py b/octavia/controller/worker/v1/controller_worker.py similarity index 98% rename from octavia/controller/worker/controller_worker.py rename to octavia/controller/worker/v1/controller_worker.py index 4c011e2ff5..9114344854 100644 --- a/octavia/controller/worker/controller_worker.py +++ b/octavia/controller/worker/v1/controller_worker.py @@ -23,14 +23,14 @@ import tenacity from octavia.common import base_taskflow from octavia.common import constants -from octavia.controller.worker.flows import amphora_flows -from octavia.controller.worker.flows import health_monitor_flows -from octavia.controller.worker.flows import l7policy_flows -from octavia.controller.worker.flows import l7rule_flows -from octavia.controller.worker.flows import listener_flows -from octavia.controller.worker.flows import load_balancer_flows -from octavia.controller.worker.flows import member_flows -from octavia.controller.worker.flows import pool_flows +from octavia.controller.worker.v1.flows import amphora_flows +from octavia.controller.worker.v1.flows import health_monitor_flows +from octavia.controller.worker.v1.flows import l7policy_flows +from octavia.controller.worker.v1.flows import l7rule_flows +from octavia.controller.worker.v1.flows import listener_flows +from octavia.controller.worker.v1.flows import load_balancer_flows +from octavia.controller.worker.v1.flows import member_flows +from octavia.controller.worker.v1.flows import pool_flows from octavia.db import api as db_apis from octavia.db import repositories as repo diff --git a/octavia/controller/worker/v1/flows/__init__.py b/octavia/controller/worker/v1/flows/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/controller/worker/v1/flows/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
diff --git a/octavia/controller/worker/flows/amphora_flows.py b/octavia/controller/worker/v1/flows/amphora_flows.py similarity index 98% rename from octavia/controller/worker/flows/amphora_flows.py rename to octavia/controller/worker/v1/flows/amphora_flows.py index 137466f580..966de2a54b 100644 --- a/octavia/controller/worker/flows/amphora_flows.py +++ b/octavia/controller/worker/v1/flows/amphora_flows.py @@ -19,12 +19,12 @@ from taskflow.patterns import linear_flow from taskflow.patterns import unordered_flow from octavia.common import constants -from octavia.controller.worker.tasks import amphora_driver_tasks -from octavia.controller.worker.tasks import cert_task -from octavia.controller.worker.tasks import compute_tasks -from octavia.controller.worker.tasks import database_tasks -from octavia.controller.worker.tasks import lifecycle_tasks -from octavia.controller.worker.tasks import network_tasks +from octavia.controller.worker.v1.tasks import amphora_driver_tasks +from octavia.controller.worker.v1.tasks import cert_task +from octavia.controller.worker.v1.tasks import compute_tasks +from octavia.controller.worker.v1.tasks import database_tasks +from octavia.controller.worker.v1.tasks import lifecycle_tasks +from octavia.controller.worker.v1.tasks import network_tasks CONF = cfg.CONF diff --git a/octavia/controller/worker/flows/health_monitor_flows.py b/octavia/controller/worker/v1/flows/health_monitor_flows.py similarity index 94% rename from octavia/controller/worker/flows/health_monitor_flows.py rename to octavia/controller/worker/v1/flows/health_monitor_flows.py index c37c1742b7..81a37741fa 100644 --- a/octavia/controller/worker/flows/health_monitor_flows.py +++ b/octavia/controller/worker/v1/flows/health_monitor_flows.py @@ -16,10 +16,10 @@ from taskflow.patterns import linear_flow from octavia.common import constants -from octavia.controller.worker.tasks import amphora_driver_tasks -from octavia.controller.worker.tasks import database_tasks -from octavia.controller.worker.tasks import lifecycle_tasks -from octavia.controller.worker.tasks import model_tasks +from octavia.controller.worker.v1.tasks import amphora_driver_tasks +from octavia.controller.worker.v1.tasks import database_tasks +from octavia.controller.worker.v1.tasks import lifecycle_tasks +from octavia.controller.worker.v1.tasks import model_tasks class HealthMonitorFlows(object): diff --git a/octavia/controller/worker/flows/l7policy_flows.py b/octavia/controller/worker/v1/flows/l7policy_flows.py similarity index 94% rename from octavia/controller/worker/flows/l7policy_flows.py rename to octavia/controller/worker/v1/flows/l7policy_flows.py index 02a0e81489..59a1890ed7 100644 --- a/octavia/controller/worker/flows/l7policy_flows.py +++ b/octavia/controller/worker/v1/flows/l7policy_flows.py @@ -16,10 +16,10 @@ from taskflow.patterns import linear_flow from octavia.common import constants -from octavia.controller.worker.tasks import amphora_driver_tasks -from octavia.controller.worker.tasks import database_tasks -from octavia.controller.worker.tasks import lifecycle_tasks -from octavia.controller.worker.tasks import model_tasks +from octavia.controller.worker.v1.tasks import amphora_driver_tasks +from octavia.controller.worker.v1.tasks import database_tasks +from octavia.controller.worker.v1.tasks import lifecycle_tasks +from octavia.controller.worker.v1.tasks import model_tasks class L7PolicyFlows(object): diff --git a/octavia/controller/worker/flows/l7rule_flows.py b/octavia/controller/worker/v1/flows/l7rule_flows.py 
similarity index 94% rename from octavia/controller/worker/flows/l7rule_flows.py rename to octavia/controller/worker/v1/flows/l7rule_flows.py index e3ea6d8d21..62ab8c72e4 100644 --- a/octavia/controller/worker/flows/l7rule_flows.py +++ b/octavia/controller/worker/v1/flows/l7rule_flows.py @@ -16,10 +16,10 @@ from taskflow.patterns import linear_flow from octavia.common import constants -from octavia.controller.worker.tasks import amphora_driver_tasks -from octavia.controller.worker.tasks import database_tasks -from octavia.controller.worker.tasks import lifecycle_tasks -from octavia.controller.worker.tasks import model_tasks +from octavia.controller.worker.v1.tasks import amphora_driver_tasks +from octavia.controller.worker.v1.tasks import database_tasks +from octavia.controller.worker.v1.tasks import lifecycle_tasks +from octavia.controller.worker.v1.tasks import model_tasks class L7RuleFlows(object): diff --git a/octavia/controller/worker/flows/listener_flows.py b/octavia/controller/worker/v1/flows/listener_flows.py similarity index 95% rename from octavia/controller/worker/flows/listener_flows.py rename to octavia/controller/worker/v1/flows/listener_flows.py index eaaaf48ce4..43d7903689 100644 --- a/octavia/controller/worker/flows/listener_flows.py +++ b/octavia/controller/worker/v1/flows/listener_flows.py @@ -16,10 +16,10 @@ from taskflow.patterns import linear_flow from octavia.common import constants -from octavia.controller.worker.tasks import amphora_driver_tasks -from octavia.controller.worker.tasks import database_tasks -from octavia.controller.worker.tasks import lifecycle_tasks -from octavia.controller.worker.tasks import network_tasks +from octavia.controller.worker.v1.tasks import amphora_driver_tasks +from octavia.controller.worker.v1.tasks import database_tasks +from octavia.controller.worker.v1.tasks import lifecycle_tasks +from octavia.controller.worker.v1.tasks import network_tasks class ListenerFlows(object): diff --git a/octavia/controller/worker/flows/load_balancer_flows.py b/octavia/controller/worker/v1/flows/load_balancer_flows.py similarity index 96% rename from octavia/controller/worker/flows/load_balancer_flows.py rename to octavia/controller/worker/v1/flows/load_balancer_flows.py index ff1b987c7c..96abd55c0b 100644 --- a/octavia/controller/worker/flows/load_balancer_flows.py +++ b/octavia/controller/worker/v1/flows/load_balancer_flows.py @@ -20,15 +20,15 @@ from taskflow.patterns import unordered_flow from octavia.common import constants from octavia.common import exceptions -from octavia.controller.worker.flows import amphora_flows -from octavia.controller.worker.flows import listener_flows -from octavia.controller.worker.flows import member_flows -from octavia.controller.worker.flows import pool_flows -from octavia.controller.worker.tasks import amphora_driver_tasks -from octavia.controller.worker.tasks import compute_tasks -from octavia.controller.worker.tasks import database_tasks -from octavia.controller.worker.tasks import lifecycle_tasks -from octavia.controller.worker.tasks import network_tasks +from octavia.controller.worker.v1.flows import amphora_flows +from octavia.controller.worker.v1.flows import listener_flows +from octavia.controller.worker.v1.flows import member_flows +from octavia.controller.worker.v1.flows import pool_flows +from octavia.controller.worker.v1.tasks import amphora_driver_tasks +from octavia.controller.worker.v1.tasks import compute_tasks +from octavia.controller.worker.v1.tasks import database_tasks +from 
octavia.controller.worker.v1.tasks import lifecycle_tasks +from octavia.controller.worker.v1.tasks import network_tasks CONF = cfg.CONF LOG = logging.getLogger(__name__) diff --git a/octavia/controller/worker/flows/member_flows.py b/octavia/controller/worker/v1/flows/member_flows.py similarity index 96% rename from octavia/controller/worker/flows/member_flows.py rename to octavia/controller/worker/v1/flows/member_flows.py index 814b2b31c9..dfa32683c8 100644 --- a/octavia/controller/worker/flows/member_flows.py +++ b/octavia/controller/worker/v1/flows/member_flows.py @@ -17,11 +17,11 @@ from taskflow.patterns import linear_flow from taskflow.patterns import unordered_flow from octavia.common import constants -from octavia.controller.worker.tasks import amphora_driver_tasks -from octavia.controller.worker.tasks import database_tasks -from octavia.controller.worker.tasks import lifecycle_tasks -from octavia.controller.worker.tasks import model_tasks -from octavia.controller.worker.tasks import network_tasks +from octavia.controller.worker.v1.tasks import amphora_driver_tasks +from octavia.controller.worker.v1.tasks import database_tasks +from octavia.controller.worker.v1.tasks import lifecycle_tasks +from octavia.controller.worker.v1.tasks import model_tasks +from octavia.controller.worker.v1.tasks import network_tasks class MemberFlows(object): diff --git a/octavia/controller/worker/flows/pool_flows.py b/octavia/controller/worker/v1/flows/pool_flows.py similarity index 95% rename from octavia/controller/worker/flows/pool_flows.py rename to octavia/controller/worker/v1/flows/pool_flows.py index 2d39f2a37a..96dadb989b 100644 --- a/octavia/controller/worker/flows/pool_flows.py +++ b/octavia/controller/worker/v1/flows/pool_flows.py @@ -16,10 +16,10 @@ from taskflow.patterns import linear_flow from octavia.common import constants -from octavia.controller.worker.tasks import amphora_driver_tasks -from octavia.controller.worker.tasks import database_tasks -from octavia.controller.worker.tasks import lifecycle_tasks -from octavia.controller.worker.tasks import model_tasks +from octavia.controller.worker.v1.tasks import amphora_driver_tasks +from octavia.controller.worker.v1.tasks import database_tasks +from octavia.controller.worker.v1.tasks import lifecycle_tasks +from octavia.controller.worker.v1.tasks import model_tasks class PoolFlows(object): diff --git a/octavia/controller/worker/v1/tasks/__init__.py b/octavia/controller/worker/v1/tasks/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/controller/worker/v1/tasks/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
diff --git a/octavia/controller/worker/tasks/amphora_driver_tasks.py b/octavia/controller/worker/v1/tasks/amphora_driver_tasks.py similarity index 100% rename from octavia/controller/worker/tasks/amphora_driver_tasks.py rename to octavia/controller/worker/v1/tasks/amphora_driver_tasks.py diff --git a/octavia/controller/worker/tasks/cert_task.py b/octavia/controller/worker/v1/tasks/cert_task.py similarity index 100% rename from octavia/controller/worker/tasks/cert_task.py rename to octavia/controller/worker/v1/tasks/cert_task.py diff --git a/octavia/controller/worker/tasks/compute_tasks.py b/octavia/controller/worker/v1/tasks/compute_tasks.py similarity index 100% rename from octavia/controller/worker/tasks/compute_tasks.py rename to octavia/controller/worker/v1/tasks/compute_tasks.py diff --git a/octavia/controller/worker/tasks/database_tasks.py b/octavia/controller/worker/v1/tasks/database_tasks.py similarity index 100% rename from octavia/controller/worker/tasks/database_tasks.py rename to octavia/controller/worker/v1/tasks/database_tasks.py diff --git a/octavia/controller/worker/tasks/lifecycle_tasks.py b/octavia/controller/worker/v1/tasks/lifecycle_tasks.py similarity index 100% rename from octavia/controller/worker/tasks/lifecycle_tasks.py rename to octavia/controller/worker/v1/tasks/lifecycle_tasks.py diff --git a/octavia/controller/worker/tasks/model_tasks.py b/octavia/controller/worker/v1/tasks/model_tasks.py similarity index 100% rename from octavia/controller/worker/tasks/model_tasks.py rename to octavia/controller/worker/v1/tasks/model_tasks.py diff --git a/octavia/controller/worker/tasks/network_tasks.py b/octavia/controller/worker/v1/tasks/network_tasks.py similarity index 100% rename from octavia/controller/worker/tasks/network_tasks.py rename to octavia/controller/worker/v1/tasks/network_tasks.py diff --git a/octavia/controller/worker/v2/__init__.py b/octavia/controller/worker/v2/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/controller/worker/v2/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/controller/worker/v2/controller_worker.py b/octavia/controller/worker/v2/controller_worker.py new file mode 100644 index 0000000000..cacc909834 --- /dev/null +++ b/octavia/controller/worker/v2/controller_worker.py @@ -0,0 +1,964 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + + +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import excutils +from sqlalchemy.orm import exc as db_exceptions +from taskflow.listeners import logging as tf_logging +import tenacity + +from octavia.common import base_taskflow +from octavia.common import constants +from octavia.controller.worker.v2.flows import amphora_flows +from octavia.controller.worker.v2.flows import health_monitor_flows +from octavia.controller.worker.v2.flows import l7policy_flows +from octavia.controller.worker.v2.flows import l7rule_flows +from octavia.controller.worker.v2.flows import listener_flows +from octavia.controller.worker.v2.flows import load_balancer_flows +from octavia.controller.worker.v2.flows import member_flows +from octavia.controller.worker.v2.flows import pool_flows +from octavia.db import api as db_apis +from octavia.db import repositories as repo + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + +RETRY_ATTEMPTS = 15 +RETRY_INITIAL_DELAY = 1 +RETRY_BACKOFF = 1 +RETRY_MAX = 5 + + +def _is_provisioning_status_pending_update(lb_obj): + return not lb_obj.provisioning_status == constants.PENDING_UPDATE + + +class ControllerWorker(base_taskflow.BaseTaskFlowEngine): + + def __init__(self): + + self._amphora_flows = amphora_flows.AmphoraFlows() + self._health_monitor_flows = health_monitor_flows.HealthMonitorFlows() + self._lb_flows = load_balancer_flows.LoadBalancerFlows() + self._listener_flows = listener_flows.ListenerFlows() + self._member_flows = member_flows.MemberFlows() + self._pool_flows = pool_flows.PoolFlows() + self._l7policy_flows = l7policy_flows.L7PolicyFlows() + self._l7rule_flows = l7rule_flows.L7RuleFlows() + + self._amphora_repo = repo.AmphoraRepository() + self._amphora_health_repo = repo.AmphoraHealthRepository() + self._health_mon_repo = repo.HealthMonitorRepository() + self._lb_repo = repo.LoadBalancerRepository() + self._listener_repo = repo.ListenerRepository() + self._member_repo = repo.MemberRepository() + self._pool_repo = repo.PoolRepository() + self._l7policy_repo = repo.L7PolicyRepository() + self._l7rule_repo = repo.L7RuleRepository() + self._flavor_repo = repo.FlavorRepository() + + super(ControllerWorker, self).__init__() + + @tenacity.retry( + retry=( + tenacity.retry_if_result(_is_provisioning_status_pending_update) | + tenacity.retry_if_exception_type()), + wait=tenacity.wait_incrementing( + RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX), + stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS)) + def _get_db_obj_until_pending_update(self, repo, id): + + return repo.get(db_apis.get_session(), id=id) + + def create_amphora(self): + """Creates an Amphora. + + This is used to create spare amphora. + + :returns: amphora_id + """ + try: + create_amp_tf = self._taskflow_load( + self._amphora_flows.get_create_amphora_flow(), + store={constants.BUILD_TYPE_PRIORITY: + constants.LB_CREATE_SPARES_POOL_PRIORITY, + constants.FLAVOR: None} + ) + with tf_logging.DynamicLoggingListener(create_amp_tf, log=LOG): + create_amp_tf.run() + + return create_amp_tf.storage.fetch('amphora') + except Exception as e: + LOG.error('Failed to create an amphora due to: {}'.format(str(e))) + + def delete_amphora(self, amphora_id): + """Deletes an existing Amphora. + + :param amphora_id: ID of the amphora to delete + :returns: None + :raises AmphoraNotFound: The referenced Amphora was not found + """ + amphora = self._amphora_repo.get(db_apis.get_session(), + id=amphora_id) + delete_amp_tf = self._taskflow_load(self._amphora_flows. 
+ get_delete_amphora_flow(), + store={constants.AMPHORA: amphora}) + with tf_logging.DynamicLoggingListener(delete_amp_tf, + log=LOG): + delete_amp_tf.run() + + @tenacity.retry( + retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), + wait=tenacity.wait_incrementing( + RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX), + stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS)) + def create_health_monitor(self, health_monitor_id): + """Creates a health monitor. + + :param pool_id: ID of the pool to create a health monitor on + :returns: None + :raises NoResultFound: Unable to find the object + """ + health_mon = self._health_mon_repo.get(db_apis.get_session(), + id=health_monitor_id) + if not health_mon: + LOG.warning('Failed to fetch %s %s from DB. Retrying for up to ' + '60 seconds.', 'health_monitor', health_monitor_id) + raise db_exceptions.NoResultFound + + pool = health_mon.pool + listeners = pool.listeners + pool.health_monitor = health_mon + load_balancer = pool.load_balancer + + create_hm_tf = self._taskflow_load( + self._health_monitor_flows.get_create_health_monitor_flow(), + store={constants.HEALTH_MON: health_mon, + constants.POOL: pool, + constants.LISTENERS: listeners, + constants.LOADBALANCER: load_balancer}) + with tf_logging.DynamicLoggingListener(create_hm_tf, + log=LOG): + create_hm_tf.run() + + def delete_health_monitor(self, health_monitor_id): + """Deletes a health monitor. + + :param pool_id: ID of the pool to delete its health monitor + :returns: None + :raises HMNotFound: The referenced health monitor was not found + """ + health_mon = self._health_mon_repo.get(db_apis.get_session(), + id=health_monitor_id) + + pool = health_mon.pool + listeners = pool.listeners + load_balancer = pool.load_balancer + + delete_hm_tf = self._taskflow_load( + self._health_monitor_flows.get_delete_health_monitor_flow(), + store={constants.HEALTH_MON: health_mon, + constants.POOL: pool, + constants.LISTENERS: listeners, + constants.LOADBALANCER: load_balancer}) + with tf_logging.DynamicLoggingListener(delete_hm_tf, + log=LOG): + delete_hm_tf.run() + + def update_health_monitor(self, health_monitor_id, health_monitor_updates): + """Updates a health monitor. + + :param pool_id: ID of the pool to have it's health monitor updated + :param health_monitor_updates: Dict containing updated health monitor + :returns: None + :raises HMNotFound: The referenced health monitor was not found + """ + health_mon = None + try: + health_mon = self._get_db_obj_until_pending_update( + self._health_mon_repo, health_monitor_id) + except tenacity.RetryError as e: + LOG.warning('Health monitor did not go into %s in 60 seconds. ' + 'This either due to an in-progress Octavia upgrade ' + 'or an overloaded and failing database. 
Assuming ' + 'an upgrade is in progress and continuing.', + constants.PENDING_UPDATE) + health_mon = e.last_attempt.result() + + pool = health_mon.pool + listeners = pool.listeners + pool.health_monitor = health_mon + load_balancer = pool.load_balancer + + update_hm_tf = self._taskflow_load( + self._health_monitor_flows.get_update_health_monitor_flow(), + store={constants.HEALTH_MON: health_mon, + constants.POOL: pool, + constants.LISTENERS: listeners, + constants.LOADBALANCER: load_balancer, + constants.UPDATE_DICT: health_monitor_updates}) + with tf_logging.DynamicLoggingListener(update_hm_tf, + log=LOG): + update_hm_tf.run() + + @tenacity.retry( + retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), + wait=tenacity.wait_incrementing( + RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX), + stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS)) + def create_listener(self, listener_id): + """Creates a listener. + + :param listener_id: ID of the listener to create + :returns: None + :raises NoResultFound: Unable to find the object + """ + listener = self._listener_repo.get(db_apis.get_session(), + id=listener_id) + if not listener: + LOG.warning('Failed to fetch %s %s from DB. Retrying for up to ' + '60 seconds.', 'listener', listener_id) + raise db_exceptions.NoResultFound + + load_balancer = listener.load_balancer + + create_listener_tf = self._taskflow_load(self._listener_flows. + get_create_listener_flow(), + store={constants.LOADBALANCER: + load_balancer, + constants.LISTENERS: + [listener]}) + with tf_logging.DynamicLoggingListener(create_listener_tf, + log=LOG): + create_listener_tf.run() + + def delete_listener(self, listener_id): + """Deletes a listener. + + :param listener_id: ID of the listener to delete + :returns: None + :raises ListenerNotFound: The referenced listener was not found + """ + listener = self._listener_repo.get(db_apis.get_session(), + id=listener_id) + load_balancer = listener.load_balancer + + delete_listener_tf = self._taskflow_load( + self._listener_flows.get_delete_listener_flow(), + store={constants.LOADBALANCER: load_balancer, + constants.LISTENER: listener}) + with tf_logging.DynamicLoggingListener(delete_listener_tf, + log=LOG): + delete_listener_tf.run() + + def update_listener(self, listener_id, listener_updates): + """Updates a listener. + + :param listener_id: ID of the listener to update + :param listener_updates: Dict containing updated listener attributes + :returns: None + :raises ListenerNotFound: The referenced listener was not found + """ + listener = None + try: + listener = self._get_db_obj_until_pending_update( + self._listener_repo, listener_id) + except tenacity.RetryError as e: + LOG.warning('Listener did not go into %s in 60 seconds. ' + 'This either due to an in-progress Octavia upgrade ' + 'or an overloaded and failing database. Assuming ' + 'an upgrade is in progress and continuing.', + constants.PENDING_UPDATE) + listener = e.last_attempt.result() + + load_balancer = listener.load_balancer + + update_listener_tf = self._taskflow_load(self._listener_flows. 
+ get_update_listener_flow(), + store={constants.LISTENER: + listener, + constants.LOADBALANCER: + load_balancer, + constants.UPDATE_DICT: + listener_updates, + constants.LISTENERS: + [listener]}) + with tf_logging.DynamicLoggingListener(update_listener_tf, log=LOG): + update_listener_tf.run() + + @tenacity.retry( + retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), + wait=tenacity.wait_incrementing( + RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX), + stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS)) + def create_load_balancer(self, load_balancer_id, flavor=None): + """Creates a load balancer by allocating Amphorae. + + First tries to allocate an existing Amphora in READY state. + If none are available it will attempt to build one specifically + for this load balancer. + + :param load_balancer_id: ID of the load balancer to create + :returns: None + :raises NoResultFound: Unable to find the object + """ + lb = self._lb_repo.get(db_apis.get_session(), id=load_balancer_id) + if not lb: + LOG.warning('Failed to fetch %s %s from DB. Retrying for up to ' + '60 seconds.', 'load_balancer', load_balancer_id) + raise db_exceptions.NoResultFound + + # TODO(johnsom) convert this to octavia_lib constant flavor + # once octavia is transitioned to use octavia_lib + store = {constants.LOADBALANCER_ID: load_balancer_id, + constants.BUILD_TYPE_PRIORITY: + constants.LB_CREATE_NORMAL_PRIORITY, + constants.FLAVOR: flavor} + + topology = lb.topology + + store[constants.UPDATE_DICT] = { + constants.TOPOLOGY: topology + } + + create_lb_flow = self._lb_flows.get_create_load_balancer_flow( + topology=topology, listeners=lb.listeners) + + create_lb_tf = self._taskflow_load(create_lb_flow, store=store) + with tf_logging.DynamicLoggingListener(create_lb_tf, log=LOG): + create_lb_tf.run() + + def delete_load_balancer(self, load_balancer_id, cascade=False): + """Deletes a load balancer by de-allocating Amphorae. + + :param load_balancer_id: ID of the load balancer to delete + :returns: None + :raises LBNotFound: The referenced load balancer was not found + """ + lb = self._lb_repo.get(db_apis.get_session(), + id=load_balancer_id) + + if cascade: + (flow, + store) = self._lb_flows.get_cascade_delete_load_balancer_flow(lb) + else: + (flow, store) = self._lb_flows.get_delete_load_balancer_flow(lb) + store.update({constants.LOADBALANCER: lb, + constants.SERVER_GROUP_ID: lb.server_group_id}) + delete_lb_tf = self._taskflow_load(flow, store=store) + + with tf_logging.DynamicLoggingListener(delete_lb_tf, + log=LOG): + delete_lb_tf.run() + + def update_load_balancer(self, load_balancer_id, load_balancer_updates): + """Updates a load balancer. + + :param load_balancer_id: ID of the load balancer to update + :param load_balancer_updates: Dict containing updated load balancer + :returns: None + :raises LBNotFound: The referenced load balancer was not found + """ + lb = None + try: + lb = self._get_db_obj_until_pending_update( + self._lb_repo, load_balancer_id) + except tenacity.RetryError as e: + LOG.warning('Load balancer did not go into %s in 60 seconds. ' + 'This either due to an in-progress Octavia upgrade ' + 'or an overloaded and failing database. 
Assuming ' + 'an upgrade is in progress and continuing.', + constants.PENDING_UPDATE) + lb = e.last_attempt.result() + + listeners, _ = self._listener_repo.get_all( + db_apis.get_session(), + load_balancer_id=load_balancer_id) + + update_lb_tf = self._taskflow_load( + self._lb_flows.get_update_load_balancer_flow(), + store={constants.LOADBALANCER: lb, + constants.LISTENERS: listeners, + constants.UPDATE_DICT: load_balancer_updates}) + + with tf_logging.DynamicLoggingListener(update_lb_tf, + log=LOG): + update_lb_tf.run() + + @tenacity.retry( + retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), + wait=tenacity.wait_incrementing( + RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX), + stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS)) + def create_member(self, member_id): + """Creates a pool member. + + :param member_id: ID of the member to create + :returns: None + :raises NoSuitablePool: Unable to find the node pool + """ + member = self._member_repo.get(db_apis.get_session(), + id=member_id) + if not member: + LOG.warning('Failed to fetch %s %s from DB. Retrying for up to ' + '60 seconds.', 'member', member_id) + raise db_exceptions.NoResultFound + + pool = member.pool + listeners = pool.listeners + load_balancer = pool.load_balancer + + create_member_tf = self._taskflow_load(self._member_flows. + get_create_member_flow(), + store={constants.MEMBER: member, + constants.LISTENERS: + listeners, + constants.LOADBALANCER: + load_balancer, + constants.POOL: pool}) + with tf_logging.DynamicLoggingListener(create_member_tf, + log=LOG): + create_member_tf.run() + + def delete_member(self, member_id): + """Deletes a pool member. + + :param member_id: ID of the member to delete + :returns: None + :raises MemberNotFound: The referenced member was not found + """ + member = self._member_repo.get(db_apis.get_session(), + id=member_id) + pool = member.pool + listeners = pool.listeners + load_balancer = pool.load_balancer + + delete_member_tf = self._taskflow_load( + self._member_flows.get_delete_member_flow(), + store={constants.MEMBER: member, constants.LISTENERS: listeners, + constants.LOADBALANCER: load_balancer, constants.POOL: pool} + ) + with tf_logging.DynamicLoggingListener(delete_member_tf, + log=LOG): + delete_member_tf.run() + + def batch_update_members(self, old_member_ids, new_member_ids, + updated_members): + old_members = [self._member_repo.get(db_apis.get_session(), id=mid) + for mid in old_member_ids] + new_members = [self._member_repo.get(db_apis.get_session(), id=mid) + for mid in new_member_ids] + updated_members = [ + (self._member_repo.get(db_apis.get_session(), id=m.get('id')), m) + for m in updated_members] + if old_members: + pool = old_members[0].pool + elif new_members: + pool = new_members[0].pool + else: + pool = updated_members[0][0].pool + listeners = pool.listeners + load_balancer = pool.load_balancer + + batch_update_members_tf = self._taskflow_load( + self._member_flows.get_batch_update_members_flow( + old_members, new_members, updated_members), + store={constants.LISTENERS: listeners, + constants.LOADBALANCER: load_balancer, + constants.POOL: pool}) + with tf_logging.DynamicLoggingListener(batch_update_members_tf, + log=LOG): + batch_update_members_tf.run() + + def update_member(self, member_id, member_updates): + """Updates a pool member. 
+ + :param member_id: ID of the member to update + :param member_updates: Dict containing updated member attributes + :returns: None + :raises MemberNotFound: The referenced member was not found + """ + member = None + try: + member = self._get_db_obj_until_pending_update( + self._member_repo, member_id) + except tenacity.RetryError as e: + LOG.warning('Member did not go into %s in 60 seconds. ' + 'This either due to an in-progress Octavia upgrade ' + 'or an overloaded and failing database. Assuming ' + 'an upgrade is in progress and continuing.', + constants.PENDING_UPDATE) + member = e.last_attempt.result() + + pool = member.pool + listeners = pool.listeners + load_balancer = pool.load_balancer + + update_member_tf = self._taskflow_load(self._member_flows. + get_update_member_flow(), + store={constants.MEMBER: member, + constants.LISTENERS: + listeners, + constants.LOADBALANCER: + load_balancer, + constants.POOL: + pool, + constants.UPDATE_DICT: + member_updates}) + with tf_logging.DynamicLoggingListener(update_member_tf, + log=LOG): + update_member_tf.run() + + @tenacity.retry( + retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), + wait=tenacity.wait_incrementing( + RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX), + stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS)) + def create_pool(self, pool_id): + """Creates a node pool. + + :param pool_id: ID of the pool to create + :returns: None + :raises NoResultFound: Unable to find the object + """ + pool = self._pool_repo.get(db_apis.get_session(), + id=pool_id) + if not pool: + LOG.warning('Failed to fetch %s %s from DB. Retrying for up to ' + '60 seconds.', 'pool', pool_id) + raise db_exceptions.NoResultFound + + listeners = pool.listeners + load_balancer = pool.load_balancer + + create_pool_tf = self._taskflow_load(self._pool_flows. + get_create_pool_flow(), + store={constants.POOL: pool, + constants.LISTENERS: + listeners, + constants.LOADBALANCER: + load_balancer}) + with tf_logging.DynamicLoggingListener(create_pool_tf, + log=LOG): + create_pool_tf.run() + + def delete_pool(self, pool_id): + """Deletes a node pool. + + :param pool_id: ID of the pool to delete + :returns: None + :raises PoolNotFound: The referenced pool was not found + """ + pool = self._pool_repo.get(db_apis.get_session(), + id=pool_id) + + load_balancer = pool.load_balancer + listeners = pool.listeners + + delete_pool_tf = self._taskflow_load( + self._pool_flows.get_delete_pool_flow(), + store={constants.POOL: pool, constants.LISTENERS: listeners, + constants.LOADBALANCER: load_balancer}) + with tf_logging.DynamicLoggingListener(delete_pool_tf, + log=LOG): + delete_pool_tf.run() + + def update_pool(self, pool_id, pool_updates): + """Updates a node pool. + + :param pool_id: ID of the pool to update + :param pool_updates: Dict containing updated pool attributes + :returns: None + :raises PoolNotFound: The referenced pool was not found + """ + pool = None + try: + pool = self._get_db_obj_until_pending_update( + self._pool_repo, pool_id) + except tenacity.RetryError as e: + LOG.warning('Pool did not go into %s in 60 seconds. ' + 'This either due to an in-progress Octavia upgrade ' + 'or an overloaded and failing database. Assuming ' + 'an upgrade is in progress and continuing.', + constants.PENDING_UPDATE) + pool = e.last_attempt.result() + + listeners = pool.listeners + load_balancer = pool.load_balancer + + update_pool_tf = self._taskflow_load(self._pool_flows. 
+ get_update_pool_flow(), + store={constants.POOL: pool, + constants.LISTENERS: + listeners, + constants.LOADBALANCER: + load_balancer, + constants.UPDATE_DICT: + pool_updates}) + with tf_logging.DynamicLoggingListener(update_pool_tf, + log=LOG): + update_pool_tf.run() + + @tenacity.retry( + retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), + wait=tenacity.wait_incrementing( + RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX), + stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS)) + def create_l7policy(self, l7policy_id): + """Creates an L7 Policy. + + :param l7policy_id: ID of the l7policy to create + :returns: None + :raises NoResultFound: Unable to find the object + """ + l7policy = self._l7policy_repo.get(db_apis.get_session(), + id=l7policy_id) + if not l7policy: + LOG.warning('Failed to fetch %s %s from DB. Retrying for up to ' + '60 seconds.', 'l7policy', l7policy_id) + raise db_exceptions.NoResultFound + + listeners = [l7policy.listener] + load_balancer = l7policy.listener.load_balancer + + create_l7policy_tf = self._taskflow_load( + self._l7policy_flows.get_create_l7policy_flow(), + store={constants.L7POLICY: l7policy, + constants.LISTENERS: listeners, + constants.LOADBALANCER: load_balancer}) + with tf_logging.DynamicLoggingListener(create_l7policy_tf, + log=LOG): + create_l7policy_tf.run() + + def delete_l7policy(self, l7policy_id): + """Deletes an L7 policy. + + :param l7policy_id: ID of the l7policy to delete + :returns: None + :raises L7PolicyNotFound: The referenced l7policy was not found + """ + l7policy = self._l7policy_repo.get(db_apis.get_session(), + id=l7policy_id) + + load_balancer = l7policy.listener.load_balancer + listeners = [l7policy.listener] + + delete_l7policy_tf = self._taskflow_load( + self._l7policy_flows.get_delete_l7policy_flow(), + store={constants.L7POLICY: l7policy, + constants.LISTENERS: listeners, + constants.LOADBALANCER: load_balancer}) + with tf_logging.DynamicLoggingListener(delete_l7policy_tf, + log=LOG): + delete_l7policy_tf.run() + + def update_l7policy(self, l7policy_id, l7policy_updates): + """Updates an L7 policy. + + :param l7policy_id: ID of the l7policy to update + :param l7policy_updates: Dict containing updated l7policy attributes + :returns: None + :raises L7PolicyNotFound: The referenced l7policy was not found + """ + l7policy = None + try: + l7policy = self._get_db_obj_until_pending_update( + self._l7policy_repo, l7policy_id) + except tenacity.RetryError as e: + LOG.warning('L7 policy did not go into %s in 60 seconds. ' + 'This either due to an in-progress Octavia upgrade ' + 'or an overloaded and failing database. Assuming ' + 'an upgrade is in progress and continuing.', + constants.PENDING_UPDATE) + l7policy = e.last_attempt.result() + + listeners = [l7policy.listener] + load_balancer = l7policy.listener.load_balancer + + update_l7policy_tf = self._taskflow_load( + self._l7policy_flows.get_update_l7policy_flow(), + store={constants.L7POLICY: l7policy, + constants.LISTENERS: listeners, + constants.LOADBALANCER: load_balancer, + constants.UPDATE_DICT: l7policy_updates}) + with tf_logging.DynamicLoggingListener(update_l7policy_tf, + log=LOG): + update_l7policy_tf.run() + + @tenacity.retry( + retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), + wait=tenacity.wait_incrementing( + RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX), + stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS)) + def create_l7rule(self, l7rule_id): + """Creates an L7 Rule. 
+ + :param l7rule_id: ID of the l7rule to create + :returns: None + :raises NoResultFound: Unable to find the object + """ + l7rule = self._l7rule_repo.get(db_apis.get_session(), + id=l7rule_id) + if not l7rule: + LOG.warning('Failed to fetch %s %s from DB. Retrying for up to ' + '60 seconds.', 'l7rule', l7rule_id) + raise db_exceptions.NoResultFound + + l7policy = l7rule.l7policy + listeners = [l7policy.listener] + load_balancer = l7policy.listener.load_balancer + + create_l7rule_tf = self._taskflow_load( + self._l7rule_flows.get_create_l7rule_flow(), + store={constants.L7RULE: l7rule, + constants.L7POLICY: l7policy, + constants.LISTENERS: listeners, + constants.LOADBALANCER: load_balancer}) + with tf_logging.DynamicLoggingListener(create_l7rule_tf, + log=LOG): + create_l7rule_tf.run() + + def delete_l7rule(self, l7rule_id): + """Deletes an L7 rule. + + :param l7rule_id: ID of the l7rule to delete + :returns: None + :raises L7RuleNotFound: The referenced l7rule was not found + """ + l7rule = self._l7rule_repo.get(db_apis.get_session(), + id=l7rule_id) + l7policy = l7rule.l7policy + load_balancer = l7policy.listener.load_balancer + listeners = [l7policy.listener] + + delete_l7rule_tf = self._taskflow_load( + self._l7rule_flows.get_delete_l7rule_flow(), + store={constants.L7RULE: l7rule, + constants.L7POLICY: l7policy, + constants.LISTENERS: listeners, + constants.LOADBALANCER: load_balancer}) + with tf_logging.DynamicLoggingListener(delete_l7rule_tf, + log=LOG): + delete_l7rule_tf.run() + + def update_l7rule(self, l7rule_id, l7rule_updates): + """Updates an L7 rule. + + :param l7rule_id: ID of the l7rule to update + :param l7rule_updates: Dict containing updated l7rule attributes + :returns: None + :raises L7RuleNotFound: The referenced l7rule was not found + """ + l7rule = None + try: + l7rule = self._get_db_obj_until_pending_update( + self._l7rule_repo, l7rule_id) + except tenacity.RetryError as e: + LOG.warning('L7 rule did not go into %s in 60 seconds. ' + 'This either due to an in-progress Octavia upgrade ' + 'or an overloaded and failing database. Assuming ' + 'an upgrade is in progress and continuing.', + constants.PENDING_UPDATE) + l7rule = e.last_attempt.result() + + l7policy = l7rule.l7policy + listeners = [l7policy.listener] + load_balancer = l7policy.listener.load_balancer + + update_l7rule_tf = self._taskflow_load( + self._l7rule_flows.get_update_l7rule_flow(), + store={constants.L7RULE: l7rule, + constants.L7POLICY: l7policy, + constants.LISTENERS: listeners, + constants.LOADBALANCER: load_balancer, + constants.UPDATE_DICT: l7rule_updates}) + with tf_logging.DynamicLoggingListener(update_l7rule_tf, + log=LOG): + update_l7rule_tf.run() + + def _perform_amphora_failover(self, amp, priority): + """Internal method to perform failover operations for an amphora. + + :param amp: The amphora to failover + :param priority: The create priority + :returns: None + """ + + stored_params = {constants.FAILED_AMPHORA: amp, + constants.LOADBALANCER_ID: amp.load_balancer_id, + constants.BUILD_TYPE_PRIORITY: priority, } + + if amp.status == constants.DELETED: + LOG.warning('Amphora %s is marked DELETED in the database but ' + 'was submitted for failover. 
Deleting it from the ' + 'amphora health table to exclude it from health ' + 'checks and skipping the failover.', amp.id) + self._amphora_health_repo.delete(db_apis.get_session(), + amphora_id=amp.id) + return + + if (CONF.house_keeping.spare_amphora_pool_size == 0) and ( + CONF.nova.enable_anti_affinity is False): + LOG.warning("Failing over amphora with no spares pool may " + "cause delays in failover times while a new " + "amphora instance boots.") + + # if we run with anti-affinity we need to set the server group + # as well + lb = self._amphora_repo.get_lb_for_amphora( + db_apis.get_session(), amp.id) + if CONF.nova.enable_anti_affinity and lb: + stored_params[constants.SERVER_GROUP_ID] = lb.server_group_id + if lb and lb.flavor_id: + stored_params[constants.FLAVOR] = ( + self._flavor_repo.get_flavor_metadata_dict( + db_apis.get_session(), lb.flavor_id)) + else: + stored_params[constants.FLAVOR] = {} + + failover_amphora_tf = self._taskflow_load( + self._amphora_flows.get_failover_flow( + role=amp.role, load_balancer=lb), + store=stored_params) + + with tf_logging.DynamicLoggingListener(failover_amphora_tf, log=LOG): + failover_amphora_tf.run() + + def failover_amphora(self, amphora_id): + """Perform failover operations for an amphora. + + :param amphora_id: ID for amphora to failover + :returns: None + :raises AmphoraNotFound: The referenced amphora was not found + """ + try: + amp = self._amphora_repo.get(db_apis.get_session(), + id=amphora_id) + if not amp: + LOG.warning("Could not fetch Amphora %s from DB, ignoring " + "failover request.", amphora_id) + return + self._perform_amphora_failover( + amp, constants.LB_CREATE_FAILOVER_PRIORITY) + if amp.load_balancer_id: + LOG.info("Mark ACTIVE in DB for load balancer id: %s", + amp.load_balancer_id) + self._lb_repo.update( + db_apis.get_session(), amp.load_balancer_id, + provisioning_status=constants.ACTIVE) + except Exception as e: + try: + self._lb_repo.update( + db_apis.get_session(), amp.load_balancer_id, + provisioning_status=constants.ERROR) + except Exception: + LOG.error("Unable to revert LB status to ERROR.") + with excutils.save_and_reraise_exception(): + LOG.error("Failover exception: %s", e) + + def failover_loadbalancer(self, load_balancer_id): + """Perform failover operations for a load balancer. + + :param load_balancer_id: ID for load balancer to failover + :returns: None + :raises LBNotFound: The referenced load balancer was not found + """ + + # Note: This expects that the load balancer is already in + # provisioning_status=PENDING_UPDATE state + try: + lb = self._lb_repo.get(db_apis.get_session(), + id=load_balancer_id) + + # Exclude amphora already deleted + amps = [a for a in lb.amphorae if a.status != constants.DELETED] + for amp in amps: + # failover amphora in backup role + # Note: this amp may not currently be the backup + # TODO(johnsom) Change this to query the amp state + # once the amp API supports it. 
+ if amp.role == constants.ROLE_BACKUP: + self._perform_amphora_failover( + amp, constants.LB_CREATE_ADMIN_FAILOVER_PRIORITY) + + for amp in amps: + # failover everyhting else + if amp.role != constants.ROLE_BACKUP: + self._perform_amphora_failover( + amp, constants.LB_CREATE_ADMIN_FAILOVER_PRIORITY) + + self._lb_repo.update( + db_apis.get_session(), load_balancer_id, + provisioning_status=constants.ACTIVE) + + except Exception as e: + with excutils.save_and_reraise_exception(): + LOG.error("LB %(lbid)s failover exception: %(exc)s", + {'lbid': load_balancer_id, 'exc': e}) + self._lb_repo.update( + db_apis.get_session(), load_balancer_id, + provisioning_status=constants.ERROR) + + def amphora_cert_rotation(self, amphora_id): + """Perform cert rotation for an amphora. + + :param amphora_id: ID for amphora to rotate + :returns: None + :raises AmphoraNotFound: The referenced amphora was not found + """ + + amp = self._amphora_repo.get(db_apis.get_session(), + id=amphora_id) + LOG.info("Start amphora cert rotation, amphora's id is: %s", amp.id) + + certrotation_amphora_tf = self._taskflow_load( + self._amphora_flows.cert_rotate_amphora_flow(), + store={constants.AMPHORA: amp, + constants.AMPHORA_ID: amp.id}) + + with tf_logging.DynamicLoggingListener(certrotation_amphora_tf, + log=LOG): + certrotation_amphora_tf.run() + + def update_amphora_agent_config(self, amphora_id): + """Update the amphora agent configuration. + + Note: This will update the amphora agent configuration file and + update the running configuration for mutatable configuration + items. + + :param amphora_id: ID of the amphora to update. + :returns: None + """ + LOG.info("Start amphora agent configuration update, amphora's id " + "is: %s", amphora_id) + amp = self._amphora_repo.get(db_apis.get_session(), id=amphora_id) + lb = self._amphora_repo.get_lb_for_amphora(db_apis.get_session(), + amphora_id) + flavor = {} + if lb.flavor_id: + flavor = self._flavor_repo.get_flavor_metadata_dict( + db_apis.get_session(), lb.flavor_id) + + update_amphora_tf = self._taskflow_load( + self._amphora_flows.update_amphora_config_flow(), + store={constants.AMPHORA: amp, + constants.FLAVOR: flavor}) + + with tf_logging.DynamicLoggingListener(update_amphora_tf, + log=LOG): + update_amphora_tf.run() diff --git a/octavia/controller/worker/v2/flows/__init__.py b/octavia/controller/worker/v2/flows/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/controller/worker/v2/flows/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/controller/worker/v2/flows/amphora_flows.py b/octavia/controller/worker/v2/flows/amphora_flows.py new file mode 100644 index 0000000000..b7e4338983 --- /dev/null +++ b/octavia/controller/worker/v2/flows/amphora_flows.py @@ -0,0 +1,599 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from oslo_config import cfg +from taskflow.patterns import graph_flow +from taskflow.patterns import linear_flow +from taskflow.patterns import unordered_flow + +from octavia.common import constants +from octavia.controller.worker.v2.tasks import amphora_driver_tasks +from octavia.controller.worker.v2.tasks import cert_task +from octavia.controller.worker.v2.tasks import compute_tasks +from octavia.controller.worker.v2.tasks import database_tasks +from octavia.controller.worker.v2.tasks import lifecycle_tasks +from octavia.controller.worker.v2.tasks import network_tasks + +CONF = cfg.CONF + + +class AmphoraFlows(object): + def __init__(self): + # for some reason only this has the values from the config file + self.REST_AMPHORA_DRIVER = (CONF.controller_worker.amphora_driver == + 'amphora_haproxy_rest_driver') + + def get_create_amphora_flow(self): + """Creates a flow to create an amphora. + + :returns: The flow for creating the amphora + """ + create_amphora_flow = linear_flow.Flow(constants.CREATE_AMPHORA_FLOW) + create_amphora_flow.add(database_tasks.CreateAmphoraInDB( + provides=constants.AMPHORA_ID)) + create_amphora_flow.add(lifecycle_tasks.AmphoraIDToErrorOnRevertTask( + requires=constants.AMPHORA_ID)) + if self.REST_AMPHORA_DRIVER: + create_amphora_flow.add(cert_task.GenerateServerPEMTask( + provides=constants.SERVER_PEM)) + + create_amphora_flow.add( + database_tasks.UpdateAmphoraDBCertExpiration( + requires=(constants.AMPHORA_ID, constants.SERVER_PEM))) + + create_amphora_flow.add(compute_tasks.CertComputeCreate( + requires=(constants.AMPHORA_ID, constants.SERVER_PEM, + constants.BUILD_TYPE_PRIORITY, constants.FLAVOR), + provides=constants.COMPUTE_ID)) + else: + create_amphora_flow.add(compute_tasks.ComputeCreate( + requires=(constants.AMPHORA_ID, constants.BUILD_TYPE_PRIORITY, + constants.FLAVOR), + provides=constants.COMPUTE_ID)) + create_amphora_flow.add(database_tasks.MarkAmphoraBootingInDB( + requires=(constants.AMPHORA_ID, constants.COMPUTE_ID))) + create_amphora_flow.add(compute_tasks.ComputeActiveWait( + requires=(constants.COMPUTE_ID, constants.AMPHORA_ID), + provides=constants.COMPUTE_OBJ)) + create_amphora_flow.add(database_tasks.UpdateAmphoraInfo( + requires=(constants.AMPHORA_ID, constants.COMPUTE_OBJ), + provides=constants.AMPHORA)) + create_amphora_flow.add( + amphora_driver_tasks.AmphoraComputeConnectivityWait( + requires=constants.AMPHORA)) + create_amphora_flow.add(database_tasks.ReloadAmphora( + requires=constants.AMPHORA_ID, + provides=constants.AMPHORA)) + create_amphora_flow.add(amphora_driver_tasks.AmphoraFinalize( + requires=constants.AMPHORA)) + create_amphora_flow.add(database_tasks.MarkAmphoraReadyInDB( + requires=constants.AMPHORA)) + + return create_amphora_flow + + def _get_post_map_lb_subflow(self, prefix, role): + """Set amphora type after mapped to lb.""" + + sf_name = prefix + '-' + constants.POST_MAP_AMP_TO_LB_SUBFLOW + post_map_amp_to_lb = linear_flow.Flow( + sf_name) + + post_map_amp_to_lb.add(database_tasks.ReloadAmphora( + name=sf_name + '-' + constants.RELOAD_AMPHORA, + requires=constants.AMPHORA_ID, + 
provides=constants.AMPHORA)) + + post_map_amp_to_lb.add(amphora_driver_tasks.AmphoraConfigUpdate( + name=sf_name + '-' + constants.AMPHORA_CONFIG_UPDATE_TASK, + requires=(constants.AMPHORA, constants.FLAVOR))) + + if role == constants.ROLE_MASTER: + post_map_amp_to_lb.add(database_tasks.MarkAmphoraMasterInDB( + name=sf_name + '-' + constants.MARK_AMP_MASTER_INDB, + requires=constants.AMPHORA)) + elif role == constants.ROLE_BACKUP: + post_map_amp_to_lb.add(database_tasks.MarkAmphoraBackupInDB( + name=sf_name + '-' + constants.MARK_AMP_BACKUP_INDB, + requires=constants.AMPHORA)) + elif role == constants.ROLE_STANDALONE: + post_map_amp_to_lb.add(database_tasks.MarkAmphoraStandAloneInDB( + name=sf_name + '-' + constants.MARK_AMP_STANDALONE_INDB, + requires=constants.AMPHORA)) + + return post_map_amp_to_lb + + def _get_create_amp_for_lb_subflow(self, prefix, role): + """Create a new amphora for lb.""" + + sf_name = prefix + '-' + constants.CREATE_AMP_FOR_LB_SUBFLOW + create_amp_for_lb_subflow = linear_flow.Flow(sf_name) + create_amp_for_lb_subflow.add(database_tasks.CreateAmphoraInDB( + name=sf_name + '-' + constants.CREATE_AMPHORA_INDB, + provides=constants.AMPHORA_ID)) + + require_server_group_id_condition = ( + role in (constants.ROLE_BACKUP, constants.ROLE_MASTER) and + CONF.nova.enable_anti_affinity) + + if self.REST_AMPHORA_DRIVER: + create_amp_for_lb_subflow.add(cert_task.GenerateServerPEMTask( + name=sf_name + '-' + constants.GENERATE_SERVER_PEM, + provides=constants.SERVER_PEM)) + + create_amp_for_lb_subflow.add( + database_tasks.UpdateAmphoraDBCertExpiration( + name=sf_name + '-' + constants.UPDATE_CERT_EXPIRATION, + requires=(constants.AMPHORA_ID, constants.SERVER_PEM))) + + if require_server_group_id_condition: + create_amp_for_lb_subflow.add(compute_tasks.CertComputeCreate( + name=sf_name + '-' + constants.CERT_COMPUTE_CREATE, + requires=( + constants.AMPHORA_ID, + constants.SERVER_PEM, + constants.BUILD_TYPE_PRIORITY, + constants.SERVER_GROUP_ID, + constants.FLAVOR + ), + provides=constants.COMPUTE_ID)) + else: + create_amp_for_lb_subflow.add(compute_tasks.CertComputeCreate( + name=sf_name + '-' + constants.CERT_COMPUTE_CREATE, + requires=( + constants.AMPHORA_ID, + constants.SERVER_PEM, + constants.BUILD_TYPE_PRIORITY, + constants.FLAVOR + ), + provides=constants.COMPUTE_ID)) + else: + if require_server_group_id_condition: + create_amp_for_lb_subflow.add(compute_tasks.ComputeCreate( + name=sf_name + '-' + constants.COMPUTE_CREATE, + requires=( + constants.AMPHORA_ID, + constants.BUILD_TYPE_PRIORITY, + constants.SERVER_GROUP_ID, + constants.FLAVOR + ), + provides=constants.COMPUTE_ID)) + else: + create_amp_for_lb_subflow.add(compute_tasks.ComputeCreate( + name=sf_name + '-' + constants.COMPUTE_CREATE, + requires=( + constants.AMPHORA_ID, + constants.BUILD_TYPE_PRIORITY, + constants.FLAVOR + ), + provides=constants.COMPUTE_ID)) + + create_amp_for_lb_subflow.add(database_tasks.UpdateAmphoraComputeId( + name=sf_name + '-' + constants.UPDATE_AMPHORA_COMPUTEID, + requires=(constants.AMPHORA_ID, constants.COMPUTE_ID))) + create_amp_for_lb_subflow.add(database_tasks.MarkAmphoraBootingInDB( + name=sf_name + '-' + constants.MARK_AMPHORA_BOOTING_INDB, + requires=(constants.AMPHORA_ID, constants.COMPUTE_ID))) + create_amp_for_lb_subflow.add(compute_tasks.ComputeActiveWait( + name=sf_name + '-' + constants.COMPUTE_WAIT, + requires=(constants.COMPUTE_ID, constants.AMPHORA_ID), + provides=constants.COMPUTE_OBJ)) + create_amp_for_lb_subflow.add(database_tasks.UpdateAmphoraInfo( + name=sf_name + 
'-' + constants.UPDATE_AMPHORA_INFO, + requires=(constants.AMPHORA_ID, constants.COMPUTE_OBJ), + provides=constants.AMPHORA)) + create_amp_for_lb_subflow.add( + amphora_driver_tasks.AmphoraComputeConnectivityWait( + name=sf_name + '-' + constants.AMP_COMPUTE_CONNECTIVITY_WAIT, + requires=constants.AMPHORA)) + create_amp_for_lb_subflow.add(amphora_driver_tasks.AmphoraFinalize( + name=sf_name + '-' + constants.AMPHORA_FINALIZE, + requires=constants.AMPHORA)) + create_amp_for_lb_subflow.add( + database_tasks.MarkAmphoraAllocatedInDB( + name=sf_name + '-' + constants.MARK_AMPHORA_ALLOCATED_INDB, + requires=(constants.AMPHORA, constants.LOADBALANCER_ID))) + create_amp_for_lb_subflow.add(database_tasks.ReloadAmphora( + name=sf_name + '-' + constants.RELOAD_AMPHORA, + requires=constants.AMPHORA_ID, + provides=constants.AMPHORA)) + + if role == constants.ROLE_MASTER: + create_amp_for_lb_subflow.add(database_tasks.MarkAmphoraMasterInDB( + name=sf_name + '-' + constants.MARK_AMP_MASTER_INDB, + requires=constants.AMPHORA)) + elif role == constants.ROLE_BACKUP: + create_amp_for_lb_subflow.add(database_tasks.MarkAmphoraBackupInDB( + name=sf_name + '-' + constants.MARK_AMP_BACKUP_INDB, + requires=constants.AMPHORA)) + elif role == constants.ROLE_STANDALONE: + create_amp_for_lb_subflow.add( + database_tasks.MarkAmphoraStandAloneInDB( + name=sf_name + '-' + constants.MARK_AMP_STANDALONE_INDB, + requires=constants.AMPHORA)) + + return create_amp_for_lb_subflow + + def _allocate_amp_to_lb_decider(self, history): + """decides if the lb shall be mapped to a spare amphora + + :return: True if a spare amphora exists in DB + """ + + return list(history.values())[0] is not None + + def _create_new_amp_for_lb_decider(self, history): + """decides if a new amphora must be created for the lb + + :return: True if there is no spare amphora + """ + + return list(history.values())[0] is None + + def get_amphora_for_lb_subflow( + self, prefix, role=constants.ROLE_STANDALONE): + """Tries to allocate a spare amphora to a loadbalancer if none + + exists, create a new amphora. 
+ """ + + sf_name = prefix + '-' + constants.GET_AMPHORA_FOR_LB_SUBFLOW + + # We need a graph flow here for a conditional flow + amp_for_lb_flow = graph_flow.Flow(sf_name) + + # Setup the task that maps an amphora to a load balancer + allocate_and_associate_amp = database_tasks.MapLoadbalancerToAmphora( + name=sf_name + '-' + constants.MAP_LOADBALANCER_TO_AMPHORA, + requires=(constants.LOADBALANCER_ID, constants.FLAVOR), + provides=constants.AMPHORA_ID) + + # Define a subflow for if we successfully map an amphora + map_lb_to_amp = self._get_post_map_lb_subflow(prefix, role) + # Define a subflow for if we can't map an amphora + create_amp = self._get_create_amp_for_lb_subflow(prefix, role) + + # Add them to the graph flow + amp_for_lb_flow.add(allocate_and_associate_amp, + map_lb_to_amp, create_amp) + + # Setup the decider for the path if we can map an amphora + amp_for_lb_flow.link(allocate_and_associate_amp, map_lb_to_amp, + decider=self._allocate_amp_to_lb_decider, + decider_depth='flow') + # Setup the decider for the path if we can't map an amphora + amp_for_lb_flow.link(allocate_and_associate_amp, create_amp, + decider=self._create_new_amp_for_lb_decider, + decider_depth='flow') + + # Plug the network + # todo(xgerman): Rework failover flow + if prefix != constants.FAILOVER_AMPHORA_FLOW: + sf_name = prefix + '-' + constants.AMP_PLUG_NET_SUBFLOW + amp_for_lb_net_flow = linear_flow.Flow(sf_name) + amp_for_lb_net_flow.add(amp_for_lb_flow) + amp_for_lb_net_flow.add(*self._get_amp_net_subflow(sf_name)) + return amp_for_lb_net_flow + + return amp_for_lb_flow + + def _get_amp_net_subflow(self, sf_name): + flows = [] + flows.append(network_tasks.PlugVIPAmpphora( + name=sf_name + '-' + constants.PLUG_VIP_AMPHORA, + requires=(constants.LOADBALANCER, constants.AMPHORA, + constants.SUBNET), + provides=constants.AMP_DATA)) + + flows.append(network_tasks.ApplyQosAmphora( + name=sf_name + '-' + constants.APPLY_QOS_AMP, + requires=(constants.LOADBALANCER, constants.AMP_DATA, + constants.UPDATE_DICT))) + flows.append(database_tasks.UpdateAmphoraVIPData( + name=sf_name + '-' + constants.UPDATE_AMPHORA_VIP_DATA, + requires=constants.AMP_DATA)) + flows.append(database_tasks.ReloadAmphora( + name=sf_name + '-' + constants.RELOAD_AMP_AFTER_PLUG_VIP, + requires=constants.AMPHORA_ID, + provides=constants.AMPHORA)) + flows.append(database_tasks.ReloadLoadBalancer( + name=sf_name + '-' + constants.RELOAD_LB_AFTER_PLUG_VIP, + requires=constants.LOADBALANCER_ID, + provides=constants.LOADBALANCER)) + flows.append(network_tasks.GetAmphoraNetworkConfigs( + name=sf_name + '-' + constants.GET_AMP_NETWORK_CONFIG, + requires=(constants.LOADBALANCER, constants.AMPHORA), + provides=constants.AMPHORA_NETWORK_CONFIG)) + flows.append(amphora_driver_tasks.AmphoraPostVIPPlug( + name=sf_name + '-' + constants.AMP_POST_VIP_PLUG, + rebind={constants.AMPHORAE_NETWORK_CONFIG: + constants.AMPHORA_NETWORK_CONFIG}, + requires=(constants.LOADBALANCER, + constants.AMPHORAE_NETWORK_CONFIG))) + return flows + + def get_delete_amphora_flow(self): + """Creates a flow to delete an amphora. + + This should be configurable in the config file + :returns: The flow for deleting the amphora + :raises AmphoraNotFound: The referenced Amphora was not found + """ + + delete_amphora_flow = linear_flow.Flow(constants.DELETE_AMPHORA_FLOW) + delete_amphora_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask( + requires=constants.AMPHORA)) + delete_amphora_flow.add(database_tasks. 
+ MarkAmphoraPendingDeleteInDB( + requires=constants.AMPHORA)) + delete_amphora_flow.add(database_tasks. + MarkAmphoraHealthBusy( + requires=constants.AMPHORA)) + delete_amphora_flow.add(compute_tasks.ComputeDelete( + requires=constants.AMPHORA)) + delete_amphora_flow.add(database_tasks. + DisableAmphoraHealthMonitoring( + requires=constants.AMPHORA)) + delete_amphora_flow.add(database_tasks. + MarkAmphoraDeletedInDB( + requires=constants.AMPHORA)) + return delete_amphora_flow + + def get_failover_flow(self, role=constants.ROLE_STANDALONE, + load_balancer=None): + """Creates a flow to failover a stale amphora + + :returns: The flow for amphora failover + """ + + failover_amphora_flow = linear_flow.Flow( + constants.FAILOVER_AMPHORA_FLOW) + + failover_amphora_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask( + rebind={constants.AMPHORA: constants.FAILED_AMPHORA}, + requires=constants.AMPHORA)) + + # Note: It seems intuitive to boot an amphora prior to deleting + # the old amphora, however this is a complicated issue. + # If the target host (due to anit-affinity) is resource + # constrained, this will fail where a post-delete will + # succeed. Since this is async with the API it would result + # in the LB ending in ERROR though the amps are still alive. + # Consider in the future making this a complicated + # try-on-failure-retry flow, or move upgrade failovers to be + # synchronous with the API. For now spares pool and act/stdby + # will mitigate most of this delay. + + # Delete the old amphora + failover_amphora_flow.add( + database_tasks.MarkAmphoraPendingDeleteInDB( + rebind={constants.AMPHORA: constants.FAILED_AMPHORA}, + requires=constants.AMPHORA)) + failover_amphora_flow.add( + database_tasks.MarkAmphoraHealthBusy( + rebind={constants.AMPHORA: constants.FAILED_AMPHORA}, + requires=constants.AMPHORA)) + failover_amphora_flow.add(compute_tasks.ComputeDelete( + rebind={constants.AMPHORA: constants.FAILED_AMPHORA}, + requires=constants.AMPHORA)) + failover_amphora_flow.add(network_tasks.WaitForPortDetach( + rebind={constants.AMPHORA: constants.FAILED_AMPHORA}, + requires=constants.AMPHORA)) + failover_amphora_flow.add(database_tasks.MarkAmphoraDeletedInDB( + rebind={constants.AMPHORA: constants.FAILED_AMPHORA}, + requires=constants.AMPHORA)) + + # If this is an unallocated amp (spares pool), we're done + if not load_balancer: + failover_amphora_flow.add( + database_tasks.DisableAmphoraHealthMonitoring( + rebind={constants.AMPHORA: constants.FAILED_AMPHORA}, + requires=constants.AMPHORA)) + return failover_amphora_flow + + # Save failed amphora details for later + failover_amphora_flow.add( + database_tasks.GetAmphoraDetails( + rebind={constants.AMPHORA: constants.FAILED_AMPHORA}, + requires=constants.AMPHORA, + provides=constants.AMP_DATA)) + + # Get a new amphora + # Note: Role doesn't matter here. We will update it later. 
+ get_amp_subflow = self.get_amphora_for_lb_subflow( + prefix=constants.FAILOVER_AMPHORA_FLOW) + failover_amphora_flow.add(get_amp_subflow) + + # Update the new amphora with the failed amphora details + failover_amphora_flow.add(database_tasks.UpdateAmpFailoverDetails( + requires=(constants.AMPHORA, constants.AMP_DATA))) + + # Update the data stored in the flow from the database + failover_amphora_flow.add(database_tasks.ReloadLoadBalancer( + requires=constants.LOADBALANCER_ID, + provides=constants.LOADBALANCER)) + + failover_amphora_flow.add(database_tasks.ReloadAmphora( + requires=constants.AMPHORA_ID, + provides=constants.AMPHORA)) + + # Prepare to reconnect the network interface(s) + failover_amphora_flow.add(network_tasks.GetAmphoraeNetworkConfigs( + requires=constants.LOADBALANCER, + provides=constants.AMPHORAE_NETWORK_CONFIG)) + failover_amphora_flow.add(database_tasks.GetListenersFromLoadbalancer( + requires=constants.LOADBALANCER, provides=constants.LISTENERS)) + failover_amphora_flow.add(database_tasks.GetAmphoraeFromLoadbalancer( + requires=constants.LOADBALANCER, provides=constants.AMPHORAE)) + + # Plug the VIP ports into the new amphora + # The reason for moving these steps here is the udp listeners want to + # do some kernel configuration before Listener update for forbidding + # failure during rebuild amphora. + failover_amphora_flow.add(network_tasks.PlugVIPPort( + requires=(constants.AMPHORA, constants.AMPHORAE_NETWORK_CONFIG))) + failover_amphora_flow.add(amphora_driver_tasks.AmphoraPostVIPPlug( + requires=(constants.AMPHORA, constants.LOADBALANCER, + constants.AMPHORAE_NETWORK_CONFIG))) + + # Listeners update needs to be run on all amphora to update + # their peer configurations. So parallelize this with an + # unordered subflow. + update_amps_subflow = unordered_flow.Flow( + constants.UPDATE_AMPS_SUBFLOW) + + timeout_dict = { + constants.CONN_MAX_RETRIES: + CONF.haproxy_amphora.active_connection_max_retries, + constants.CONN_RETRY_INTERVAL: + CONF.haproxy_amphora.active_connection_rety_interval} + + # Setup parallel flows for each amp. We don't know the new amp + # details at flow creation time, so setup a subflow for each + # amp on the LB, they let the task index into a list of amps + # to find the amphora it should work on. 
+ amp_index = 0 + for amp in load_balancer.amphorae: + if amp.status == constants.DELETED: + continue + update_amps_subflow.add( + amphora_driver_tasks.AmpListenersUpdate( + name=constants.AMP_LISTENER_UPDATE + '-' + str(amp_index), + requires=(constants.LISTENERS, constants.AMPHORAE), + inject={constants.AMPHORA_INDEX: amp_index, + constants.TIMEOUT_DICT: timeout_dict})) + amp_index += 1 + + failover_amphora_flow.add(update_amps_subflow) + + # Plug the member networks into the new amphora + failover_amphora_flow.add(network_tasks.CalculateAmphoraDelta( + requires=(constants.LOADBALANCER, constants.AMPHORA), + provides=constants.DELTA)) + + failover_amphora_flow.add(network_tasks.HandleNetworkDelta( + requires=(constants.AMPHORA, constants.DELTA), + provides=constants.ADDED_PORTS)) + + failover_amphora_flow.add(amphora_driver_tasks.AmphoraePostNetworkPlug( + requires=(constants.LOADBALANCER, constants.ADDED_PORTS))) + + failover_amphora_flow.add(database_tasks.ReloadLoadBalancer( + name='octavia-failover-LB-reload-2', + requires=constants.LOADBALANCER_ID, + provides=constants.LOADBALANCER)) + + # Handle the amphora role and VRRP if necessary + if role == constants.ROLE_MASTER: + failover_amphora_flow.add(database_tasks.MarkAmphoraMasterInDB( + name=constants.MARK_AMP_MASTER_INDB, + requires=constants.AMPHORA)) + vrrp_subflow = self.get_vrrp_subflow(role) + failover_amphora_flow.add(vrrp_subflow) + elif role == constants.ROLE_BACKUP: + failover_amphora_flow.add(database_tasks.MarkAmphoraBackupInDB( + name=constants.MARK_AMP_BACKUP_INDB, + requires=constants.AMPHORA)) + vrrp_subflow = self.get_vrrp_subflow(role) + failover_amphora_flow.add(vrrp_subflow) + elif role == constants.ROLE_STANDALONE: + failover_amphora_flow.add( + database_tasks.MarkAmphoraStandAloneInDB( + name=constants.MARK_AMP_STANDALONE_INDB, + requires=constants.AMPHORA)) + + failover_amphora_flow.add(amphora_driver_tasks.ListenersStart( + requires=(constants.LOADBALANCER, constants.LISTENERS, + constants.AMPHORA))) + failover_amphora_flow.add( + database_tasks.DisableAmphoraHealthMonitoring( + rebind={constants.AMPHORA: constants.FAILED_AMPHORA}, + requires=constants.AMPHORA)) + + return failover_amphora_flow + + def get_vrrp_subflow(self, prefix): + sf_name = prefix + '-' + constants.GET_VRRP_SUBFLOW + vrrp_subflow = linear_flow.Flow(sf_name) + vrrp_subflow.add(network_tasks.GetAmphoraeNetworkConfigs( + name=sf_name + '-' + constants.GET_AMP_NETWORK_CONFIG, + requires=constants.LOADBALANCER, + provides=constants.AMPHORAE_NETWORK_CONFIG)) + vrrp_subflow.add(amphora_driver_tasks.AmphoraUpdateVRRPInterface( + name=sf_name + '-' + constants.AMP_UPDATE_VRRP_INTF, + requires=constants.LOADBALANCER, + provides=constants.LOADBALANCER)) + vrrp_subflow.add(database_tasks.CreateVRRPGroupForLB( + name=sf_name + '-' + constants.CREATE_VRRP_GROUP_FOR_LB, + requires=constants.LOADBALANCER, + provides=constants.LOADBALANCER)) + vrrp_subflow.add(amphora_driver_tasks.AmphoraVRRPUpdate( + name=sf_name + '-' + constants.AMP_VRRP_UPDATE, + requires=(constants.LOADBALANCER, + constants.AMPHORAE_NETWORK_CONFIG))) + vrrp_subflow.add(amphora_driver_tasks.AmphoraVRRPStart( + name=sf_name + '-' + constants.AMP_VRRP_START, + requires=constants.LOADBALANCER)) + return vrrp_subflow + + def cert_rotate_amphora_flow(self): + """Implement rotation for amphora's cert. + + 1. Create a new certificate + 2. Upload the cert to amphora + 3. update the newly created certificate info to amphora + 4. 
update the cert_busy flag to be false after rotation + + :returns: The flow for updating an amphora + """ + rotated_amphora_flow = linear_flow.Flow( + constants.CERT_ROTATE_AMPHORA_FLOW) + + rotated_amphora_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask( + requires=constants.AMPHORA)) + + # create a new certificate, the returned value is the newly created + # certificate + rotated_amphora_flow.add(cert_task.GenerateServerPEMTask( + provides=constants.SERVER_PEM)) + + # update it in amphora task + rotated_amphora_flow.add(amphora_driver_tasks.AmphoraCertUpload( + requires=(constants.AMPHORA, constants.SERVER_PEM))) + + # update the newly created certificate info to amphora + rotated_amphora_flow.add(database_tasks.UpdateAmphoraDBCertExpiration( + requires=(constants.AMPHORA_ID, constants.SERVER_PEM))) + + # update the cert_busy flag to be false after rotation + rotated_amphora_flow.add(database_tasks.UpdateAmphoraCertBusyToFalse( + requires=constants.AMPHORA)) + + return rotated_amphora_flow + + def update_amphora_config_flow(self): + """Creates a flow to update the amphora agent configuration. + + :returns: The flow for updating an amphora + """ + update_amphora_flow = linear_flow.Flow( + constants.UPDATE_AMPHORA_CONFIG_FLOW) + + update_amphora_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask( + requires=constants.AMPHORA)) + + update_amphora_flow.add(amphora_driver_tasks.AmphoraConfigUpdate( + requires=(constants.AMPHORA, constants.FLAVOR))) + + return update_amphora_flow diff --git a/octavia/controller/worker/v2/flows/health_monitor_flows.py b/octavia/controller/worker/v2/flows/health_monitor_flows.py new file mode 100644 index 0000000000..2fc14f1324 --- /dev/null +++ b/octavia/controller/worker/v2/flows/health_monitor_flows.py @@ -0,0 +1,105 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
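The amphora flows above and the health monitor, L7 policy, and L7 rule flow modules that follow all compose TaskFlow tasks through the same requires/provides contract: each task declares what it reads from and writes into the flow's shared storage, and the engine wires them together and injects the initial values from the store. A tiny, self-contained sketch of that contract, using made-up task names rather than anything from this patch:

    from taskflow import engines
    from taskflow import task
    from taskflow.patterns import linear_flow


    class GenerateCert(task.Task):
        def execute(self, amphora_id):
            # Pretend to build a PEM bundle for the given amphora.
            return 'PEM-for-%s' % amphora_id


    class UploadCert(task.Task):
        def execute(self, amphora_id, server_pem):
            # Consume the value the previous task provided.
            print('uploading %s to %s' % (server_pem, amphora_id))


    flow = linear_flow.Flow('cert-rotate-sketch')
    flow.add(GenerateCert(requires='amphora_id', provides='server_pem'))
    flow.add(UploadCert(requires=('amphora_id', 'server_pem')))

    # 'amphora_id' is seeded through the store, just as the controller
    # worker seeds its flows via _taskflow_load(..., store={...}).
    engines.run(flow, store={'amphora_id': 'amp-1'})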
+# + +from taskflow.patterns import linear_flow + +from octavia.common import constants +from octavia.controller.worker.v2.tasks import amphora_driver_tasks +from octavia.controller.worker.v2.tasks import database_tasks +from octavia.controller.worker.v2.tasks import lifecycle_tasks +from octavia.controller.worker.v2.tasks import model_tasks + + +class HealthMonitorFlows(object): + + def get_create_health_monitor_flow(self): + """Create a flow to create a health monitor + + :returns: The flow for creating a health monitor + """ + create_hm_flow = linear_flow.Flow(constants.CREATE_HEALTH_MONITOR_FLOW) + create_hm_flow.add(lifecycle_tasks.HealthMonitorToErrorOnRevertTask( + requires=[constants.HEALTH_MON, + constants.LISTENERS, + constants.LOADBALANCER])) + create_hm_flow.add(database_tasks.MarkHealthMonitorPendingCreateInDB( + requires=constants.HEALTH_MON)) + create_hm_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=[constants.LOADBALANCER, constants.LISTENERS])) + create_hm_flow.add(database_tasks.MarkHealthMonitorActiveInDB( + requires=constants.HEALTH_MON)) + create_hm_flow.add(database_tasks.MarkPoolActiveInDB( + requires=constants.POOL)) + create_hm_flow.add(database_tasks.MarkLBAndListenersActiveInDB( + requires=[constants.LOADBALANCER, constants.LISTENERS])) + + return create_hm_flow + + def get_delete_health_monitor_flow(self): + """Create a flow to delete a health monitor + + :returns: The flow for deleting a health monitor + """ + delete_hm_flow = linear_flow.Flow(constants.DELETE_HEALTH_MONITOR_FLOW) + delete_hm_flow.add(lifecycle_tasks.HealthMonitorToErrorOnRevertTask( + requires=[constants.HEALTH_MON, + constants.LISTENERS, + constants.LOADBALANCER])) + delete_hm_flow.add(database_tasks.MarkHealthMonitorPendingDeleteInDB( + requires=constants.HEALTH_MON)) + delete_hm_flow.add(model_tasks. 
+ DeleteModelObject(rebind={constants.OBJECT: + constants.HEALTH_MON})) + delete_hm_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=[constants.LOADBALANCER, constants.LISTENERS])) + delete_hm_flow.add(database_tasks.DeleteHealthMonitorInDB( + requires=constants.HEALTH_MON)) + delete_hm_flow.add(database_tasks.DecrementHealthMonitorQuota( + requires=constants.HEALTH_MON)) + delete_hm_flow.add( + database_tasks.UpdatePoolMembersOperatingStatusInDB( + requires=constants.POOL, + inject={constants.OPERATING_STATUS: constants.NO_MONITOR})) + delete_hm_flow.add(database_tasks.MarkPoolActiveInDB( + requires=constants.POOL)) + delete_hm_flow.add(database_tasks.MarkLBAndListenersActiveInDB( + requires=[constants.LOADBALANCER, constants.LISTENERS])) + + return delete_hm_flow + + def get_update_health_monitor_flow(self): + """Create a flow to update a health monitor + + :returns: The flow for updating a health monitor + """ + update_hm_flow = linear_flow.Flow(constants.UPDATE_HEALTH_MONITOR_FLOW) + update_hm_flow.add(lifecycle_tasks.HealthMonitorToErrorOnRevertTask( + requires=[constants.HEALTH_MON, + constants.LISTENERS, + constants.LOADBALANCER])) + update_hm_flow.add(database_tasks.MarkHealthMonitorPendingUpdateInDB( + requires=constants.HEALTH_MON)) + update_hm_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=[constants.LOADBALANCER, constants.LISTENERS])) + update_hm_flow.add(database_tasks.UpdateHealthMonInDB( + requires=[constants.HEALTH_MON, constants.UPDATE_DICT])) + update_hm_flow.add(database_tasks.MarkHealthMonitorActiveInDB( + requires=constants.HEALTH_MON)) + update_hm_flow.add(database_tasks.MarkPoolActiveInDB( + requires=constants.POOL)) + update_hm_flow.add(database_tasks.MarkLBAndListenersActiveInDB( + requires=[constants.LOADBALANCER, constants.LISTENERS])) + + return update_hm_flow diff --git a/octavia/controller/worker/v2/flows/l7policy_flows.py b/octavia/controller/worker/v2/flows/l7policy_flows.py new file mode 100644 index 0000000000..98e11b2584 --- /dev/null +++ b/octavia/controller/worker/v2/flows/l7policy_flows.py @@ -0,0 +1,92 @@ +# Copyright 2016 Blue Box, an IBM Company +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
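The health monitor flows above use all three of TaskFlow's argument-wiring options: "requires" pulls a store entry by its own name, "rebind" feeds a differently named store entry into a task argument (as DeleteModelObject does with OBJECT/HEALTH_MON), and "inject" pins a value for a single task (as with OPERATING_STATUS: NO_MONITOR). A self-contained toy sketch of the same wiring, using hypothetical task and key names rather than anything from this patch:

    from taskflow import engines, task
    from taskflow.patterns import linear_flow


    class MarkActive(task.Task):
        def execute(self, health_mon):
            # 'health_mon' is resolved from the engine store by name.
            print('marking %s active' % health_mon)


    flow = linear_flow.Flow('wiring-demo')
    # requires: pass the store entry named 'health_mon' straight through.
    flow.add(MarkActive(requires='health_mon'))
    # rebind: feed the store entry 'object' into the 'health_mon' argument.
    flow.add(MarkActive(name='rebound', rebind={'health_mon': 'object'}))
    # inject: hard-code the value for this one task, bypassing the store.
    flow.add(MarkActive(name='injected', inject={'health_mon': 'hm-123'}))

    engines.load(flow, store={'health_mon': 'hm-1', 'object': 'hm-2'}).run()
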
+# + +from taskflow.patterns import linear_flow + +from octavia.common import constants +from octavia.controller.worker.v2.tasks import amphora_driver_tasks +from octavia.controller.worker.v2.tasks import database_tasks +from octavia.controller.worker.v2.tasks import lifecycle_tasks +from octavia.controller.worker.v2.tasks import model_tasks + + +class L7PolicyFlows(object): + + def get_create_l7policy_flow(self): + """Create a flow to create an L7 policy + + :returns: The flow for creating an L7 policy + """ + create_l7policy_flow = linear_flow.Flow(constants.CREATE_L7POLICY_FLOW) + create_l7policy_flow.add(lifecycle_tasks.L7PolicyToErrorOnRevertTask( + requires=[constants.L7POLICY, + constants.LISTENERS, + constants.LOADBALANCER])) + create_l7policy_flow.add(database_tasks.MarkL7PolicyPendingCreateInDB( + requires=constants.L7POLICY)) + create_l7policy_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=[constants.LOADBALANCER, constants.LISTENERS])) + create_l7policy_flow.add(database_tasks.MarkL7PolicyActiveInDB( + requires=constants.L7POLICY)) + create_l7policy_flow.add(database_tasks.MarkLBAndListenersActiveInDB( + requires=[constants.LOADBALANCER, constants.LISTENERS])) + + return create_l7policy_flow + + def get_delete_l7policy_flow(self): + """Create a flow to delete an L7 policy + + :returns: The flow for deleting an L7 policy + """ + delete_l7policy_flow = linear_flow.Flow(constants.DELETE_L7POLICY_FLOW) + delete_l7policy_flow.add(lifecycle_tasks.L7PolicyToErrorOnRevertTask( + requires=[constants.L7POLICY, + constants.LISTENERS, + constants.LOADBALANCER])) + delete_l7policy_flow.add(database_tasks.MarkL7PolicyPendingDeleteInDB( + requires=constants.L7POLICY)) + delete_l7policy_flow.add(model_tasks.DeleteModelObject( + rebind={constants.OBJECT: constants.L7POLICY})) + delete_l7policy_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=[constants.LOADBALANCER, constants.LISTENERS])) + delete_l7policy_flow.add(database_tasks.DeleteL7PolicyInDB( + requires=constants.L7POLICY)) + delete_l7policy_flow.add(database_tasks.MarkLBAndListenersActiveInDB( + requires=[constants.LOADBALANCER, constants.LISTENERS])) + + return delete_l7policy_flow + + def get_update_l7policy_flow(self): + """Create a flow to update an L7 policy + + :returns: The flow for updating an L7 policy + """ + update_l7policy_flow = linear_flow.Flow(constants.UPDATE_L7POLICY_FLOW) + update_l7policy_flow.add(lifecycle_tasks.L7PolicyToErrorOnRevertTask( + requires=[constants.L7POLICY, + constants.LISTENERS, + constants.LOADBALANCER])) + update_l7policy_flow.add(database_tasks.MarkL7PolicyPendingUpdateInDB( + requires=constants.L7POLICY)) + update_l7policy_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=[constants.LOADBALANCER, constants.LISTENERS])) + update_l7policy_flow.add(database_tasks.UpdateL7PolicyInDB( + requires=[constants.L7POLICY, constants.UPDATE_DICT])) + update_l7policy_flow.add(database_tasks.MarkL7PolicyActiveInDB( + requires=constants.L7POLICY)) + update_l7policy_flow.add(database_tasks.MarkLBAndListenersActiveInDB( + requires=[constants.LOADBALANCER, constants.LISTENERS])) + + return update_l7policy_flow diff --git a/octavia/controller/worker/v2/flows/l7rule_flows.py b/octavia/controller/worker/v2/flows/l7rule_flows.py new file mode 100644 index 0000000000..7f828f8f9e --- /dev/null +++ b/octavia/controller/worker/v2/flows/l7rule_flows.py @@ -0,0 +1,98 @@ +# Copyright 2016 Blue Box, an IBM Company +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not 
use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from taskflow.patterns import linear_flow + +from octavia.common import constants +from octavia.controller.worker.v2.tasks import amphora_driver_tasks +from octavia.controller.worker.v2.tasks import database_tasks +from octavia.controller.worker.v2.tasks import lifecycle_tasks +from octavia.controller.worker.v2.tasks import model_tasks + + +class L7RuleFlows(object): + + def get_create_l7rule_flow(self): + """Create a flow to create an L7 rule + + :returns: The flow for creating an L7 rule + """ + create_l7rule_flow = linear_flow.Flow(constants.CREATE_L7RULE_FLOW) + create_l7rule_flow.add(lifecycle_tasks.L7RuleToErrorOnRevertTask( + requires=[constants.L7RULE, + constants.LISTENERS, + constants.LOADBALANCER])) + create_l7rule_flow.add(database_tasks.MarkL7RulePendingCreateInDB( + requires=constants.L7RULE)) + create_l7rule_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=[constants.LOADBALANCER, constants.LISTENERS])) + create_l7rule_flow.add(database_tasks.MarkL7RuleActiveInDB( + requires=constants.L7RULE)) + create_l7rule_flow.add(database_tasks.MarkL7PolicyActiveInDB( + requires=constants.L7POLICY)) + create_l7rule_flow.add(database_tasks.MarkLBAndListenersActiveInDB( + requires=[constants.LOADBALANCER, constants.LISTENERS])) + + return create_l7rule_flow + + def get_delete_l7rule_flow(self): + """Create a flow to delete an L7 rule + + :returns: The flow for deleting an L7 rule + """ + delete_l7rule_flow = linear_flow.Flow(constants.DELETE_L7RULE_FLOW) + delete_l7rule_flow.add(lifecycle_tasks.L7RuleToErrorOnRevertTask( + requires=[constants.L7RULE, + constants.LISTENERS, + constants.LOADBALANCER])) + delete_l7rule_flow.add(database_tasks.MarkL7RulePendingDeleteInDB( + requires=constants.L7RULE)) + delete_l7rule_flow.add(model_tasks.DeleteModelObject( + rebind={constants.OBJECT: constants.L7RULE})) + delete_l7rule_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=[constants.LOADBALANCER, constants.LISTENERS])) + delete_l7rule_flow.add(database_tasks.DeleteL7RuleInDB( + requires=constants.L7RULE)) + delete_l7rule_flow.add(database_tasks.MarkL7PolicyActiveInDB( + requires=constants.L7POLICY)) + delete_l7rule_flow.add(database_tasks.MarkLBAndListenersActiveInDB( + requires=[constants.LOADBALANCER, constants.LISTENERS])) + + return delete_l7rule_flow + + def get_update_l7rule_flow(self): + """Create a flow to update an L7 rule + + :returns: The flow for updating an L7 rule + """ + update_l7rule_flow = linear_flow.Flow(constants.UPDATE_L7RULE_FLOW) + update_l7rule_flow.add(lifecycle_tasks.L7RuleToErrorOnRevertTask( + requires=[constants.L7RULE, + constants.LISTENERS, + constants.LOADBALANCER])) + update_l7rule_flow.add(database_tasks.MarkL7RulePendingUpdateInDB( + requires=constants.L7RULE)) + update_l7rule_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=[constants.LOADBALANCER, constants.LISTENERS])) + update_l7rule_flow.add(database_tasks.UpdateL7RuleInDB( + requires=[constants.L7RULE, constants.UPDATE_DICT])) + update_l7rule_flow.add(database_tasks.MarkL7RuleActiveInDB( + 
requires=constants.L7RULE)) + update_l7rule_flow.add(database_tasks.MarkL7PolicyActiveInDB( + requires=constants.L7POLICY)) + update_l7rule_flow.add(database_tasks.MarkLBAndListenersActiveInDB( + requires=[constants.LOADBALANCER, constants.LISTENERS])) + + return update_l7rule_flow diff --git a/octavia/controller/worker/v2/flows/listener_flows.py b/octavia/controller/worker/v2/flows/listener_flows.py new file mode 100644 index 0000000000..d02cd03ac4 --- /dev/null +++ b/octavia/controller/worker/v2/flows/listener_flows.py @@ -0,0 +1,126 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from taskflow.patterns import linear_flow + +from octavia.common import constants +from octavia.controller.worker.v2.tasks import amphora_driver_tasks +from octavia.controller.worker.v2.tasks import database_tasks +from octavia.controller.worker.v2.tasks import lifecycle_tasks +from octavia.controller.worker.v2.tasks import network_tasks + + +class ListenerFlows(object): + + def get_create_listener_flow(self): + """Create a flow to create a listener + + :returns: The flow for creating a listener + """ + create_listener_flow = linear_flow.Flow(constants.CREATE_LISTENER_FLOW) + create_listener_flow.add(lifecycle_tasks.ListenersToErrorOnRevertTask( + requires=[constants.LOADBALANCER, constants.LISTENERS])) + create_listener_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=[constants.LOADBALANCER, constants.LISTENERS])) + create_listener_flow.add(network_tasks.UpdateVIP( + requires=constants.LOADBALANCER)) + create_listener_flow.add(database_tasks. 
+ MarkLBAndListenersActiveInDB( + requires=[constants.LOADBALANCER, + constants.LISTENERS])) + return create_listener_flow + + def get_create_all_listeners_flow(self): + """Create a flow to create all listeners + + :returns: The flow for creating all listeners + """ + create_all_listeners_flow = linear_flow.Flow( + constants.CREATE_LISTENERS_FLOW) + create_all_listeners_flow.add( + database_tasks.GetListenersFromLoadbalancer( + requires=constants.LOADBALANCER, + provides=constants.LISTENERS)) + create_all_listeners_flow.add(database_tasks.ReloadLoadBalancer( + requires=constants.LOADBALANCER_ID, + provides=constants.LOADBALANCER)) + create_all_listeners_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=[constants.LOADBALANCER, constants.LISTENERS])) + create_all_listeners_flow.add(network_tasks.UpdateVIP( + requires=constants.LOADBALANCER)) + return create_all_listeners_flow + + def get_delete_listener_flow(self): + """Create a flow to delete a listener + + :returns: The flow for deleting a listener + """ + delete_listener_flow = linear_flow.Flow(constants.DELETE_LISTENER_FLOW) + delete_listener_flow.add(lifecycle_tasks.ListenerToErrorOnRevertTask( + requires=constants.LISTENER)) + delete_listener_flow.add(amphora_driver_tasks.ListenerDelete( + requires=[constants.LOADBALANCER, constants.LISTENER])) + delete_listener_flow.add(network_tasks.UpdateVIPForDelete( + requires=constants.LOADBALANCER)) + delete_listener_flow.add(database_tasks.DeleteListenerInDB( + requires=constants.LISTENER)) + delete_listener_flow.add(database_tasks.DecrementListenerQuota( + requires=constants.LISTENER)) + delete_listener_flow.add(database_tasks.MarkLBActiveInDB( + requires=constants.LOADBALANCER)) + + return delete_listener_flow + + def get_delete_listener_internal_flow(self, listener_name): + """Create a flow to delete a listener and l7policies internally + + (will skip deletion on the amp and marking LB active) + + :returns: The flow for deleting a listener + """ + delete_listener_flow = linear_flow.Flow(constants.DELETE_LISTENER_FLOW) + # Should cascade delete all L7 policies + delete_listener_flow.add(network_tasks.UpdateVIPForDelete( + name='delete_update_vip_' + listener_name, + requires=constants.LOADBALANCER)) + delete_listener_flow.add(database_tasks.DeleteListenerInDB( + name='delete_listener_in_db_' + listener_name, + requires=constants.LISTENER, + rebind={constants.LISTENER: listener_name})) + delete_listener_flow.add(database_tasks.DecrementListenerQuota( + name='decrement_listener_quota_' + listener_name, + requires=constants.LISTENER, + rebind={constants.LISTENER: listener_name})) + + return delete_listener_flow + + def get_update_listener_flow(self): + """Create a flow to update a listener + + :returns: The flow for updating a listener + """ + update_listener_flow = linear_flow.Flow(constants.UPDATE_LISTENER_FLOW) + update_listener_flow.add(lifecycle_tasks.ListenersToErrorOnRevertTask( + requires=[constants.LOADBALANCER, constants.LISTENERS])) + update_listener_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=[constants.LOADBALANCER, constants.LISTENERS])) + update_listener_flow.add(database_tasks.UpdateListenerInDB( + requires=[constants.LISTENER, constants.UPDATE_DICT])) + update_listener_flow.add(database_tasks. 
+ MarkLBAndListenersActiveInDB( + requires=[constants.LOADBALANCER, + constants.LISTENERS])) + + return update_listener_flow diff --git a/octavia/controller/worker/v2/flows/load_balancer_flows.py b/octavia/controller/worker/v2/flows/load_balancer_flows.py new file mode 100644 index 0000000000..968a719090 --- /dev/null +++ b/octavia/controller/worker/v2/flows/load_balancer_flows.py @@ -0,0 +1,341 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from oslo_config import cfg +from oslo_log import log as logging +from taskflow.patterns import linear_flow +from taskflow.patterns import unordered_flow + +from octavia.common import constants +from octavia.common import exceptions +from octavia.controller.worker.v2.flows import amphora_flows +from octavia.controller.worker.v2.flows import listener_flows +from octavia.controller.worker.v2.flows import member_flows +from octavia.controller.worker.v2.flows import pool_flows +from octavia.controller.worker.v2.tasks import amphora_driver_tasks +from octavia.controller.worker.v2.tasks import compute_tasks +from octavia.controller.worker.v2.tasks import database_tasks +from octavia.controller.worker.v2.tasks import lifecycle_tasks +from octavia.controller.worker.v2.tasks import network_tasks + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +class LoadBalancerFlows(object): + + def __init__(self): + self.amp_flows = amphora_flows.AmphoraFlows() + self.listener_flows = listener_flows.ListenerFlows() + self.pool_flows = pool_flows.PoolFlows() + self.member_flows = member_flows.MemberFlows() + + def get_create_load_balancer_flow(self, topology, listeners=None): + """Creates a conditional graph flow that allocates a loadbalancer to + + two spare amphorae. + :raises InvalidTopology: Invalid topology specified + :return: The graph flow for creating a loadbalancer. + """ + f_name = constants.CREATE_LOADBALANCER_FLOW + lb_create_flow = linear_flow.Flow(f_name) + + lb_create_flow.add(lifecycle_tasks.LoadBalancerIDToErrorOnRevertTask( + requires=constants.LOADBALANCER_ID)) + + # allocate VIP + lb_create_flow.add(database_tasks.ReloadLoadBalancer( + name=constants.RELOAD_LB_BEFOR_ALLOCATE_VIP, + requires=constants.LOADBALANCER_ID, + provides=constants.LOADBALANCER + )) + lb_create_flow.add(network_tasks.AllocateVIP( + requires=constants.LOADBALANCER, + provides=constants.VIP)) + lb_create_flow.add(database_tasks.UpdateVIPAfterAllocation( + requires=(constants.LOADBALANCER_ID, constants.VIP), + provides=constants.LOADBALANCER)) + lb_create_flow.add(network_tasks.UpdateVIPSecurityGroup( + requires=constants.LOADBALANCER)) + lb_create_flow.add(network_tasks.GetSubnetFromVIP( + requires=constants.LOADBALANCER, + provides=constants.SUBNET)) + + if topology == constants.TOPOLOGY_ACTIVE_STANDBY: + lb_create_flow.add(*self._create_active_standby_topology()) + elif topology == constants.TOPOLOGY_SINGLE: + lb_create_flow.add(*self._create_single_topology()) + else: + LOG.error("Unknown topology: %s. 
Unable to build load balancer.", + topology) + raise exceptions.InvalidTopology(topology=topology) + + post_amp_prefix = constants.POST_LB_AMP_ASSOCIATION_SUBFLOW + lb_create_flow.add( + self.get_post_lb_amp_association_flow( + post_amp_prefix, topology, mark_active=(not listeners))) + + if listeners: + lb_create_flow.add(*self._create_listeners_flow()) + + return lb_create_flow + + def _create_single_topology(self): + return (self.amp_flows.get_amphora_for_lb_subflow( + prefix=constants.ROLE_STANDALONE, + role=constants.ROLE_STANDALONE), ) + + def _create_active_standby_topology( + self, lf_name=constants.CREATE_LOADBALANCER_FLOW): + # When we boot up amphora for an active/standby topology, + # we should leverage the Nova anti-affinity capabilities + # to place the amphora on different hosts, also we need to check + # if anti-affinity-flag is enabled or not: + anti_affinity = CONF.nova.enable_anti_affinity + flows = [] + if anti_affinity: + # we need to create a server group first + flows.append( + compute_tasks.NovaServerGroupCreate( + name=lf_name + '-' + + constants.CREATE_SERVER_GROUP_FLOW, + requires=(constants.LOADBALANCER_ID), + provides=constants.SERVER_GROUP_ID)) + + # update server group id in lb table + flows.append( + database_tasks.UpdateLBServerGroupInDB( + name=lf_name + '-' + + constants.UPDATE_LB_SERVERGROUPID_FLOW, + requires=(constants.LOADBALANCER_ID, + constants.SERVER_GROUP_ID))) + + f_name = constants.CREATE_LOADBALANCER_FLOW + amps_flow = unordered_flow.Flow(f_name) + master_amp_sf = self.amp_flows.get_amphora_for_lb_subflow( + prefix=constants.ROLE_MASTER, role=constants.ROLE_MASTER + ) + + backup_amp_sf = self.amp_flows.get_amphora_for_lb_subflow( + prefix=constants.ROLE_BACKUP, role=constants.ROLE_BACKUP) + amps_flow.add(master_amp_sf, backup_amp_sf) + + return flows + [amps_flow] + + def _create_listeners_flow(self): + flows = [] + flows.append( + database_tasks.ReloadLoadBalancer( + name=constants.RELOAD_LB_AFTER_AMP_ASSOC_FULL_GRAPH, + requires=constants.LOADBALANCER_ID, + provides=constants.LOADBALANCER + ) + ) + flows.append( + network_tasks.CalculateDelta( + requires=constants.LOADBALANCER, provides=constants.DELTAS + ) + ) + flows.append( + network_tasks.HandleNetworkDeltas( + requires=constants.DELTAS, provides=constants.ADDED_PORTS + ) + ) + flows.append( + amphora_driver_tasks.AmphoraePostNetworkPlug( + requires=(constants.LOADBALANCER, constants.ADDED_PORTS) + ) + ) + flows.append( + self.listener_flows.get_create_all_listeners_flow() + ) + flows.append( + database_tasks.MarkLBActiveInDB( + mark_subobjects=True, + requires=constants.LOADBALANCER + ) + ) + return flows + + def get_post_lb_amp_association_flow(self, prefix, topology, + mark_active=True): + """Reload the loadbalancer and create networking subflows for + + created/allocated amphorae. + :return: Post amphorae association subflow + """ + + # Note: If any task in this flow failed, the created amphorae will be + # left ''incorrectly'' allocated to the loadbalancer. Likely, + # the get_new_LB_networking_subflow is the most prune to failure + # shall deallocate the amphora from its loadbalancer and put it in a + # READY state. 
+ + sf_name = prefix + '-' + constants.POST_LB_AMP_ASSOCIATION_SUBFLOW + post_create_LB_flow = linear_flow.Flow(sf_name) + post_create_LB_flow.add( + database_tasks.ReloadLoadBalancer( + name=sf_name + '-' + constants.RELOAD_LB_AFTER_AMP_ASSOC, + requires=constants.LOADBALANCER_ID, + provides=constants.LOADBALANCER)) + + if topology == constants.TOPOLOGY_ACTIVE_STANDBY: + vrrp_subflow = self.amp_flows.get_vrrp_subflow(prefix) + post_create_LB_flow.add(vrrp_subflow) + + post_create_LB_flow.add(database_tasks.UpdateLoadbalancerInDB( + requires=[constants.LOADBALANCER, constants.UPDATE_DICT])) + if mark_active: + post_create_LB_flow.add(database_tasks.MarkLBActiveInDB( + name=sf_name + '-' + constants.MARK_LB_ACTIVE_INDB, + requires=constants.LOADBALANCER)) + return post_create_LB_flow + + def _get_delete_listeners_flow(self, lb): + """Sets up an internal delete flow + + Because task flow doesn't support loops we store each listener + we want to delete in the store part and then rebind + :param lb: load balancer + :return: (flow, store) -- flow for the deletion and store with all + the listeners stored properly + """ + listeners_delete_flow = unordered_flow.Flow('listener_delete_flow') + store = {} + for listener in lb.listeners: + listener_name = 'listener_' + listener.id + store[listener_name] = listener + listeners_delete_flow.add( + self.listener_flows.get_delete_listener_internal_flow( + listener_name)) + return (listeners_delete_flow, store) + + def get_delete_load_balancer_flow(self, lb): + """Creates a flow to delete a load balancer. + + :returns: The flow for deleting a load balancer + """ + return self._get_delete_load_balancer_flow(lb, False) + + def _get_delete_pools_flow(self, lb): + """Sets up an internal delete flow + + Because task flow doesn't support loops we store each pool + we want to delete in the store part and then rebind + :param lb: load balancer + :return: (flow, store) -- flow for the deletion and store with all + the listeners stored properly + """ + pools_delete_flow = unordered_flow.Flow('pool_delete_flow') + store = {} + for pool in lb.pools: + pool_name = 'pool' + pool.id + store[pool_name] = pool + pools_delete_flow.add( + self.pool_flows.get_delete_pool_flow_internal( + pool_name)) + return (pools_delete_flow, store) + + def _get_delete_load_balancer_flow(self, lb, cascade): + store = {} + delete_LB_flow = linear_flow.Flow(constants.DELETE_LOADBALANCER_FLOW) + delete_LB_flow.add(lifecycle_tasks.LoadBalancerToErrorOnRevertTask( + requires=constants.LOADBALANCER)) + delete_LB_flow.add(compute_tasks.NovaServerGroupDelete( + requires=constants.SERVER_GROUP_ID)) + delete_LB_flow.add(database_tasks.MarkLBAmphoraeHealthBusy( + requires=constants.LOADBALANCER)) + if cascade: + (listeners_delete, store) = self._get_delete_listeners_flow(lb) + (pools_delete, pool_store) = self._get_delete_pools_flow(lb) + store.update(pool_store) + delete_LB_flow.add(pools_delete) + delete_LB_flow.add(listeners_delete) + delete_LB_flow.add(network_tasks.UnplugVIP( + requires=constants.LOADBALANCER)) + delete_LB_flow.add(network_tasks.DeallocateVIP( + requires=constants.LOADBALANCER)) + delete_LB_flow.add(compute_tasks.DeleteAmphoraeOnLoadBalancer( + requires=constants.LOADBALANCER)) + delete_LB_flow.add(database_tasks.MarkLBAmphoraeDeletedInDB( + requires=constants.LOADBALANCER)) + delete_LB_flow.add(database_tasks.DisableLBAmphoraeHealthMonitoring( + requires=constants.LOADBALANCER)) + delete_LB_flow.add(database_tasks.MarkLBDeletedInDB( + requires=constants.LOADBALANCER)) + 
delete_LB_flow.add(database_tasks.DecrementLoadBalancerQuota( + requires=constants.LOADBALANCER)) + return (delete_LB_flow, store) + + def get_cascade_delete_load_balancer_flow(self, lb): + """Creates a flow to delete a load balancer. + + :returns: The flow for deleting a load balancer + """ + return self._get_delete_load_balancer_flow(lb, True) + + def get_new_LB_networking_subflow(self): + """Create a sub-flow to setup networking. + + :returns: The flow to setup networking for a new amphora + """ + + new_LB_net_subflow = linear_flow.Flow(constants. + LOADBALANCER_NETWORKING_SUBFLOW) + new_LB_net_subflow.add(network_tasks.AllocateVIP( + requires=constants.LOADBALANCER, + provides=constants.VIP)) + new_LB_net_subflow.add(database_tasks.UpdateVIPAfterAllocation( + requires=(constants.LOADBALANCER_ID, constants.VIP), + provides=constants.LOADBALANCER)) + new_LB_net_subflow.add(network_tasks.PlugVIP( + requires=constants.LOADBALANCER, + provides=constants.AMPS_DATA)) + new_LB_net_subflow.add(network_tasks.ApplyQos( + requires=(constants.LOADBALANCER, constants.AMPS_DATA, + constants.UPDATE_DICT))) + new_LB_net_subflow.add(database_tasks.UpdateAmphoraeVIPData( + requires=constants.AMPS_DATA)) + new_LB_net_subflow.add(database_tasks.ReloadLoadBalancer( + name=constants.RELOAD_LB_AFTER_PLUG_VIP, + requires=constants.LOADBALANCER_ID, + provides=constants.LOADBALANCER)) + new_LB_net_subflow.add(network_tasks.GetAmphoraeNetworkConfigs( + requires=constants.LOADBALANCER, + provides=constants.AMPHORAE_NETWORK_CONFIG)) + new_LB_net_subflow.add(amphora_driver_tasks.AmphoraePostVIPPlug( + requires=(constants.LOADBALANCER, + constants.AMPHORAE_NETWORK_CONFIG))) + + return new_LB_net_subflow + + def get_update_load_balancer_flow(self): + """Creates a flow to update a load balancer. + + :returns: The flow for update a load balancer + """ + update_LB_flow = linear_flow.Flow(constants.UPDATE_LOADBALANCER_FLOW) + update_LB_flow.add(lifecycle_tasks.LoadBalancerToErrorOnRevertTask( + requires=constants.LOADBALANCER)) + update_LB_flow.add(network_tasks.ApplyQos( + requires=(constants.LOADBALANCER, constants.UPDATE_DICT))) + update_LB_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=[constants.LOADBALANCER, constants.LISTENERS])) + update_LB_flow.add(database_tasks.UpdateLoadbalancerInDB( + requires=[constants.LOADBALANCER, constants.UPDATE_DICT])) + update_LB_flow.add(database_tasks.MarkLBActiveInDB( + requires=constants.LOADBALANCER)) + + return update_LB_flow diff --git a/octavia/controller/worker/v2/flows/member_flows.py b/octavia/controller/worker/v2/flows/member_flows.py new file mode 100644 index 0000000000..2e6829d3a9 --- /dev/null +++ b/octavia/controller/worker/v2/flows/member_flows.py @@ -0,0 +1,209 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
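Because TaskFlow has no loop construct, the cascade delete above builds one internal sub-flow per listener and pool, parks each child object in the engine store under a per-object key ('listener_<id>', 'pool<id>'), and rebinds those keys inside the sub-flows. A hedged sketch (illustrative only, placeholder load balancer object) of how a caller consumes the (flow, store) pair the factory returns:

    from taskflow import engines

    from octavia.common import constants
    from octavia.controller.worker.v2.flows import load_balancer_flows


    def cascade_delete(lb):
        lb_flows = load_balancer_flows.LoadBalancerFlows()
        # The factory returns the flow plus the per-child store entries,
        # e.g. {'listener_<id>': <listener>, 'pool<id>': <pool>, ...}.
        flow, store = lb_flows.get_cascade_delete_load_balancer_flow(lb)
        # Shared inputs still come from the caller.
        store.update({constants.LOADBALANCER: lb,
                      constants.SERVER_GROUP_ID: lb.server_group_id})
        engines.load(flow, store=store).run()
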
+# + +from taskflow.patterns import linear_flow +from taskflow.patterns import unordered_flow + +from octavia.common import constants +from octavia.controller.worker.v2.tasks import amphora_driver_tasks +from octavia.controller.worker.v2.tasks import database_tasks +from octavia.controller.worker.v2.tasks import lifecycle_tasks +from octavia.controller.worker.v2.tasks import model_tasks +from octavia.controller.worker.v2.tasks import network_tasks + + +class MemberFlows(object): + + def get_create_member_flow(self): + """Create a flow to create a member + + :returns: The flow for creating a member + """ + create_member_flow = linear_flow.Flow(constants.CREATE_MEMBER_FLOW) + create_member_flow.add(lifecycle_tasks.MemberToErrorOnRevertTask( + requires=[constants.MEMBER, + constants.LISTENERS, + constants.LOADBALANCER, + constants.POOL])) + create_member_flow.add(database_tasks.MarkMemberPendingCreateInDB( + requires=constants.MEMBER)) + create_member_flow.add(network_tasks.CalculateDelta( + requires=constants.LOADBALANCER, + provides=constants.DELTAS)) + create_member_flow.add(network_tasks.HandleNetworkDeltas( + requires=constants.DELTAS, provides=constants.ADDED_PORTS)) + create_member_flow.add(amphora_driver_tasks.AmphoraePostNetworkPlug( + requires=(constants.LOADBALANCER, constants.ADDED_PORTS) + )) + create_member_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=(constants.LOADBALANCER, constants.LISTENERS))) + create_member_flow.add(database_tasks.MarkMemberActiveInDB( + requires=constants.MEMBER)) + create_member_flow.add(database_tasks.MarkPoolActiveInDB( + requires=constants.POOL)) + create_member_flow.add(database_tasks. + MarkLBAndListenersActiveInDB( + requires=(constants.LOADBALANCER, + constants.LISTENERS))) + + return create_member_flow + + def get_delete_member_flow(self): + """Create a flow to delete a member + + :returns: The flow for deleting a member + """ + delete_member_flow = linear_flow.Flow(constants.DELETE_MEMBER_FLOW) + delete_member_flow.add(lifecycle_tasks.MemberToErrorOnRevertTask( + requires=[constants.MEMBER, + constants.LISTENERS, + constants.LOADBALANCER, + constants.POOL])) + delete_member_flow.add(database_tasks.MarkMemberPendingDeleteInDB( + requires=constants.MEMBER)) + delete_member_flow.add(model_tasks. + DeleteModelObject(rebind={constants.OBJECT: + constants.MEMBER})) + delete_member_flow.add(database_tasks.DeleteMemberInDB( + requires=constants.MEMBER)) + delete_member_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=[constants.LOADBALANCER, constants.LISTENERS])) + delete_member_flow.add(database_tasks.DecrementMemberQuota( + requires=constants.MEMBER)) + delete_member_flow.add(database_tasks.MarkPoolActiveInDB( + requires=constants.POOL)) + delete_member_flow.add(database_tasks. 
+ MarkLBAndListenersActiveInDB( + requires=[constants.LOADBALANCER, + constants.LISTENERS])) + + return delete_member_flow + + def get_update_member_flow(self): + """Create a flow to update a member + + :returns: The flow for updating a member + """ + update_member_flow = linear_flow.Flow(constants.UPDATE_MEMBER_FLOW) + update_member_flow.add(lifecycle_tasks.MemberToErrorOnRevertTask( + requires=[constants.MEMBER, + constants.LISTENERS, + constants.LOADBALANCER, + constants.POOL])) + update_member_flow.add(database_tasks.MarkMemberPendingUpdateInDB( + requires=constants.MEMBER)) + update_member_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=[constants.LOADBALANCER, constants.LISTENERS])) + update_member_flow.add(database_tasks.UpdateMemberInDB( + requires=[constants.MEMBER, constants.UPDATE_DICT])) + update_member_flow.add(database_tasks.MarkMemberActiveInDB( + requires=constants.MEMBER)) + update_member_flow.add(database_tasks.MarkPoolActiveInDB( + requires=constants.POOL)) + update_member_flow.add(database_tasks. + MarkLBAndListenersActiveInDB( + requires=[constants.LOADBALANCER, + constants.LISTENERS])) + + return update_member_flow + + def get_batch_update_members_flow(self, old_members, new_members, + updated_members): + """Create a flow to batch update members + + :returns: The flow for batch updating members + """ + batch_update_members_flow = linear_flow.Flow( + constants.BATCH_UPDATE_MEMBERS_FLOW) + unordered_members_flow = unordered_flow.Flow( + constants.UNORDERED_MEMBER_UPDATES_FLOW) + unordered_members_active_flow = unordered_flow.Flow( + constants.UNORDERED_MEMBER_ACTIVE_FLOW) + + # Delete old members + unordered_members_flow.add( + lifecycle_tasks.MembersToErrorOnRevertTask( + inject={constants.MEMBERS: old_members}, + name='{flow}-deleted'.format( + flow=constants.MEMBER_TO_ERROR_ON_REVERT_FLOW))) + for m in old_members: + unordered_members_flow.add( + model_tasks.DeleteModelObject( + inject={constants.OBJECT: m}, + name='{flow}-{id}'.format( + id=m.id, flow=constants.DELETE_MODEL_OBJECT_FLOW))) + unordered_members_flow.add(database_tasks.DeleteMemberInDB( + inject={constants.MEMBER: m}, + name='{flow}-{id}'.format( + id=m.id, flow=constants.DELETE_MEMBER_INDB))) + unordered_members_flow.add(database_tasks.DecrementMemberQuota( + inject={constants.MEMBER: m}, + name='{flow}-{id}'.format( + id=m.id, flow=constants.DECREMENT_MEMBER_QUOTA_FLOW))) + + # Create new members + unordered_members_flow.add( + lifecycle_tasks.MembersToErrorOnRevertTask( + inject={constants.MEMBERS: new_members}, + name='{flow}-created'.format( + flow=constants.MEMBER_TO_ERROR_ON_REVERT_FLOW))) + for m in new_members: + unordered_members_active_flow.add( + database_tasks.MarkMemberActiveInDB( + inject={constants.MEMBER: m}, + name='{flow}-{id}'.format( + id=m.id, flow=constants.MARK_MEMBER_ACTIVE_INDB))) + + # Update existing members + unordered_members_flow.add( + lifecycle_tasks.MembersToErrorOnRevertTask( + # updated_members is a list of (obj, dict), only pass `obj` + inject={constants.MEMBERS: [m[0] for m in updated_members]}, + name='{flow}-updated'.format( + flow=constants.MEMBER_TO_ERROR_ON_REVERT_FLOW))) + for m, um in updated_members: + um.pop('id', None) + unordered_members_active_flow.add( + database_tasks.MarkMemberActiveInDB( + inject={constants.MEMBER: m}, + name='{flow}-{id}'.format( + id=m.id, flow=constants.MARK_MEMBER_ACTIVE_INDB))) + + batch_update_members_flow.add(unordered_members_flow) + + # Done, do real updates + 
batch_update_members_flow.add(network_tasks.CalculateDelta( + requires=constants.LOADBALANCER, + provides=constants.DELTAS)) + batch_update_members_flow.add(network_tasks.HandleNetworkDeltas( + requires=constants.DELTAS, provides=constants.ADDED_PORTS)) + batch_update_members_flow.add( + amphora_driver_tasks.AmphoraePostNetworkPlug( + requires=(constants.LOADBALANCER, constants.ADDED_PORTS))) + + # Update the Listener (this makes the changes active on the Amp) + batch_update_members_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=(constants.LOADBALANCER, constants.LISTENERS))) + + # Mark all the members ACTIVE here, then pool then LB/Listeners + batch_update_members_flow.add(unordered_members_active_flow) + batch_update_members_flow.add(database_tasks.MarkPoolActiveInDB( + requires=constants.POOL)) + batch_update_members_flow.add( + database_tasks.MarkLBAndListenersActiveInDB( + requires=(constants.LOADBALANCER, + constants.LISTENERS))) + + return batch_update_members_flow diff --git a/octavia/controller/worker/v2/flows/pool_flows.py b/octavia/controller/worker/v2/flows/pool_flows.py new file mode 100644 index 0000000000..0cf615be82 --- /dev/null +++ b/octavia/controller/worker/v2/flows/pool_flows.py @@ -0,0 +1,127 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
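The batch member flow takes three pre-partitioned lists: members to delete, members to create, and updated_members as (member object, update dict) pairs; the flow pops 'id' from each update dict itself, and per-member inputs are injected task by task, so only the shared objects have to come through the engine store. A minimal usage sketch (illustrative only, placeholder objects, and the exact store contents may differ in the real controller worker):

    from taskflow import engines

    from octavia.common import constants
    from octavia.controller.worker.v2.flows import member_flows


    def batch_update(load_balancer, pool, old_members, new_members,
                     updated_members):
        # updated_members example shape: [(member, {'weight': 5}), ...]
        flow = member_flows.MemberFlows().get_batch_update_members_flow(
            old_members, new_members, updated_members)
        # Each member is injected into its own task by the flow factory
        # above, so the store only carries the shared objects.
        store = {constants.LOADBALANCER: load_balancer,
                 constants.LISTENERS: load_balancer.listeners,
                 constants.POOL: pool}
        engines.load(flow, store=store).run()
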
+# + +from taskflow.patterns import linear_flow + +from octavia.common import constants +from octavia.controller.worker.v2.tasks import amphora_driver_tasks +from octavia.controller.worker.v2.tasks import database_tasks +from octavia.controller.worker.v2.tasks import lifecycle_tasks +from octavia.controller.worker.v2.tasks import model_tasks + + +class PoolFlows(object): + + def get_create_pool_flow(self): + """Create a flow to create a pool + + :returns: The flow for creating a pool + """ + create_pool_flow = linear_flow.Flow(constants.CREATE_POOL_FLOW) + create_pool_flow.add(lifecycle_tasks.PoolToErrorOnRevertTask( + requires=[constants.POOL, + constants.LISTENERS, + constants.LOADBALANCER])) + create_pool_flow.add(database_tasks.MarkPoolPendingCreateInDB( + requires=constants.POOL)) + create_pool_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=[constants.LOADBALANCER, constants.LISTENERS])) + create_pool_flow.add(database_tasks.MarkPoolActiveInDB( + requires=constants.POOL)) + create_pool_flow.add(database_tasks.MarkLBAndListenersActiveInDB( + requires=[constants.LOADBALANCER, constants.LISTENERS])) + + return create_pool_flow + + def get_delete_pool_flow(self): + """Create a flow to delete a pool + + :returns: The flow for deleting a pool + """ + delete_pool_flow = linear_flow.Flow(constants.DELETE_POOL_FLOW) + delete_pool_flow.add(lifecycle_tasks.PoolToErrorOnRevertTask( + requires=[constants.POOL, + constants.LISTENERS, + constants.LOADBALANCER])) + delete_pool_flow.add(database_tasks.MarkPoolPendingDeleteInDB( + requires=constants.POOL)) + delete_pool_flow.add(database_tasks.CountPoolChildrenForQuota( + requires=constants.POOL, provides=constants.POOL_CHILD_COUNT)) + delete_pool_flow.add(model_tasks.DeleteModelObject( + rebind={constants.OBJECT: constants.POOL})) + delete_pool_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=[constants.LOADBALANCER, constants.LISTENERS])) + delete_pool_flow.add(database_tasks.DeletePoolInDB( + requires=constants.POOL)) + delete_pool_flow.add(database_tasks.DecrementPoolQuota( + requires=[constants.POOL, constants.POOL_CHILD_COUNT])) + delete_pool_flow.add(database_tasks.MarkLBAndListenersActiveInDB( + requires=[constants.LOADBALANCER, constants.LISTENERS])) + + return delete_pool_flow + + def get_delete_pool_flow_internal(self, name): + """Create a flow to delete a pool, etc. 
+ + :returns: The flow for deleting a pool + """ + delete_pool_flow = linear_flow.Flow(constants.DELETE_POOL_FLOW) + # health monitor should cascade + # members should cascade + delete_pool_flow.add(database_tasks.MarkPoolPendingDeleteInDB( + name='mark_pool_pending_delete_in_db_' + name, + requires=constants.POOL, + rebind={constants.POOL: name})) + delete_pool_flow.add(database_tasks.CountPoolChildrenForQuota( + name='count_pool_children_for_quota_' + name, + requires=constants.POOL, + provides=constants.POOL_CHILD_COUNT, + rebind={constants.POOL: name})) + delete_pool_flow.add(model_tasks.DeleteModelObject( + name='delete_model_object_' + name, + rebind={constants.OBJECT: name})) + delete_pool_flow.add(database_tasks.DeletePoolInDB( + name='delete_pool_in_db_' + name, + requires=constants.POOL, + rebind={constants.POOL: name})) + delete_pool_flow.add(database_tasks.DecrementPoolQuota( + name='decrement_pool_quota_' + name, + requires=[constants.POOL, constants.POOL_CHILD_COUNT], + rebind={constants.POOL: name})) + + return delete_pool_flow + + def get_update_pool_flow(self): + """Create a flow to update a pool + + :returns: The flow for updating a pool + """ + update_pool_flow = linear_flow.Flow(constants.UPDATE_POOL_FLOW) + update_pool_flow.add(lifecycle_tasks.PoolToErrorOnRevertTask( + requires=[constants.POOL, + constants.LISTENERS, + constants.LOADBALANCER])) + update_pool_flow.add(database_tasks.MarkPoolPendingUpdateInDB( + requires=constants.POOL)) + update_pool_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=[constants.LOADBALANCER, constants.LISTENERS])) + update_pool_flow.add(database_tasks.UpdatePoolInDB( + requires=[constants.POOL, constants.UPDATE_DICT])) + update_pool_flow.add(database_tasks.MarkPoolActiveInDB( + requires=constants.POOL)) + update_pool_flow.add(database_tasks.MarkLBAndListenersActiveInDB( + requires=[constants.LOADBALANCER, constants.LISTENERS])) + + return update_pool_flow diff --git a/octavia/controller/worker/v2/tasks/__init__.py b/octavia/controller/worker/v2/tasks/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/controller/worker/v2/tasks/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/controller/worker/v2/tasks/amphora_driver_tasks.py b/octavia/controller/worker/v2/tasks/amphora_driver_tasks.py new file mode 100644 index 0000000000..6fa16015c5 --- /dev/null +++ b/octavia/controller/worker/v2/tasks/amphora_driver_tasks.py @@ -0,0 +1,397 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from cryptography import fernet +from oslo_config import cfg +from oslo_log import log as logging +import six +from stevedore import driver as stevedore_driver +from taskflow import task +from taskflow.types import failure + +from octavia.amphorae.backends.agent import agent_jinja_cfg +from octavia.amphorae.driver_exceptions import exceptions as driver_except +from octavia.common import constants +from octavia.common import utils +from octavia.controller.worker import task_utils as task_utilities +from octavia.db import api as db_apis +from octavia.db import repositories as repo + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +class BaseAmphoraTask(task.Task): + """Base task to load drivers common to the tasks.""" + + def __init__(self, **kwargs): + super(BaseAmphoraTask, self).__init__(**kwargs) + self.amphora_driver = stevedore_driver.DriverManager( + namespace='octavia.amphora.drivers', + name=CONF.controller_worker.amphora_driver, + invoke_on_load=True + ).driver + self.amphora_repo = repo.AmphoraRepository() + self.listener_repo = repo.ListenerRepository() + self.loadbalancer_repo = repo.LoadBalancerRepository() + self.task_utils = task_utilities.TaskUtils() + + +class AmpListenersUpdate(BaseAmphoraTask): + """Task to update the listeners on one amphora.""" + + def execute(self, listeners, amphora_index, amphorae, timeout_dict=()): + # Note, we don't want this to cause a revert as it may be used + # in a failover flow with both amps failing. Skip it and let + # health manager fix it. + try: + self.amphora_driver.update_amphora_listeners( + listeners, amphora_index, amphorae, timeout_dict) + except Exception as e: + amphora_id = amphorae[amphora_index].id + LOG.error('Failed to update listeners on amphora %s. 
Skipping ' + 'this amphora as it is failing to update due to: %s', + amphora_id, str(e)) + self.amphora_repo.update(db_apis.get_session(), amphora_id, + status=constants.ERROR) + + +class ListenersUpdate(BaseAmphoraTask): + """Task to update amphora with all specified listeners' configurations.""" + + def execute(self, loadbalancer, listeners): + """Execute updates per listener for an amphora.""" + for listener in listeners: + listener.load_balancer = loadbalancer + self.amphora_driver.update(listener, loadbalancer.vip) + + def revert(self, loadbalancer, *args, **kwargs): + """Handle failed listeners updates.""" + + LOG.warning("Reverting listeners updates.") + + for listener in loadbalancer.listeners: + self.task_utils.mark_listener_prov_status_error(listener.id) + + +class ListenerStop(BaseAmphoraTask): + """Task to stop the listener on the vip.""" + + def execute(self, loadbalancer, listener): + """Execute listener stop routines for an amphora.""" + self.amphora_driver.stop(listener, loadbalancer.vip) + LOG.debug("Stopped the listener on the vip") + + def revert(self, listener, *args, **kwargs): + """Handle a failed listener stop.""" + + LOG.warning("Reverting listener stop.") + + self.task_utils.mark_listener_prov_status_error(listener.id) + + +class ListenerStart(BaseAmphoraTask): + """Task to start the listener on the vip.""" + + def execute(self, loadbalancer, listener): + """Execute listener start routines for an amphora.""" + self.amphora_driver.start(listener, loadbalancer.vip) + LOG.debug("Started the listener on the vip") + + def revert(self, listener, *args, **kwargs): + """Handle a failed listener start.""" + + LOG.warning("Reverting listener start.") + + self.task_utils.mark_listener_prov_status_error(listener.id) + + +class ListenersStart(BaseAmphoraTask): + """Task to start all listeners on the vip.""" + + def execute(self, loadbalancer, listeners, amphora=None): + """Execute listener start routines for listeners on an amphora.""" + for listener in listeners: + self.amphora_driver.start(listener, loadbalancer.vip, amphora) + LOG.debug("Started the listeners on the vip") + + def revert(self, listeners, *args, **kwargs): + """Handle failed listeners starts.""" + + LOG.warning("Reverting listeners starts.") + for listener in listeners: + self.task_utils.mark_listener_prov_status_error(listener.id) + + +class ListenerDelete(BaseAmphoraTask): + """Task to delete the listener on the vip.""" + + def execute(self, loadbalancer, listener): + """Execute listener delete routines for an amphora.""" + self.amphora_driver.delete(listener, loadbalancer.vip) + LOG.debug("Deleted the listener on the vip") + + def revert(self, listener, *args, **kwargs): + """Handle a failed listener delete.""" + + LOG.warning("Reverting listener delete.") + + self.task_utils.mark_listener_prov_status_error(listener.id) + + +class AmphoraGetInfo(BaseAmphoraTask): + """Task to get information on an amphora.""" + + def execute(self, amphora): + """Execute get_info routine for an amphora.""" + self.amphora_driver.get_info(amphora) + + +class AmphoraGetDiagnostics(BaseAmphoraTask): + """Task to get diagnostics on the amphora and the loadbalancers.""" + + def execute(self, amphora): + """Execute get_diagnostic routine for an amphora.""" + self.amphora_driver.get_diagnostics(amphora) + + +class AmphoraFinalize(BaseAmphoraTask): + """Task to finalize the amphora before any listeners are configured.""" + + def execute(self, amphora): + """Execute finalize_amphora routine.""" + 
self.amphora_driver.finalize_amphora(amphora) + LOG.debug("Finalized the amphora.") + + def revert(self, result, amphora, *args, **kwargs): + """Handle a failed amphora finalize.""" + if isinstance(result, failure.Failure): + return + LOG.warning("Reverting amphora finalize.") + self.task_utils.mark_amphora_status_error(amphora.id) + + +class AmphoraPostNetworkPlug(BaseAmphoraTask): + """Task to notify the amphora post network plug.""" + + def execute(self, amphora, ports): + """Execute post_network_plug routine.""" + for port in ports: + self.amphora_driver.post_network_plug(amphora, port) + LOG.debug("post_network_plug called on compute instance " + "%(compute_id)s for port %(port_id)s", + {"compute_id": amphora.compute_id, "port_id": port.id}) + + def revert(self, result, amphora, *args, **kwargs): + """Handle a failed post network plug.""" + if isinstance(result, failure.Failure): + return + LOG.warning("Reverting post network plug.") + self.task_utils.mark_amphora_status_error(amphora.id) + + +class AmphoraePostNetworkPlug(BaseAmphoraTask): + """Task to notify the amphorae post network plug.""" + + def execute(self, loadbalancer, added_ports): + """Execute post_network_plug routine.""" + amp_post_plug = AmphoraPostNetworkPlug() + for amphora in loadbalancer.amphorae: + if amphora.id in added_ports: + amp_post_plug.execute(amphora, added_ports[amphora.id]) + + def revert(self, result, loadbalancer, added_ports, *args, **kwargs): + """Handle a failed post network plug.""" + if isinstance(result, failure.Failure): + return + LOG.warning("Reverting post network plug.") + for amphora in six.moves.filter( + lambda amp: amp.status == constants.AMPHORA_ALLOCATED, + loadbalancer.amphorae): + + self.task_utils.mark_amphora_status_error(amphora.id) + + +class AmphoraPostVIPPlug(BaseAmphoraTask): + """Task to notify the amphora post VIP plug.""" + + def execute(self, amphora, loadbalancer, amphorae_network_config): + """Execute post_vip_routine.""" + self.amphora_driver.post_vip_plug( + amphora, loadbalancer, amphorae_network_config) + LOG.debug("Notified amphora of vip plug") + + def revert(self, result, amphora, loadbalancer, *args, **kwargs): + """Handle a failed amphora vip plug notification.""" + if isinstance(result, failure.Failure): + return + LOG.warning("Reverting post vip plug.") + self.task_utils.mark_amphora_status_error(amphora.id) + self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id) + + +class AmphoraePostVIPPlug(BaseAmphoraTask): + """Task to notify the amphorae post VIP plug.""" + + def execute(self, loadbalancer, amphorae_network_config): + """Execute post_vip_plug across the amphorae.""" + amp_post_vip_plug = AmphoraPostVIPPlug() + for amphora in loadbalancer.amphorae: + amp_post_vip_plug.execute(amphora, + loadbalancer, + amphorae_network_config) + + def revert(self, result, loadbalancer, *args, **kwargs): + """Handle a failed amphora vip plug notification.""" + if isinstance(result, failure.Failure): + return + LOG.warning("Reverting amphorae post vip plug.") + self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id) + + +class AmphoraCertUpload(BaseAmphoraTask): + """Upload a certificate to the amphora.""" + + def execute(self, amphora, server_pem): + """Execute cert_update_amphora routine.""" + LOG.debug("Upload cert in amphora REST driver") + key = utils.get_six_compatible_server_certs_key_passphrase() + fer = fernet.Fernet(key) + self.amphora_driver.upload_cert_amp(amphora, fer.decrypt(server_pem)) + + +class 
AmphoraUpdateVRRPInterface(BaseAmphoraTask): + """Task to get and update the VRRP interface device name from amphora.""" + + def execute(self, loadbalancer): + """Execute post_vip_routine.""" + amps = [] + timeout_dict = { + constants.CONN_MAX_RETRIES: + CONF.haproxy_amphora.active_connection_max_retries, + constants.CONN_RETRY_INTERVAL: + CONF.haproxy_amphora.active_connection_rety_interval} + for amp in six.moves.filter( + lambda amp: amp.status == constants.AMPHORA_ALLOCATED, + loadbalancer.amphorae): + + try: + interface = self.amphora_driver.get_vrrp_interface( + amp, timeout_dict=timeout_dict) + except Exception as e: + # This can occur when an active/standby LB has no listener + LOG.error('Failed to get amphora VRRP interface on amphora ' + '%s. Skipping this amphora as it is failing due to: ' + '%s', amp.id, str(e)) + self.amphora_repo.update(db_apis.get_session(), amp.id, + status=constants.ERROR) + continue + + self.amphora_repo.update(db_apis.get_session(), amp.id, + vrrp_interface=interface) + amps.append(self.amphora_repo.get(db_apis.get_session(), + id=amp.id)) + loadbalancer.amphorae = amps + return loadbalancer + + def revert(self, result, loadbalancer, *args, **kwargs): + """Handle a failed amphora vip plug notification.""" + if isinstance(result, failure.Failure): + return + LOG.warning("Reverting Get Amphora VRRP Interface.") + for amp in six.moves.filter( + lambda amp: amp.status == constants.AMPHORA_ALLOCATED, + loadbalancer.amphorae): + + try: + self.amphora_repo.update(db_apis.get_session(), amp.id, + vrrp_interface=None) + except Exception as e: + LOG.error("Failed to update amphora %(amp)s " + "VRRP interface to None due to: %(except)s", + {'amp': amp.id, 'except': e}) + + +class AmphoraVRRPUpdate(BaseAmphoraTask): + """Task to update the VRRP configuration of the loadbalancer amphorae.""" + + def execute(self, loadbalancer, amphorae_network_config): + """Execute update_vrrp_conf.""" + self.amphora_driver.update_vrrp_conf(loadbalancer, + amphorae_network_config) + LOG.debug("Uploaded VRRP configuration of loadbalancer %s amphorae", + loadbalancer.id) + + +class AmphoraVRRPStop(BaseAmphoraTask): + """Task to stop keepalived of all amphorae of a LB.""" + + def execute(self, loadbalancer): + self.amphora_driver.stop_vrrp_service(loadbalancer) + LOG.debug("Stopped VRRP of loadbalancer %s amphorae", + loadbalancer.id) + + +class AmphoraVRRPStart(BaseAmphoraTask): + """Task to start keepalived of all amphorae of a LB.""" + + def execute(self, loadbalancer): + self.amphora_driver.start_vrrp_service(loadbalancer) + LOG.debug("Started VRRP of loadbalancer %s amphorae", + loadbalancer.id) + + +class AmphoraComputeConnectivityWait(BaseAmphoraTask): + """Task to wait for the compute instance to be up.""" + + def execute(self, amphora): + """Execute get_info routine for an amphora until it responds.""" + try: + amp_info = self.amphora_driver.get_info(amphora) + LOG.debug('Successfuly connected to amphora %s: %s', + amphora.id, amp_info) + except driver_except.TimeOutException: + LOG.error("Amphora compute instance failed to become reachable. 
" + "This either means the compute driver failed to fully " + "boot the instance inside the timeout interval or the " + "instance is not reachable via the lb-mgmt-net.") + self.amphora_repo.update(db_apis.get_session(), amphora.id, + status=constants.ERROR) + raise + + +class AmphoraConfigUpdate(BaseAmphoraTask): + """Task to push a new amphora agent configuration to the amphroa.""" + + def execute(self, amphora, flavor): + # Extract any flavor based settings + if flavor: + topology = flavor.get(constants.LOADBALANCER_TOPOLOGY, + CONF.controller_worker.loadbalancer_topology) + else: + topology = CONF.controller_worker.loadbalancer_topology + + # Build the amphora agent config + agent_cfg_tmpl = agent_jinja_cfg.AgentJinjaTemplater() + agent_config = agent_cfg_tmpl.build_agent_config(amphora.id, topology) + + # Push the new configuration to the amphroa + try: + self.amphora_driver.update_amphora_agent_config(amphora, + agent_config) + except driver_except.AmpDriverNotImplementedError: + LOG.error('Amphora {} does not support agent configuration ' + 'update. Please update the amphora image for this ' + 'amphora. Skipping.'.format(amphora.id)) diff --git a/octavia/controller/worker/v2/tasks/cert_task.py b/octavia/controller/worker/v2/tasks/cert_task.py new file mode 100644 index 0000000000..dffbfbdf39 --- /dev/null +++ b/octavia/controller/worker/v2/tasks/cert_task.py @@ -0,0 +1,51 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from cryptography import fernet +from oslo_config import cfg +from stevedore import driver as stevedore_driver +from taskflow import task + +from octavia.common import utils + +CONF = cfg.CONF + + +class BaseCertTask(task.Task): + """Base task to load drivers common to the tasks.""" + + def __init__(self, **kwargs): + super(BaseCertTask, self).__init__(**kwargs) + self.cert_generator = stevedore_driver.DriverManager( + namespace='octavia.cert_generator', + name=CONF.certificates.cert_generator, + invoke_on_load=True, + ).driver + + +class GenerateServerPEMTask(BaseCertTask): + """Create the server certs for the agent comm + + Use the amphora_id for the CN + """ + + def execute(self, amphora_id): + cert = self.cert_generator.generate_cert_key_pair( + cn=amphora_id, + validity=CONF.certificates.cert_validity_time) + key = utils.get_six_compatible_server_certs_key_passphrase() + fer = fernet.Fernet(key) + + return fer.encrypt(cert.certificate + cert.private_key) diff --git a/octavia/controller/worker/v2/tasks/compute_tasks.py b/octavia/controller/worker/v2/tasks/compute_tasks.py new file mode 100644 index 0000000000..12d5f1135a --- /dev/null +++ b/octavia/controller/worker/v2/tasks/compute_tasks.py @@ -0,0 +1,251 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import time + +from cryptography import fernet +from oslo_config import cfg +from oslo_log import log as logging +from stevedore import driver as stevedore_driver +from taskflow import task +from taskflow.types import failure + +from octavia.amphorae.backends.agent import agent_jinja_cfg +from octavia.common import constants +from octavia.common import exceptions +from octavia.common.jinja import user_data_jinja_cfg +from octavia.common import utils +from octavia.controller.worker import amphora_rate_limit + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +class BaseComputeTask(task.Task): + """Base task to load drivers common to the tasks.""" + + def __init__(self, **kwargs): + super(BaseComputeTask, self).__init__(**kwargs) + self.compute = stevedore_driver.DriverManager( + namespace='octavia.compute.drivers', + name=CONF.controller_worker.compute_driver, + invoke_on_load=True + ).driver + self.rate_limit = amphora_rate_limit.AmphoraBuildRateLimit() + + +class ComputeCreate(BaseComputeTask): + """Create the compute instance for a new amphora.""" + + def execute(self, amphora_id, config_drive_files=None, + build_type_priority=constants.LB_CREATE_NORMAL_PRIORITY, + server_group_id=None, ports=None, flavor=None): + """Create an amphora + + :returns: an amphora + """ + ports = ports or [] + network_ids = CONF.controller_worker.amp_boot_network_list[:] + config_drive_files = config_drive_files or {} + user_data = None + LOG.debug("Compute create execute for amphora with id %s", amphora_id) + + user_data_config_drive = CONF.controller_worker.user_data_config_drive + + key_name = CONF.controller_worker.amp_ssh_key_name + # TODO(rm_work): amp_ssh_access_allowed is deprecated in Pike. + # Remove the following two lines in the S release. 
+ ssh_access = CONF.controller_worker.amp_ssh_access_allowed + key_name = None if not ssh_access else key_name + + # Apply an Octavia flavor customizations + if flavor: + topology = flavor.get(constants.LOADBALANCER_TOPOLOGY, + CONF.controller_worker.loadbalancer_topology) + amp_compute_flavor = flavor.get( + constants.COMPUTE_FLAVOR, CONF.controller_worker.amp_flavor_id) + else: + topology = CONF.controller_worker.loadbalancer_topology + amp_compute_flavor = CONF.controller_worker.amp_flavor_id + + try: + if CONF.haproxy_amphora.build_rate_limit != -1: + self.rate_limit.add_to_build_request_queue( + amphora_id, build_type_priority) + + agent_cfg = agent_jinja_cfg.AgentJinjaTemplater() + config_drive_files['/etc/octavia/amphora-agent.conf'] = ( + agent_cfg.build_agent_config(amphora_id, topology)) + if user_data_config_drive: + udtemplater = user_data_jinja_cfg.UserDataJinjaCfg() + user_data = udtemplater.build_user_data_config( + config_drive_files) + config_drive_files = None + + compute_id = self.compute.build( + name="amphora-" + amphora_id, + amphora_flavor=amp_compute_flavor, + image_id=CONF.controller_worker.amp_image_id, + image_tag=CONF.controller_worker.amp_image_tag, + image_owner=CONF.controller_worker.amp_image_owner_id, + key_name=key_name, + sec_groups=CONF.controller_worker.amp_secgroup_list, + network_ids=network_ids, + port_ids=[port.id for port in ports], + config_drive_files=config_drive_files, + user_data=user_data, + server_group_id=server_group_id) + + LOG.debug("Server created with id: %s for amphora id: %s", + compute_id, amphora_id) + return compute_id + + except Exception: + LOG.exception("Compute create for amphora id: %s failed", + amphora_id) + raise + + def revert(self, result, amphora_id, *args, **kwargs): + """This method will revert the creation of the + + amphora. So it will just delete it in this flow + """ + if isinstance(result, failure.Failure): + return + compute_id = result + LOG.warning("Reverting compute create for amphora with id " + "%(amp)s and compute id: %(comp)s", + {'amp': amphora_id, 'comp': compute_id}) + try: + self.compute.delete(compute_id) + except Exception: + LOG.exception("Reverting compute create failed") + + +class CertComputeCreate(ComputeCreate): + def execute(self, amphora_id, server_pem, + build_type_priority=constants.LB_CREATE_NORMAL_PRIORITY, + server_group_id=None, ports=None, flavor=None): + """Create an amphora + + :returns: an amphora + """ + + # load client certificate + with open(CONF.controller_worker.client_ca, 'r') as client_ca: + ca = client_ca.read() + + key = utils.get_six_compatible_server_certs_key_passphrase() + fer = fernet.Fernet(key) + config_drive_files = { + '/etc/octavia/certs/server.pem': fer.decrypt(server_pem), + '/etc/octavia/certs/client_ca.pem': ca} + return super(CertComputeCreate, self).execute( + amphora_id, config_drive_files=config_drive_files, + build_type_priority=build_type_priority, + server_group_id=server_group_id, ports=ports, flavor=flavor) + + +class DeleteAmphoraeOnLoadBalancer(BaseComputeTask): + """Delete the amphorae on a load balancer. 
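A sketch of how these compute tasks are typically chained in a taskflow flow: GenerateServerPEMTask publishes the encrypted PEM under a named result, CertComputeCreate consumes it together with the amphora id and returns a compute id, and ComputeActiveWait blocks until the instance is up. The flow and storage names here are illustrative assumptions; the authoritative wiring lives in the v2 amphora flows, and actually running it requires a configured Octavia deployment:

    from taskflow.patterns import linear_flow

    from octavia.controller.worker.v2.tasks import cert_task
    from octavia.controller.worker.v2.tasks import compute_tasks

    amp_boot = linear_flow.Flow('illustrative-amphora-boot')
    # Encrypted PEM for the agent, keyed off the amphora id in the store.
    amp_boot.add(cert_task.GenerateServerPEMTask(provides='server_pem'))
    # Boot the VM with the decrypted PEM and client CA on the config drive.
    amp_boot.add(compute_tasks.CertComputeCreate(
        requires=('amphora_id', 'server_pem'), provides='compute_id'))
    # Poll the compute driver until the instance is ACTIVE (or fails).
    amp_boot.add(compute_tasks.ComputeActiveWait(
        requires=('compute_id', 'amphora_id'), provides='compute_obj'))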
+ + Iterate through amphorae, deleting them + """ + + def execute(self, loadbalancer): + for amp in loadbalancer.amphorae: + # The compute driver will already handle NotFound + try: + self.compute.delete(amp.compute_id) + except Exception: + LOG.exception("Compute delete for amphora id: %s failed", + amp.id) + raise + + +class ComputeDelete(BaseComputeTask): + def execute(self, amphora): + LOG.debug("Compute Delete execute for amphora with id %s", amphora.id) + + try: + self.compute.delete(amphora.compute_id) + except Exception: + LOG.exception("Compute delete for amphora id: %s failed", + amphora.id) + raise + + +class ComputeActiveWait(BaseComputeTask): + """Wait for the compute driver to mark the amphora active.""" + + def execute(self, compute_id, amphora_id): + """Wait for the compute driver to mark the amphora active + + :raises: Generic exception if the amphora is not active + :returns: An amphora object + """ + for i in range(CONF.controller_worker.amp_active_retries): + amp, fault = self.compute.get_amphora(compute_id) + if amp.status == constants.ACTIVE: + if CONF.haproxy_amphora.build_rate_limit != -1: + self.rate_limit.remove_from_build_req_queue(amphora_id) + return amp + if amp.status == constants.ERROR: + raise exceptions.ComputeBuildException(fault=fault) + time.sleep(CONF.controller_worker.amp_active_wait_sec) + + raise exceptions.ComputeWaitTimeoutException(id=compute_id) + + +class NovaServerGroupCreate(BaseComputeTask): + def execute(self, loadbalancer_id): + """Create a server group by nova client api + + :param loadbalancer_id: will be used for server group's name + :param policy: will used for server group's policy + :raises: Generic exception if the server group is not created + :returns: server group's id + """ + + name = 'octavia-lb-' + loadbalancer_id + server_group = self.compute.create_server_group( + name, CONF.nova.anti_affinity_policy) + LOG.debug("Server Group created with id: %s for load balancer id: " + "%s", server_group.id, loadbalancer_id) + return server_group.id + + def revert(self, result, *args, **kwargs): + """This method will revert the creation of the + + :param result: here it refers to server group id + """ + server_group_id = result + LOG.warning("Reverting server group create with id:%s", + server_group_id) + try: + self.compute.delete_server_group(server_group_id) + except Exception as e: + LOG.error("Failed to delete server group. Resources may " + "still be in use for server group: %(sg)s due to " + "error: %(except)s", + {'sg': server_group_id, 'except': e}) + + +class NovaServerGroupDelete(BaseComputeTask): + def execute(self, server_group_id): + if server_group_id is not None: + self.compute.delete_server_group(server_group_id) + else: + return diff --git a/octavia/controller/worker/v2/tasks/database_tasks.py b/octavia/controller/worker/v2/tasks/database_tasks.py new file mode 100644 index 0000000000..3b50c5ee90 --- /dev/null +++ b/octavia/controller/worker/v2/tasks/database_tasks.py @@ -0,0 +1,2707 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
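ComputeActiveWait is a bounded poll: up to amp_active_retries attempts, sleeping amp_active_wait_sec between them, returning on ACTIVE, raising immediately on ERROR, and raising a timeout once the retries are exhausted. A generic, runnable distillation of that loop (names are illustrative, not Octavia APIs):

    import time


    def wait_for_active(get_status, retries, interval):
        # Mirrors ComputeActiveWait: return early on ACTIVE, fail fast on
        # ERROR, and fail with a timeout when the retries run out.
        for _ in range(retries):
            status = get_status()
            if status == 'ACTIVE':
                return status
            if status == 'ERROR':
                raise RuntimeError('build failed')
            time.sleep(interval)
        raise RuntimeError('instance did not become ACTIVE in time')


    # A fake status source that becomes ACTIVE on the third poll.
    statuses = iter(['BUILD', 'BUILD', 'ACTIVE'])
    print(wait_for_active(lambda: next(statuses), retries=5, interval=0))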
See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from cryptography import fernet +from oslo_config import cfg +from oslo_db import exception as odb_exceptions +from oslo_log import log as logging +from oslo_utils import excutils +from oslo_utils import uuidutils +import six +import sqlalchemy +from sqlalchemy.orm import exc +from taskflow import task +from taskflow.types import failure + +from octavia.common import constants +from octavia.common import data_models +import octavia.common.tls_utils.cert_parser as cert_parser +from octavia.common import utils +from octavia.common import validate +from octavia.controller.worker import task_utils as task_utilities +from octavia.db import api as db_apis +from octavia.db import repositories as repo + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +class BaseDatabaseTask(task.Task): + """Base task to load drivers common to the tasks.""" + + def __init__(self, **kwargs): + self.repos = repo.Repositories() + self.amphora_repo = repo.AmphoraRepository() + self.health_mon_repo = repo.HealthMonitorRepository() + self.listener_repo = repo.ListenerRepository() + self.loadbalancer_repo = repo.LoadBalancerRepository() + self.vip_repo = repo.VipRepository() + self.member_repo = repo.MemberRepository() + self.pool_repo = repo.PoolRepository() + self.amp_health_repo = repo.AmphoraHealthRepository() + self.l7policy_repo = repo.L7PolicyRepository() + self.l7rule_repo = repo.L7RuleRepository() + self.task_utils = task_utilities.TaskUtils() + super(BaseDatabaseTask, self).__init__(**kwargs) + + def _delete_from_amp_health(self, amphora_id): + """Delete the amphora_health record for an amphora. + + :param amphora_id: The amphora id to delete + """ + LOG.debug('Disabling health monitoring on amphora: %s', amphora_id) + try: + self.amp_health_repo.delete(db_apis.get_session(), + amphora_id=amphora_id) + except (sqlalchemy.orm.exc.NoResultFound, + sqlalchemy.orm.exc.UnmappedInstanceError): + LOG.debug('No existing amphora health record to delete ' + 'for amphora: %s, skipping.', amphora_id) + + def _mark_amp_health_busy(self, amphora_id): + """Mark the amphora_health record busy for an amphora. + + :param amphora_id: The amphora id to mark busy + """ + LOG.debug('Marking health monitoring busy on amphora: %s', amphora_id) + try: + self.amp_health_repo.update(db_apis.get_session(), + amphora_id=amphora_id, + busy=True) + except (sqlalchemy.orm.exc.NoResultFound, + sqlalchemy.orm.exc.UnmappedInstanceError): + LOG.debug('No existing amphora health record to mark busy ' + 'for amphora: %s, skipping.', amphora_id) + + +class CreateAmphoraInDB(BaseDatabaseTask): + """Task to create an initial amphora in the Database.""" + + def execute(self, *args, **kwargs): + """Creates an pending create amphora record in the database. + + :returns: The created amphora object + """ + + amphora = self.amphora_repo.create(db_apis.get_session(), + id=uuidutils.generate_uuid(), + status=constants.PENDING_CREATE, + cert_busy=False) + + LOG.info("Created Amphora in DB with id %s", amphora.id) + return amphora.id + + def revert(self, result, *args, **kwargs): + """Revert by storing the amphora in error state in the DB + + In a future version we might change the status to DELETED + if deleting the amphora was successful + + :param result: Id of created amphora. 
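The execute/revert pairing used by CreateAmphoraInDB and the rest of these database tasks relies on standard taskflow behavior: when a later task fails, earlier tasks are reverted and each revert() receives that task's own execute() result (or a Failure if the task itself raised). A small runnable example of that contract, independent of Octavia:

    from taskflow import engines
    from taskflow import task
    from taskflow.patterns import linear_flow


    class CreateThing(task.Task):
        default_provides = 'thing_id'

        def execute(self):
            return 'thing-1'

        def revert(self, result, *args, **kwargs):
            # result is 'thing-1', just as CreateAmphoraInDB.revert receives
            # the id of the amphora row it created.
            print('cleaning up %s' % result)


    class Explode(task.Task):
        def execute(self, thing_id):
            raise RuntimeError('downstream failure for %s' % thing_id)


    flow = linear_flow.Flow('revert-demo')
    flow.add(CreateThing(), Explode())
    try:
        engines.run(flow)
    except RuntimeError:
        pass  # CreateThing.revert already ran before the error propagated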
+ :returns: None + """ + + if isinstance(result, failure.Failure): + # This task's execute failed, so nothing needed to be done to + # revert + return + + # At this point the revert is being called because another task + # executed after this failed so we will need to do something and + # result is the amphora's id + + LOG.warning("Reverting create amphora in DB for amp id %s ", result) + + # Delete the amphora for now. May want to just update status later + try: + self.amphora_repo.delete(db_apis.get_session(), id=result) + except Exception as e: + LOG.error("Failed to delete amphora %(amp)s " + "in the database due to: " + "%(except)s", {'amp': result, 'except': e}) + + +class MarkLBAmphoraeDeletedInDB(BaseDatabaseTask): + """Task to mark a list of amphora deleted in the Database.""" + + def execute(self, loadbalancer): + """Update load balancer's amphorae statuses to DELETED in the database. + + :param loadbalancer: The load balancer which amphorae should be + marked DELETED. + :returns: None + """ + for amp in loadbalancer.amphorae: + LOG.debug("Marking amphora %s DELETED ", amp.id) + self.amphora_repo.update(db_apis.get_session(), + id=amp.id, status=constants.DELETED) + + +class DeleteHealthMonitorInDB(BaseDatabaseTask): + """Delete the health monitor in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, health_mon): + """Delete the health monitor in DB + + :param health_mon: The health monitor which should be deleted + :returns: None + """ + + LOG.debug("DB delete health monitor: %s ", health_mon.id) + try: + self.health_mon_repo.delete(db_apis.get_session(), + id=health_mon.id) + except exc.NoResultFound: + # ignore if the HealthMonitor was not found + pass + + def revert(self, health_mon, *args, **kwargs): + """Mark the health monitor ERROR since the mark active couldn't happen + + :param health_mon: The health monitor which couldn't be deleted + :returns: None + """ + + LOG.warning("Reverting mark health monitor delete in DB " + "for health monitor with id %s", health_mon.id) + self.health_mon_repo.update(db_apis.get_session(), id=health_mon.id, + provisioning_status=constants.ERROR) + + +class DeleteHealthMonitorInDBByPool(DeleteHealthMonitorInDB): + """Delete the health monitor in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, pool): + """Delete the health monitor in the DB. + + :param pool: A pool which health monitor should be deleted. + :returns: None + """ + super(DeleteHealthMonitorInDBByPool, self).execute( + pool.health_monitor) + + def revert(self, pool, *args, **kwargs): + """Mark the health monitor ERROR since the mark active couldn't happen + + :param pool: A pool which health monitor couldn't be deleted + :returns: None + """ + super(DeleteHealthMonitorInDBByPool, self).revert( + pool.health_monitor, *args, **kwargs) + + +class DeleteMemberInDB(BaseDatabaseTask): + """Delete the member in the DB. 
+ + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, member): + """Delete the member in the DB + + :param member: The member to be deleted + :returns: None + """ + + LOG.debug("DB delete member for id: %s ", member.id) + self.member_repo.delete(db_apis.get_session(), id=member.id) + + def revert(self, member, *args, **kwargs): + """Mark the member ERROR since the delete couldn't happen + + :param member: Member that failed to get deleted + :returns: None + """ + + LOG.warning("Reverting delete in DB for member id %s", member.id) + try: + self.member_repo.update(db_apis.get_session(), member.id, + provisioning_status=constants.ERROR) + except Exception as e: + LOG.error("Failed to update member %(mem)s " + "provisioning_status to ERROR due to: %(except)s", + {'mem': member.id, 'except': e}) + + +class DeleteListenerInDB(BaseDatabaseTask): + """Delete the listener in the DB.""" + + def execute(self, listener): + """Delete the listener in DB + + :param listener: The listener to delete + :returns: None + """ + LOG.debug("Delete in DB for listener id: %s", listener.id) + self.listener_repo.delete(db_apis.get_session(), id=listener.id) + + def revert(self, listener, *args, **kwargs): + """Mark the listener ERROR since the listener didn't delete + + :param listener: Listener that failed to get deleted + :returns: None + """ + + LOG.warning("Reverting mark listener delete in DB for listener id %s", + listener.id) + + +class DeletePoolInDB(BaseDatabaseTask): + """Delete the pool in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, pool): + """Delete the pool in DB + + :param pool: The pool to be deleted + :returns: None + """ + + LOG.debug("Delete in DB for pool id: %s ", pool.id) + self.pool_repo.delete(db_apis.get_session(), id=pool.id) + + def revert(self, pool, *args, **kwargs): + """Mark the pool ERROR since the delete couldn't happen + + :param pool: Pool that failed to get deleted + :returns: None + """ + + LOG.warning("Reverting delete in DB for pool id %s", pool.id) + try: + self.pool_repo.update(db_apis.get_session(), pool.id, + provisioning_status=constants.ERROR) + except Exception as e: + LOG.error("Failed to update pool %(pool)s " + "provisioning_status to ERROR due to: %(except)s", + {'pool': pool.id, 'except': e}) + + +class DeleteL7PolicyInDB(BaseDatabaseTask): + """Delete the L7 policy in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, l7policy): + """Delete the l7policy in DB + + :param l7policy: The l7policy to be deleted + :returns: None + """ + + LOG.debug("Delete in DB for l7policy id: %s ", l7policy.id) + self.l7policy_repo.delete(db_apis.get_session(), id=l7policy.id) + + def revert(self, l7policy, *args, **kwargs): + """Mark the l7policy ERROR since the delete couldn't happen + + :param l7policy: L7 policy that failed to get deleted + :returns: None + """ + + LOG.warning("Reverting delete in DB for l7policy id %s", l7policy.id) + try: + self.l7policy_repo.update(db_apis.get_session(), l7policy.id, + provisioning_status=constants.ERROR) + except Exception as e: + LOG.error("Failed to update l7policy %(l7policy)s " + "provisioning_status to ERROR due to: %(except)s", + {'l7policy': l7policy.id, 'except': e}) + + +class DeleteL7RuleInDB(BaseDatabaseTask): + """Delete the L7 rule in the DB. 
+ + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, l7rule): + """Delete the l7rule in DB + + :param l7rule: The l7rule to be deleted + :returns: None + """ + + LOG.debug("Delete in DB for l7rule id: %s ", l7rule.id) + self.l7rule_repo.delete(db_apis.get_session(), id=l7rule.id) + + def revert(self, l7rule, *args, **kwargs): + """Mark the l7rule ERROR since the delete couldn't happen + + :param l7rule: L7 rule that failed to get deleted + :returns: None + """ + + LOG.warning("Reverting delete in DB for l7rule id %s", l7rule.id) + try: + self.l7rule_repo.update(db_apis.get_session(), l7rule.id, + provisioning_status=constants.ERROR) + except Exception as e: + LOG.error("Failed to update l7rule %(l7rule)s " + "provisioning_status to ERROR due to: %(except)s", + {'l7rule': l7rule.id, 'except': e}) + + +class ReloadAmphora(BaseDatabaseTask): + """Get an amphora object from the database.""" + + def execute(self, amphora_id): + """Get an amphora object from the database. + + :param amphora_id: The amphora ID to lookup + :returns: The amphora object + """ + + LOG.debug("Get amphora from DB for amphora id: %s ", amphora_id) + return self.amphora_repo.get(db_apis.get_session(), id=amphora_id) + + +class ReloadLoadBalancer(BaseDatabaseTask): + """Get an load balancer object from the database.""" + + def execute(self, loadbalancer_id, *args, **kwargs): + """Get an load balancer object from the database. + + :param loadbalancer_id: The load balancer ID to lookup + :returns: The load balancer object + """ + + LOG.debug("Get load balancer from DB for load balancer id: %s ", + loadbalancer_id) + return self.loadbalancer_repo.get(db_apis.get_session(), + id=loadbalancer_id) + + +class UpdateVIPAfterAllocation(BaseDatabaseTask): + """Update a VIP associated with a given load balancer.""" + + def execute(self, loadbalancer_id, vip): + """Update a VIP associated with a given load balancer. + + :param loadbalancer_id: Id of a load balancer which VIP should be + updated. + :param vip: data_models.Vip object with update data. + :returns: The load balancer object. + """ + self.repos.vip.update(db_apis.get_session(), loadbalancer_id, + port_id=vip.port_id, subnet_id=vip.subnet_id, + ip_address=vip.ip_address) + return self.repos.load_balancer.get(db_apis.get_session(), + id=loadbalancer_id) + + +class UpdateAmphoraeVIPData(BaseDatabaseTask): + """Update amphorae VIP data.""" + + def execute(self, amps_data): + """Update amphorae VIP data. + + :param amps_data: Amphorae update dicts. + :returns: None + """ + for amp_data in amps_data: + self.repos.amphora.update(db_apis.get_session(), amp_data.id, + vrrp_ip=amp_data.vrrp_ip, + ha_ip=amp_data.ha_ip, + vrrp_port_id=amp_data.vrrp_port_id, + ha_port_id=amp_data.ha_port_id, + vrrp_id=1) + + +class UpdateAmphoraVIPData(BaseDatabaseTask): + """Update amphorae VIP data.""" + + def execute(self, amp_data): + """Update amphorae VIP data. + + :param amps_data: Amphorae update dicts. + :returns: None + """ + self.repos.amphora.update(db_apis.get_session(), amp_data.id, + vrrp_ip=amp_data.vrrp_ip, + ha_ip=amp_data.ha_ip, + vrrp_port_id=amp_data.vrrp_port_id, + ha_port_id=amp_data.ha_port_id, + vrrp_id=1) + + +class UpdateAmpFailoverDetails(BaseDatabaseTask): + """Update amphora failover details in the database.""" + + def execute(self, amphora, amp_data): + """Update amphora failover details in the database. 
+ + :param amphora: The amphora to update + :param amp_data: data_models.Amphora object with update data + :returns: None + """ + # role and vrrp_priority will be updated later. + self.repos.amphora.update(db_apis.get_session(), amphora.id, + vrrp_ip=amp_data.vrrp_ip, + ha_ip=amp_data.ha_ip, + vrrp_port_id=amp_data.vrrp_port_id, + ha_port_id=amp_data.ha_port_id, + vrrp_id=amp_data.vrrp_id) + + +class AssociateFailoverAmphoraWithLBID(BaseDatabaseTask): + """Associate failover amphora with loadbalancer in the database.""" + + def execute(self, amphora_id, loadbalancer_id): + """Associate failover amphora with loadbalancer in the database. + + :param amphora_id: Id of an amphora to update + :param loadbalancer_id: Id of a load balancer to be associated with + a given amphora. + :returns: None + """ + self.repos.amphora.associate(db_apis.get_session(), + load_balancer_id=loadbalancer_id, + amphora_id=amphora_id) + + def revert(self, amphora_id, *args, **kwargs): + """Remove amphora-load balancer association. + + :param amphora_id: Id of an amphora that couldn't be associated + with a load balancer. + :returns: None + """ + try: + self.repos.amphora.update(db_apis.get_session(), amphora_id, + loadbalancer_id=None) + except Exception as e: + LOG.error("Failed to update amphora %(amp)s " + "load balancer id to None due to: " + "%(except)s", {'amp': amphora_id, 'except': e}) + + +class MapLoadbalancerToAmphora(BaseDatabaseTask): + """Maps and assigns a load balancer to an amphora in the database.""" + + def execute(self, loadbalancer_id, server_group_id=None, flavor=None): + """Allocates an Amphora for the load balancer in the database. + + :param loadbalancer_id: The load balancer id to map to an amphora + :returns: Amphora ID if one was allocated, None if it was + unable to allocate an Amphora + """ + + LOG.debug("Allocating an Amphora for load balancer with id %s", + loadbalancer_id) + + if server_group_id is not None: + LOG.debug("Load balancer is using anti-affinity. Skipping spares " + "pool allocation.") + return None + + # Validate the flavor is spares compatible + if not validate.is_flavor_spares_compatible(flavor): + LOG.debug("Load balancer has a flavor that is not compatible with " + "using spares pool amphora. Skipping spares pool " + "allocation.") + return None + + amp = self.amphora_repo.allocate_and_associate( + db_apis.get_session(), + loadbalancer_id) + if amp is None: + LOG.debug("No Amphora available for load balancer with id %s", + loadbalancer_id) + return None + + LOG.debug("Allocated Amphora with id %(amp)s for load balancer " + "with id %(lb)s", {'amp': amp.id, 'lb': loadbalancer_id}) + + return amp.id + + def revert(self, result, loadbalancer_id, *args, **kwargs): + LOG.warning("Reverting Amphora allocation for the load " + "balancer %s in the database.", loadbalancer_id) + self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer_id) + + +class _MarkAmphoraRoleAndPriorityInDB(BaseDatabaseTask): + """Alter the amphora role and priority in DB.""" + + def _execute(self, amphora, amp_role, vrrp_priority): + """Alter the amphora role and priority in DB. + + :param amphora: Amphora to update. + :param amp_role: Amphora role to be set. + :param vrrp_priority: VRRP priority to set. 
+ :returns: None + """ + LOG.debug("Mark %(role)s in DB for amphora: %(amp)s", + {'role': amp_role, 'amp': amphora.id}) + self.amphora_repo.update(db_apis.get_session(), amphora.id, + role=amp_role, + vrrp_priority=vrrp_priority) + + def _revert(self, result, amphora, *args, **kwargs): + """Removes role and vrrp_priority association. + + :param result: Result of the association. + :param amphora: Amphora which role/vrrp_priority association + failed. + :returns: None + """ + + if isinstance(result, failure.Failure): + return + + LOG.warning("Reverting amphora role in DB for amp id %(amp)s", + {'amp': amphora.id}) + try: + self.amphora_repo.update(db_apis.get_session(), amphora.id, + role=None, + vrrp_priority=None) + except Exception as e: + LOG.error("Failed to update amphora %(amp)s " + "role and vrrp_priority to None due to: " + "%(except)s", {'amp': amphora.id, 'except': e}) + + +class MarkAmphoraMasterInDB(_MarkAmphoraRoleAndPriorityInDB): + """Alter the amphora role to: MASTER.""" + + def execute(self, amphora): + """Mark amphora as MASTER in db. + + :param amphora: Amphora to update role. + :returns: None + """ + amp_role = constants.ROLE_MASTER + self._execute(amphora, amp_role, constants.ROLE_MASTER_PRIORITY) + + def revert(self, result, amphora, *args, **kwargs): + """Removes amphora role association. + + :param amphora: Amphora to update role. + :returns: None + """ + self._revert(result, amphora, *args, **kwargs) + + +class MarkAmphoraBackupInDB(_MarkAmphoraRoleAndPriorityInDB): + """Alter the amphora role to: Backup.""" + + def execute(self, amphora): + """Mark amphora as BACKUP in db. + + :param amphora: Amphora to update role. + :returns: None + """ + amp_role = constants.ROLE_BACKUP + self._execute(amphora, amp_role, constants.ROLE_BACKUP_PRIORITY) + + def revert(self, result, amphora, *args, **kwargs): + """Removes amphora role association. + + :param amphora: Amphora to update role. + :returns: None + """ + self._revert(result, amphora, *args, **kwargs) + + +class MarkAmphoraStandAloneInDB(_MarkAmphoraRoleAndPriorityInDB): + """Alter the amphora role to: Standalone.""" + + def execute(self, amphora): + """Mark amphora as STANDALONE in db. + + :param amphora: Amphora to update role. + :returns: None + """ + amp_role = constants.ROLE_STANDALONE + self._execute(amphora, amp_role, None) + + def revert(self, result, amphora, *args, **kwargs): + """Removes amphora role association. + + :param amphora: Amphora to update role. + :returns: None + """ + self._revert(result, amphora, *args, **kwargs) + + +class MarkAmphoraAllocatedInDB(BaseDatabaseTask): + """Will mark an amphora as allocated to a load balancer in the database. + + Assume sqlalchemy made sure the DB got + retried sufficiently - so just abort + """ + + def execute(self, amphora, loadbalancer_id): + """Mark amphora as allocated to a load balancer in DB. + + :param amphora: Amphora to be updated. + :param loadbalancer_id: Id of a load balancer to which an amphora + should be allocated. 
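For the active/standby topology, the MASTER and BACKUP variants above are applied once per amphora; rebind is the usual way to point the same 'amphora' argument at different objects in the flow storage. A sketch under that assumption (storage names are illustrative; the authoritative wiring is in the v2 load balancer flows):

    from taskflow.patterns import linear_flow

    from octavia.controller.worker.v2.tasks import database_tasks

    act_stdby = linear_flow.Flow('illustrative-act-stdby-roles')
    act_stdby.add(database_tasks.MarkAmphoraMasterInDB(
        name='mark-master', rebind={'amphora': 'master_amp'}))
    act_stdby.add(database_tasks.MarkAmphoraBackupInDB(
        name='mark-backup', rebind={'amphora': 'backup_amp'}))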
+ :returns: None + """ + + LOG.info('Mark ALLOCATED in DB for amphora: %(amp)s with ' + 'compute id %(comp)s for load balancer: %(lb)s', + { + 'amp': amphora.id, + 'comp': amphora.compute_id, + 'lb': loadbalancer_id + }) + self.amphora_repo.update(db_apis.get_session(), amphora.id, + status=constants.AMPHORA_ALLOCATED, + compute_id=amphora.compute_id, + lb_network_ip=amphora.lb_network_ip, + load_balancer_id=loadbalancer_id) + + def revert(self, result, amphora, loadbalancer_id, *args, **kwargs): + """Mark the amphora as broken and ready to be cleaned up. + + :param result: Execute task result + :param amphora: Amphora that was updated. + :param loadbalancer_id: Id of a load balancer to which an amphora + failed to be allocated. + :returns: None + """ + + if isinstance(result, failure.Failure): + return + + LOG.warning("Reverting mark amphora ready in DB for amp " + "id %(amp)s and compute id %(comp)s", + {'amp': amphora.id, 'comp': amphora.compute_id}) + self.task_utils.mark_amphora_status_error(amphora.id) + + +class MarkAmphoraBootingInDB(BaseDatabaseTask): + """Mark the amphora as booting in the database.""" + + def execute(self, amphora_id, compute_id): + """Mark amphora booting in DB. + + :param amphora_id: Id of the amphora to update + :param compute_id: Id of a compute on which an amphora resides + :returns: None + """ + + LOG.debug("Mark BOOTING in DB for amphora: %(amp)s with " + "compute id %(id)s", {'amp': amphora_id, 'id': compute_id}) + self.amphora_repo.update(db_apis.get_session(), amphora_id, + status=constants.AMPHORA_BOOTING, + compute_id=compute_id) + + def revert(self, result, amphora_id, compute_id, *args, **kwargs): + """Mark the amphora as broken and ready to be cleaned up. + + :param result: Execute task result + :param amphora_id: Id of the amphora that failed to update + :param compute_id: Id of a compute on which an amphora resides + :returns: None + """ + + if isinstance(result, failure.Failure): + return + + LOG.warning("Reverting mark amphora booting in DB for amp " + "id %(amp)s and compute id %(comp)s", + {'amp': amphora_id, 'comp': compute_id}) + try: + self.amphora_repo.update(db_apis.get_session(), amphora_id, + status=constants.ERROR, + compute_id=compute_id) + except Exception as e: + LOG.error("Failed to update amphora %(amp)s " + "status to ERROR due to: " + "%(except)s", {'amp': amphora_id, 'except': e}) + + +class MarkAmphoraDeletedInDB(BaseDatabaseTask): + """Mark the amphora deleted in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, amphora): + """Mark the amphora as deleted in DB. + + :param amphora: Amphora to be updated. + :returns: None + """ + + LOG.debug("Mark DELETED in DB for amphora: %(amp)s with " + "compute id %(comp)s", + {'amp': amphora.id, 'comp': amphora.compute_id}) + self.amphora_repo.update(db_apis.get_session(), amphora.id, + status=constants.DELETED) + + def revert(self, amphora, *args, **kwargs): + """Mark the amphora as broken and ready to be cleaned up. + + :param amphora: Amphora that was updated. + :returns: None + """ + + LOG.warning("Reverting mark amphora deleted in DB " + "for amp id %(amp)s and compute id %(comp)s", + {'amp': amphora.id, 'comp': amphora.compute_id}) + self.task_utils.mark_amphora_status_error(amphora.id) + + +class MarkAmphoraPendingDeleteInDB(BaseDatabaseTask): + """Mark the amphora pending delete in the DB. 
+ + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, amphora): + """Mark the amphora as pending delete in DB. + + :param amphora: Amphora to be updated. + :returns: None + """ + + LOG.debug("Mark PENDING DELETE in DB for amphora: %(amp)s " + "with compute id %(id)s", + {'amp': amphora.id, 'id': amphora.compute_id}) + self.amphora_repo.update(db_apis.get_session(), amphora.id, + status=constants.PENDING_DELETE) + + def revert(self, amphora, *args, **kwargs): + """Mark the amphora as broken and ready to be cleaned up. + + :param amphora: Amphora that was updated. + :returns: None + """ + + LOG.warning("Reverting mark amphora pending delete in DB " + "for amp id %(amp)s and compute id %(comp)s", + {'amp': amphora.id, 'comp': amphora.compute_id}) + self.task_utils.mark_amphora_status_error(amphora.id) + + +class MarkAmphoraPendingUpdateInDB(BaseDatabaseTask): + """Mark the amphora pending update in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, amphora): + """Mark the amphora as pending update in DB. + + :param amphora: Amphora to be updated. + :returns: None + """ + + LOG.debug("Mark PENDING UPDATE in DB for amphora: %(amp)s " + "with compute id %(id)s", + {'amp': amphora.id, 'id': amphora.compute_id}) + self.amphora_repo.update(db_apis.get_session(), amphora.id, + status=constants.PENDING_UPDATE) + + def revert(self, amphora, *args, **kwargs): + """Mark the amphora as broken and ready to be cleaned up. + + :param amphora: Amphora that was updated. + :returns: None + """ + + LOG.warning("Reverting mark amphora pending update in DB " + "for amp id %(amp)s and compute id %(comp)s", + {'amp': amphora.id, 'comp': amphora.compute_id}) + self.task_utils.mark_amphora_status_error(amphora.id) + + +class MarkAmphoraReadyInDB(BaseDatabaseTask): + """This task will mark an amphora as ready in the database. + + Assume sqlalchemy made sure the DB got + retried sufficiently - so just abort + """ + + def execute(self, amphora): + """Mark amphora as ready in DB. + + :param amphora: Amphora to be updated. + :returns: None + """ + + LOG.info("Mark READY in DB for amphora: %(amp)s with compute " + "id %(comp)s", + {"amp": amphora.id, "comp": amphora.compute_id}) + self.amphora_repo.update(db_apis.get_session(), amphora.id, + status=constants.AMPHORA_READY, + compute_id=amphora.compute_id, + lb_network_ip=amphora.lb_network_ip) + + def revert(self, amphora, *args, **kwargs): + """Mark the amphora as broken and ready to be cleaned up. + + :param amphora: Amphora that was updated. + :returns: None + """ + + LOG.warning("Reverting mark amphora ready in DB for amp " + "id %(amp)s and compute id %(comp)s", + {'amp': amphora.id, 'comp': amphora.compute_id}) + try: + self.amphora_repo.update(db_apis.get_session(), amphora.id, + status=constants.ERROR, + compute_id=amphora.compute_id, + lb_network_ip=amphora.lb_network_ip) + except Exception as e: + LOG.error("Failed to update amphora %(amp)s " + "status to ERROR due to: " + "%(except)s", {'amp': amphora.id, 'except': e}) + + +class UpdateAmphoraComputeId(BaseDatabaseTask): + """Associate amphora with a compute in DB.""" + + def execute(self, amphora_id, compute_id): + """Associate amphora with a compute in DB. 
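Taken together, the Mark*InDB tasks above and below walk an amphora through a fixed set of status constants. The ordering below is a descriptive summary drawn from the task bodies, not something enforced by the code:

    from octavia.common import constants

    # Typical progression for a booted amphora; AMPHORA_READY is used instead
    # of AMPHORA_ALLOCATED when the amphora goes to the spares pool.
    AMPHORA_LIFECYCLE = (
        constants.PENDING_CREATE,
        constants.AMPHORA_BOOTING,
        constants.AMPHORA_ALLOCATED,
        constants.PENDING_UPDATE,
        constants.PENDING_DELETE,
        constants.DELETED,
    )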
+ + :param amphora_id: Id of the amphora to update + :param compute_id: Id of a compute on which an amphora resides + :returns: None + """ + + self.amphora_repo.update(db_apis.get_session(), amphora_id, + compute_id=compute_id) + + +class UpdateAmphoraInfo(BaseDatabaseTask): + """Update amphora with compute instance details.""" + + def execute(self, amphora_id, compute_obj): + """Update amphora with compute instance details. + + :param amphora_id: Id of the amphora to update + :param compute_obj: Compute on which an amphora resides + :returns: Updated amphora object + """ + self.amphora_repo.update( + db_apis.get_session(), amphora_id, + lb_network_ip=compute_obj.lb_network_ip, + cached_zone=compute_obj.cached_zone, + image_id=compute_obj.image_id, + compute_flavor=compute_obj.compute_flavor) + return self.amphora_repo.get(db_apis.get_session(), id=amphora_id) + + +class UpdateAmphoraDBCertExpiration(BaseDatabaseTask): + """Update the amphora expiration date with new cert file date.""" + + def execute(self, amphora_id, server_pem): + """Update the amphora expiration date with new cert file date. + + :param amphora_id: Id of the amphora to update + :param server_pem: Certificate in PEM format + :returns: None + """ + + LOG.debug("Update DB cert expiry date of amphora id: %s", amphora_id) + + key = utils.get_six_compatible_server_certs_key_passphrase() + fer = fernet.Fernet(key) + cert_expiration = cert_parser.get_cert_expiration( + fer.decrypt(server_pem)) + LOG.debug("Certificate expiration date is %s ", cert_expiration) + self.amphora_repo.update(db_apis.get_session(), amphora_id, + cert_expiration=cert_expiration) + + +class UpdateAmphoraCertBusyToFalse(BaseDatabaseTask): + """Update the amphora cert_busy flag to be false.""" + + def execute(self, amphora): + """Update the amphora cert_busy flag to be false. + + :param amphora: Amphora to be updated. + :returns: None + """ + + LOG.debug("Update cert_busy flag of amphora id %s to False", + amphora.id) + self.amphora_repo.update(db_apis.get_session(), amphora.id, + cert_busy=False) + + +class MarkLBActiveInDB(BaseDatabaseTask): + """Mark the load balancer active in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def __init__(self, mark_subobjects=False, **kwargs): + super(MarkLBActiveInDB, self).__init__(**kwargs) + self.mark_subobjects = mark_subobjects + + def execute(self, loadbalancer): + """Mark the load balancer as active in DB. + + This also marks ACTIVE all sub-objects of the load balancer if + self.mark_subobjects is True. 
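mark_subobjects is a constructor flag rather than flow data, so whether MarkLBActiveInDB cascades ACTIVE down to listeners, pools, members, health monitors and L7 objects is decided when the flow is built. A sketch of the two constructions:

    from octavia.controller.worker.v2.tasks import database_tasks

    # Marks only the load balancer itself ACTIVE.
    mark_lb_only = database_tasks.MarkLBActiveInDB()

    # Also cascades ACTIVE to every sub-object hanging off the load balancer.
    mark_lb_tree = database_tasks.MarkLBActiveInDB(mark_subobjects=True)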
+ + :param loadbalancer: Load balancer object to be updated + :returns: None + """ + + if self.mark_subobjects: + LOG.debug("Marking all listeners of loadbalancer %s ACTIVE", + loadbalancer.id) + for listener in loadbalancer.listeners: + self._mark_listener_status(listener, constants.ACTIVE) + + LOG.info("Mark ACTIVE in DB for load balancer id: %s", + loadbalancer.id) + self.loadbalancer_repo.update(db_apis.get_session(), + loadbalancer.id, + provisioning_status=constants.ACTIVE) + + def _mark_listener_status(self, listener, status): + self.listener_repo.update(db_apis.get_session(), + listener.id, + provisioning_status=status) + LOG.debug("Marking all l7policies of listener %s %s", + listener.id, status) + for l7policy in listener.l7policies: + self._mark_l7policy_status(l7policy, status) + + if listener.default_pool: + LOG.debug("Marking default pool of listener %s %s", + listener.id, status) + self._mark_pool_status(listener.default_pool, status) + + def _mark_l7policy_status(self, l7policy, status): + self.l7policy_repo.update( + db_apis.get_session(), l7policy.id, + provisioning_status=status) + + LOG.debug("Marking all l7rules of l7policy %s %s", + l7policy.id, status) + for l7rule in l7policy.l7rules: + self._mark_l7rule_status(l7rule, status) + + if l7policy.redirect_pool: + LOG.debug("Marking redirect pool of l7policy %s %s", + l7policy.id, status) + self._mark_pool_status(l7policy.redirect_pool, status) + + def _mark_l7rule_status(self, l7rule, status): + self.l7rule_repo.update( + db_apis.get_session(), l7rule.id, + provisioning_status=status) + + def _mark_pool_status(self, pool, status): + self.pool_repo.update( + db_apis.get_session(), pool.id, + provisioning_status=status) + if pool.health_monitor: + LOG.debug("Marking health monitor of pool %s %s", pool.id, status) + self._mark_hm_status(pool.health_monitor, status) + + LOG.debug("Marking all members of pool %s %s", pool.id, status) + for member in pool.members: + self._mark_member_status(member, status) + + def _mark_hm_status(self, hm, status): + self.health_mon_repo.update( + db_apis.get_session(), hm.id, + provisioning_status=status) + + def _mark_member_status(self, member, status): + self.member_repo.update( + db_apis.get_session(), member.id, + provisioning_status=status) + + def revert(self, loadbalancer, *args, **kwargs): + """Mark the load balancer as broken and ready to be cleaned up. + + This also puts all sub-objects of the load balancer to ERROR state if + self.mark_subobjects is True + + :param loadbalancer: Load balancer object that failed to update + :returns: None + """ + + if self.mark_subobjects: + LOG.debug("Marking all listeners of loadbalancer %s ERROR", + loadbalancer.id) + for listener in loadbalancer.listeners: + try: + self._mark_listener_status(listener, constants.ERROR) + except Exception: + LOG.warning("Error updating listener %s provisioning " + "status", listener.id) + + LOG.warning("Reverting mark load balancer deleted in DB " + "for load balancer id %s", loadbalancer.id) + self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id) + + +class UpdateLBServerGroupInDB(BaseDatabaseTask): + """Update the server group id info for load balancer in DB.""" + + def execute(self, loadbalancer_id, server_group_id): + """Update the server group id info for load balancer in DB. 
+ + :param loadbalancer_id: Id of a load balancer to update + :param server_group_id: Id of a server group to associate with + the load balancer + :returns: None + """ + + LOG.debug("Server Group updated with id: %s for load balancer id: %s:", + server_group_id, loadbalancer_id) + self.loadbalancer_repo.update(db_apis.get_session(), + id=loadbalancer_id, + server_group_id=server_group_id) + + def revert(self, loadbalancer_id, server_group_id, *args, **kwargs): + """Remove server group information from a load balancer in DB. + + :param loadbalancer_id: Id of a load balancer that failed to update + :param server_group_id: Id of a server group that couldn't be + associated with the load balancer + :returns: None + """ + LOG.warning('Reverting Server Group updated with id: %(s1)s for ' + 'load balancer id: %(s2)s ', + {'s1': server_group_id, 's2': loadbalancer_id}) + try: + self.loadbalancer_repo.update(db_apis.get_session(), + id=loadbalancer_id, + server_group_id=None) + except Exception as e: + LOG.error("Failed to update load balancer %(lb)s " + "server_group_id to None due to: " + "%(except)s", {'lb': loadbalancer_id, 'except': e}) + + +class MarkLBDeletedInDB(BaseDatabaseTask): + """Mark the load balancer deleted in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, loadbalancer): + """Mark the load balancer as deleted in DB. + + :param loadbalancer: Load balancer object to be updated + :returns: None + """ + + LOG.debug("Mark DELETED in DB for load balancer id: %s", + loadbalancer.id) + self.loadbalancer_repo.update(db_apis.get_session(), + loadbalancer.id, + provisioning_status=constants.DELETED) + + def revert(self, loadbalancer, *args, **kwargs): + """Mark the load balancer as broken and ready to be cleaned up. + + :param loadbalancer: Load balancer object that failed to update + :returns: None + """ + + LOG.warning("Reverting mark load balancer deleted in DB " + "for load balancer id %s", loadbalancer.id) + self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id) + + +class MarkLBPendingDeleteInDB(BaseDatabaseTask): + """Mark the load balancer pending delete in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, loadbalancer): + """Mark the load balancer as pending delete in DB. + + :param loadbalancer: Load balancer object to be updated + :returns: None + """ + + LOG.debug("Mark PENDING DELETE in DB for load balancer id: %s", + loadbalancer.id) + self.loadbalancer_repo.update(db_apis.get_session(), + loadbalancer.id, + provisioning_status=(constants. + PENDING_DELETE)) + + def revert(self, loadbalancer, *args, **kwargs): + """Mark the load balancer as broken and ready to be cleaned up. + + :param loadbalancer: Load balancer object that failed to update + :returns: None + """ + + LOG.warning("Reverting mark load balancer pending delete in DB " + "for load balancer id %s", loadbalancer.id) + self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id) + + +class MarkLBAndListenersActiveInDB(BaseDatabaseTask): + """Mark the load balancer and specified listeners active in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, loadbalancer, listeners): + """Mark the load balancer and listeners as active in DB. 
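These status tasks usually bracket a flow: the API layer has already set the PENDING_* state, the flow does its work, and a final task such as MarkLBAndListenersActiveInDB flips everything back to ACTIVE, with its revert falling back to ERROR. A sketch of such a flow tail, assuming 'loadbalancer' and 'listeners' are supplied by the engine store or by earlier tasks:

    from taskflow.patterns import linear_flow

    from octavia.controller.worker.v2.tasks import database_tasks

    finish = linear_flow.Flow('illustrative-finish')
    finish.add(database_tasks.MarkLBAndListenersActiveInDB(
        requires=('loadbalancer', 'listeners')))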
+ + :param loadbalancer: Load balancer object to be updated + :param listeners: Listener objects to be updated + :returns: None + """ + + LOG.debug("Mark ACTIVE in DB for load balancer id: %s " + "and listener ids: %s", loadbalancer.id, + ', '.join([l.id for l in listeners])) + self.loadbalancer_repo.update(db_apis.get_session(), + loadbalancer.id, + provisioning_status=constants.ACTIVE) + for listener in listeners: + self.listener_repo.update(db_apis.get_session(), listener.id, + provisioning_status=constants.ACTIVE) + + def revert(self, loadbalancer, listeners, *args, **kwargs): + """Mark the load balancer and listeners as broken. + + :param loadbalancer: Load balancer object that failed to update + :param listeners: Listener objects that failed to update + :returns: None + """ + + LOG.warning("Reverting mark load balancer and listeners active in DB " + "for load balancer id %(LB)s and listener ids: %(list)s", + {'LB': loadbalancer.id, + 'list': ', '.join([l.id for l in listeners])}) + self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id) + for listener in listeners: + self.task_utils.mark_listener_prov_status_error(listener.id) + + +class MarkListenerActiveInDB(BaseDatabaseTask): + """Mark the listener active in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, listener): + """Mark the listener as active in DB + + :param listener: The listener to be marked active + :returns: None + """ + + LOG.debug("Mark ACTIVE in DB for listener id: %s ", listener.id) + self.listener_repo.update(db_apis.get_session(), listener.id, + provisioning_status=constants.ACTIVE) + + def revert(self, listener, *args, **kwargs): + """Mark the listener ERROR since the delete couldn't happen + + :param listener: The listener that couldn't be updated + :returns: None + """ + + LOG.warning("Reverting mark listener active in DB " + "for listener id %s", listener.id) + self.task_utils.mark_listener_prov_status_error(listener.id) + + +class MarkListenerDeletedInDB(BaseDatabaseTask): + """Mark the listener deleted in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, listener): + """Mark the listener as deleted in DB + + :param listener: The listener to be marked deleted + :returns: None + """ + + LOG.debug("Mark DELETED in DB for listener id: %s ", listener.id) + self.listener_repo.update(db_apis.get_session(), listener.id, + provisioning_status=constants.DELETED) + + def revert(self, listener, *args, **kwargs): + """Mark the listener ERROR since the delete couldn't happen + + :param listener: The listener that couldn't be updated + :returns: None + """ + + LOG.warning("Reverting mark listener deleted in DB " + "for listener id %s", listener.id) + self.task_utils.mark_listener_prov_status_error(listener.id) + + +class MarkListenerPendingDeleteInDB(BaseDatabaseTask): + """Mark the listener pending delete in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, listener): + """Mark the listener as pending delete in DB. + + :param listener: The listener to be updated + :returns: None + """ + + LOG.debug("Mark PENDING DELETE in DB for listener id: %s", + listener.id) + self.listener_repo.update(db_apis.get_session(), listener.id, + provisioning_status=constants.PENDING_DELETE) + + def revert(self, listener, *args, **kwargs): + """Mark the listener as broken and ready to be cleaned up. 
+ + :param listener: The listener that couldn't be updated + :returns: None + """ + + LOG.warning("Reverting mark listener pending delete in DB " + "for listener id %s", listener.id) + self.task_utils.mark_listener_prov_status_error(listener.id) + + +class UpdateLoadbalancerInDB(BaseDatabaseTask): + """Update the loadbalancer in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, loadbalancer, update_dict): + """Update the loadbalancer in the DB + + :param loadbalancer: The load balancer to be updated + :param update_dict: The dictionary of updates to apply + :returns: None + """ + + LOG.debug("Update DB for loadbalancer id: %s ", loadbalancer.id) + if update_dict.get('vip'): + vip_dict = update_dict.pop('vip') + self.vip_repo.update(db_apis.get_session(), + loadbalancer.vip.load_balancer_id, + **vip_dict) + self.loadbalancer_repo.update(db_apis.get_session(), loadbalancer.id, + **update_dict) + + def revert(self, loadbalancer, *args, **kwargs): + """Mark the loadbalancer ERROR since the update couldn't happen + + :param loadbalancer: The load balancer that couldn't be updated + :returns: None + """ + + LOG.warning("Reverting update loadbalancer in DB " + "for loadbalancer id %s", loadbalancer.id) + + self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id) + + +class UpdateHealthMonInDB(BaseDatabaseTask): + """Update the health monitor in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, health_mon, update_dict): + """Update the health monitor in the DB + + :param health_mon: The health monitor to be updated + :param update_dict: The dictionary of updates to apply + :returns: None + """ + + LOG.debug("Update DB for health monitor id: %s ", health_mon.id) + self.health_mon_repo.update(db_apis.get_session(), health_mon.id, + **update_dict) + + def revert(self, health_mon, *args, **kwargs): + """Mark the health monitor ERROR since the update couldn't happen + + :param health_mon: The health monitor that couldn't be updated + :returns: None + """ + + LOG.warning("Reverting update health monitor in DB " + "for health monitor id %s", health_mon.id) + try: + self.health_mon_repo.update(db_apis.get_session(), + health_mon.id, + provisioning_status=constants.ERROR) + except Exception as e: + LOG.error("Failed to update health monitor %(hm)s " + "provisioning_status to ERROR due to: %(except)s", + {'hm': health_mon.id, 'except': e}) + + +class UpdateListenerInDB(BaseDatabaseTask): + """Update the listener in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, listener, update_dict): + """Update the listener in the DB + + :param listener: The listener to be updated + :param update_dict: The dictionary of updates to apply + :returns: None + """ + + LOG.debug("Update DB for listener id: %s ", listener.id) + self.listener_repo.update(db_apis.get_session(), listener.id, + **update_dict) + + def revert(self, listener, *args, **kwargs): + """Mark the listener ERROR since the update couldn't happen + + :param listener: The listener that couldn't be updated + :returns: None + """ + + LOG.warning("Reverting update listener in DB " + "for listener id %s", listener.id) + self.task_utils.mark_listener_prov_status_error(listener.id) + + +class UpdateMemberInDB(BaseDatabaseTask): + """Update the member in the DB. 
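UpdateLoadbalancerInDB splits any nested 'vip' dict out of update_dict and applies it through the VIP repository before updating the load balancer row itself. A sketch of the store shape such an update flow expects; the concrete fields inside update_dict are illustrative:

    from taskflow.patterns import linear_flow

    from octavia.controller.worker.v2.tasks import database_tasks

    lb_update = linear_flow.Flow('illustrative-lb-update')
    lb_update.add(database_tasks.UpdateLoadbalancerInDB(
        requires=('loadbalancer', 'update_dict')))

    # The engine store would then carry something like:
    #   {'loadbalancer': lb,
    #    'update_dict': {'name': 'new-name',
    #                    'vip': {'qos_policy_id': None}}}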
+ + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, member, update_dict): + """Update the member in the DB + + :param member: The member to be updated + :param update_dict: The dictionary of updates to apply + :returns: None + """ + + LOG.debug("Update DB for member id: %s ", member.id) + self.member_repo.update(db_apis.get_session(), member.id, + **update_dict) + + def revert(self, member, *args, **kwargs): + """Mark the member ERROR since the update couldn't happen + + :param member: The member that couldn't be updated + :returns: None + """ + + LOG.warning("Reverting update member in DB " + "for member id %s", member.id) + try: + self.member_repo.update(db_apis.get_session(), member.id, + provisioning_status=constants.ERROR) + except Exception as e: + LOG.error("Failed to update member %(member)s provisioning_status " + "to ERROR due to: %(except)s", {'member': member.id, + 'except': e}) + + +class UpdatePoolInDB(BaseDatabaseTask): + """Update the pool in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, pool, update_dict): + """Update the pool in the DB + + :param pool: The pool to be updated + :param update_dict: The dictionary of updates to apply + :returns: None + """ + + LOG.debug("Update DB for pool id: %s ", pool.id) + self.repos.update_pool_and_sp(db_apis.get_session(), pool.id, + update_dict) + + def revert(self, pool, *args, **kwargs): + """Mark the pool ERROR since the update couldn't happen + + :param pool: The pool that couldn't be updated + :returns: None + """ + + LOG.warning("Reverting update pool in DB for pool id %s", pool.id) + try: + self.repos.update_pool_and_sp( + db_apis.get_session(), pool.id, + dict(provisioning_status=constants.ERROR)) + except Exception as e: + LOG.error("Failed to update pool %(pool)s provisioning_status to " + "ERROR due to: %(except)s", {'pool': pool.id, + 'except': e}) + + +class UpdateL7PolicyInDB(BaseDatabaseTask): + """Update the L7 policy in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, l7policy, update_dict): + """Update the L7 policy in the DB + + :param l7policy: The L7 policy to be updated + :param update_dict: The dictionary of updates to apply + :returns: None + """ + + LOG.debug("Update DB for l7policy id: %s ", l7policy.id) + self.l7policy_repo.update(db_apis.get_session(), l7policy.id, + **update_dict) + + def revert(self, l7policy, *args, **kwargs): + """Mark the l7policy ERROR since the update couldn't happen + + :param l7policy: L7 policy that couldn't be updated + :returns: None + """ + + LOG.warning("Reverting update l7policy in DB " + "for l7policy id %s", l7policy.id) + try: + self.l7policy_repo.update(db_apis.get_session(), l7policy.id, + provisioning_status=constants.ERROR) + except Exception as e: + LOG.error("Failed to update l7policy %(l7p)s provisioning_status " + "to ERROR due to: %(except)s", {'l7p': l7policy.id, + 'except': e}) + + +class UpdateL7RuleInDB(BaseDatabaseTask): + """Update the L7 rule in the DB. 
+ + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, l7rule, update_dict): + """Update the L7 rule in the DB + + :param l7rule: The L7 rule to be updated + :param update_dict: The dictionary of updates to apply + :returns: None + """ + + LOG.debug("Update DB for l7rule id: %s ", l7rule.id) + self.l7rule_repo.update(db_apis.get_session(), l7rule.id, + **update_dict) + + def revert(self, l7rule, *args, **kwargs): + """Mark the L7 rule ERROR since the update couldn't happen + + :param l7rule: L7 rule that couldn't be updated + :returns: None + """ + + LOG.warning("Reverting update l7rule in DB " + "for l7rule id %s", l7rule.id) + try: + self.l7policy_repo.update(db_apis.get_session(), + l7rule.l7policy.id, + provisioning_status=constants.ERROR) + except Exception as e: + LOG.error("Failed to update L7rule %(l7r)s provisioning_status to " + "ERROR due to: %(except)s", {'l7r': l7rule.l7policy.id, + 'except': e}) + + +class GetAmphoraDetails(BaseDatabaseTask): + """Task to retrieve amphora network details.""" + + def execute(self, amphora): + """Retrieve amphora network details. + + :param amphora: Amphora which network details are required + :returns: data_models.Amphora object + """ + return data_models.Amphora(id=amphora.id, + vrrp_ip=amphora.vrrp_ip, + ha_ip=amphora.ha_ip, + vrrp_port_id=amphora.vrrp_port_id, + ha_port_id=amphora.ha_port_id, + role=amphora.role, + vrrp_id=amphora.vrrp_id, + vrrp_priority=amphora.vrrp_priority) + + +class GetAmphoraeFromLoadbalancer(BaseDatabaseTask): + """Task to pull the listeners from a loadbalancer.""" + + def execute(self, loadbalancer): + """Pull the amphorae from a loadbalancer. + + :param loadbalancer: Load balancer which listeners are required + :returns: A list of Listener objects + """ + amphorae = [] + for amp in loadbalancer.amphorae: + a = self.amphora_repo.get(db_apis.get_session(), id=amp.id, + show_deleted=False) + if a is None: + continue + amphorae.append(a) + return amphorae + + +class GetListenersFromLoadbalancer(BaseDatabaseTask): + """Task to pull the listeners from a loadbalancer.""" + + def execute(self, loadbalancer): + """Pull the listeners from a loadbalancer. + + :param loadbalancer: Load balancer which listeners are required + :returns: A list of Listener objects + """ + listeners = [] + for listener in loadbalancer.listeners: + l = self.listener_repo.get(db_apis.get_session(), id=listener.id) + l.load_balancer = loadbalancer + listeners.append(l) + return listeners + + +class GetVipFromLoadbalancer(BaseDatabaseTask): + """Task to pull the vip from a loadbalancer.""" + + def execute(self, loadbalancer): + """Pull the vip from a loadbalancer. + + :param loadbalancer: Load balancer which VIP is required + :returns: VIP associated with a given load balancer + """ + return loadbalancer.vip + + +class CreateVRRPGroupForLB(BaseDatabaseTask): + """Create a VRRP group for a load balancer.""" + + def execute(self, loadbalancer): + """Create a VRRP group for a load balancer. 
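The Get*FromLoadbalancer tasks exist to refresh objects from the database and publish them into the flow storage under well-known names for later tasks to consume. A sketch of that pattern with illustrative storage keys:

    from taskflow.patterns import linear_flow

    from octavia.controller.worker.v2.tasks import database_tasks

    refresh = linear_flow.Flow('illustrative-refresh')
    refresh.add(database_tasks.GetAmphoraeFromLoadbalancer(
        requires='loadbalancer', provides='amphorae'))
    refresh.add(database_tasks.GetListenersFromLoadbalancer(
        requires='loadbalancer', provides='listeners'))
    refresh.add(database_tasks.GetVipFromLoadbalancer(
        requires='loadbalancer', provides='vip'))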
+ + :param loadbalancer: Load balancer for which a VRRP group + should be created + :returns: Updated load balancer + """ + try: + loadbalancer.vrrp_group = self.repos.vrrpgroup.create( + db_apis.get_session(), + load_balancer_id=loadbalancer.id, + vrrp_group_name=str(loadbalancer.id).replace('-', ''), + vrrp_auth_type=constants.VRRP_AUTH_DEFAULT, + vrrp_auth_pass=uuidutils.generate_uuid().replace('-', '')[0:7], + advert_int=CONF.keepalived_vrrp.vrrp_advert_int) + except odb_exceptions.DBDuplicateEntry: + LOG.debug('VRRP_GROUP entry already exists for load balancer, ' + 'skipping create.') + return loadbalancer + + +class DisableAmphoraHealthMonitoring(BaseDatabaseTask): + """Disable amphora health monitoring. + + This disables amphora health monitoring by removing it from + the amphora_health table. + """ + + def execute(self, amphora): + """Disable health monitoring for an amphora + + :param amphora: The amphora to disable health monitoring for + :returns: None + """ + self._delete_from_amp_health(amphora.id) + + +class DisableLBAmphoraeHealthMonitoring(BaseDatabaseTask): + """Disable health monitoring on the LB amphorae. + + This disables amphora health monitoring by removing it from + the amphora_health table for each amphora on a load balancer. + """ + + def execute(self, loadbalancer): + """Disable health monitoring for amphora on a load balancer + + :param loadbalancer: The load balancer to disable health monitoring on + :returns: None + """ + for amphora in loadbalancer.amphorae: + self._delete_from_amp_health(amphora.id) + + +class MarkAmphoraHealthBusy(BaseDatabaseTask): + """Mark amphora health monitoring busy. + + This prevents amphora failover by marking the amphora busy in + the amphora_health table. + """ + + def execute(self, amphora): + """Mark amphora health monitoring busy + + :param amphora: The amphora to mark amphora health busy + :returns: None + """ + self._mark_amp_health_busy(amphora.id) + + +class MarkLBAmphoraeHealthBusy(BaseDatabaseTask): + """Mark amphorae health monitoring busy for the LB. + + This prevents amphorae failover by marking each amphora of a given + load balancer busy in the amphora_health table. + """ + + def execute(self, loadbalancer): + """Marks amphorae health busy for each amphora on a load balancer + + :param loadbalancer: The load balancer to mark amphorae health busy + :returns: None + """ + for amphora in loadbalancer.amphorae: + self._mark_amp_health_busy(amphora.id) + + +class MarkHealthMonitorActiveInDB(BaseDatabaseTask): + """Mark the health monitor ACTIVE in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, health_mon): + """Mark the health monitor ACTIVE in DB. 
+
+        :param health_mon: Health Monitor object to be updated
+        :returns: None
+        """
+
+        LOG.debug("Mark ACTIVE in DB for health monitor id: %s",
+                  health_mon.id)
+
+        op_status = (constants.ONLINE if health_mon.enabled
+                     else constants.OFFLINE)
+        self.health_mon_repo.update(db_apis.get_session(),
+                                    health_mon.id,
+                                    provisioning_status=constants.ACTIVE,
+                                    operating_status=op_status)
+
+    def revert(self, health_mon, *args, **kwargs):
+        """Mark the health monitor as broken
+
+        :param health_mon: Health Monitor object that failed to update
+        :returns: None
+        """
+
+        LOG.warning("Reverting mark health monitor ACTIVE in DB "
+                    "for health monitor id %s", health_mon.id)
+        self.task_utils.mark_health_mon_prov_status_error(health_mon.id)
+
+
+class MarkHealthMonitorPendingCreateInDB(BaseDatabaseTask):
+    """Mark the health monitor pending create in the DB.
+
+    Since sqlalchemy will likely retry by itself always revert if it fails
+    """
+
+    def execute(self, health_mon):
+        """Mark the health monitor as pending create in DB.
+
+        :param health_mon: Health Monitor object to be updated
+        :returns: None
+        """
+
+        LOG.debug("Mark PENDING CREATE in DB for health monitor id: %s",
+                  health_mon.id)
+        self.health_mon_repo.update(db_apis.get_session(),
+                                    health_mon.id,
+                                    provisioning_status=(constants.
+                                                         PENDING_CREATE))
+
+    def revert(self, health_mon, *args, **kwargs):
+        """Mark the health monitor as broken
+
+        :param health_mon: Health Monitor object that failed to update
+        :returns: None
+        """
+
+        LOG.warning("Reverting mark health monitor pending create in DB "
+                    "for health monitor id %s", health_mon.id)
+        self.task_utils.mark_health_mon_prov_status_error(health_mon.id)
+
+
+class MarkHealthMonitorPendingDeleteInDB(BaseDatabaseTask):
+    """Mark the health monitor pending delete in the DB.
+
+    Since sqlalchemy will likely retry by itself always revert if it fails
+    """
+
+    def execute(self, health_mon):
+        """Mark the health monitor as pending delete in DB.
+
+        :param health_mon: Health Monitor object to be updated
+        :returns: None
+        """
+
+        LOG.debug("Mark PENDING DELETE in DB for health monitor id: %s",
+                  health_mon.id)
+        self.health_mon_repo.update(db_apis.get_session(),
+                                    health_mon.id,
+                                    provisioning_status=(constants.
+                                                         PENDING_DELETE))
+
+    def revert(self, health_mon, *args, **kwargs):
+        """Mark the health monitor as broken
+
+        :param health_mon: Health Monitor object that failed to update
+        :returns: None
+        """
+
+        LOG.warning("Reverting mark health monitor pending delete in DB "
+                    "for health monitor id %s", health_mon.id)
+        self.task_utils.mark_health_mon_prov_status_error(health_mon.id)
+
+
+class MarkHealthMonitorPendingUpdateInDB(BaseDatabaseTask):
+    """Mark the health monitor pending update in the DB.
+
+    Since sqlalchemy will likely retry by itself always revert if it fails
+    """
+
+    def execute(self, health_mon):
+        """Mark the health monitor as pending update in DB.
+
+        :param health_mon: Health Monitor object to be updated
+        :returns: None
+        """
+
+        LOG.debug("Mark PENDING UPDATE in DB for health monitor id: %s",
+                  health_mon.id)
+        self.health_mon_repo.update(db_apis.get_session(),
+                                    health_mon.id,
+                                    provisioning_status=(constants.
+ PENDING_UPDATE)) + + def revert(self, health_mon, *args, **kwargs): + """Mark the health monitor as broken + + :param health_mon: Health Monitor object that failed to update + :returns: None + """ + + LOG.warning("Reverting mark health monitor pending update in DB " + "for health monitor id %s", health_mon.id) + self.task_utils.mark_health_mon_prov_status_error(health_mon.id) + + +class MarkL7PolicyActiveInDB(BaseDatabaseTask): + """Mark the l7policy ACTIVE in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, l7policy): + """Mark the l7policy ACTIVE in DB. + + :param l7policy: L7Policy object to be updated + :returns: None + """ + + LOG.debug("Mark ACTIVE in DB for l7policy id: %s", + l7policy.id) + + op_status = constants.ONLINE if l7policy.enabled else constants.OFFLINE + self.l7policy_repo.update(db_apis.get_session(), + l7policy.id, + provisioning_status=constants.ACTIVE, + operating_status=op_status) + + def revert(self, l7policy, *args, **kwargs): + """Mark the l7policy as broken + + :param l7policy: L7Policy object that failed to update + :returns: None + """ + + LOG.warning("Reverting mark l7policy ACTIVE in DB " + "for l7policy id %s", l7policy.id) + self.task_utils.mark_l7policy_prov_status_error(l7policy.id) + + +class MarkL7PolicyPendingCreateInDB(BaseDatabaseTask): + """Mark the l7policy pending create in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, l7policy): + """Mark the l7policy as pending create in DB. + + :param l7policy: L7Policy object to be updated + :returns: None + """ + + LOG.debug("Mark PENDING CREATE in DB for l7policy id: %s", + l7policy.id) + self.l7policy_repo.update(db_apis.get_session(), + l7policy.id, + provisioning_status=constants.PENDING_CREATE) + + def revert(self, l7policy, *args, **kwargs): + """Mark the l7policy as broken + + :param l7policy: L7Policy object that failed to update + :returns: None + """ + + LOG.warning("Reverting mark l7policy pending create in DB " + "for l7policy id %s", l7policy.id) + self.task_utils.mark_l7policy_prov_status_error(l7policy.id) + + +class MarkL7PolicyPendingDeleteInDB(BaseDatabaseTask): + """Mark the l7policy pending delete in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, l7policy): + """Mark the l7policy as pending delete in DB. + + :param l7policy: L7Policy object to be updated + :returns: None + """ + + LOG.debug("Mark PENDING DELETE in DB for l7policy id: %s", + l7policy.id) + self.l7policy_repo.update(db_apis.get_session(), + l7policy.id, + provisioning_status=constants.PENDING_DELETE) + + def revert(self, l7policy, *args, **kwargs): + """Mark the l7policy as broken + + :param l7policy: L7Policy object that failed to update + :returns: None + """ + + LOG.warning("Reverting mark l7policy pending delete in DB " + "for l7policy id %s", l7policy.id) + self.task_utils.mark_l7policy_prov_status_error(l7policy.id) + + +class MarkL7PolicyPendingUpdateInDB(BaseDatabaseTask): + """Mark the l7policy pending update in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, l7policy): + """Mark the l7policy as pending update in DB. + + :param l7policy: L7Policy object to be updated + :returns: None + """ + + LOG.debug("Mark PENDING UPDATE in DB for l7policy id: %s", + l7policy.id) + self.l7policy_repo.update(db_apis.get_session(), + l7policy.id, + provisioning_status=(constants. 
+ PENDING_UPDATE)) + + def revert(self, l7policy, *args, **kwargs): + """Mark the l7policy as broken + + :param l7policy: L7Policy object that failed to update + :returns: None + """ + + LOG.warning("Reverting mark l7policy pending update in DB " + "for l7policy id %s", l7policy.id) + self.task_utils.mark_l7policy_prov_status_error(l7policy.id) + + +class MarkL7RuleActiveInDB(BaseDatabaseTask): + """Mark the l7rule ACTIVE in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, l7rule): + """Mark the l7rule ACTIVE in DB. + + :param l7rule: L7Rule object to be updated + :returns: None + """ + + LOG.debug("Mark ACTIVE in DB for l7rule id: %s", + l7rule.id) + op_status = constants.ONLINE if l7rule.enabled else constants.OFFLINE + self.l7rule_repo.update(db_apis.get_session(), + l7rule.id, + provisioning_status=constants.ACTIVE, + operating_status=op_status) + + def revert(self, l7rule, *args, **kwargs): + """Mark the l7rule as broken + + :param l7rule: L7Rule object that failed to update + :returns: None + """ + + LOG.warning("Reverting mark l7rule ACTIVE in DB " + "for l7rule id %s", l7rule.id) + self.task_utils.mark_l7rule_prov_status_error(l7rule.id) + + +class MarkL7RulePendingCreateInDB(BaseDatabaseTask): + """Mark the l7rule pending create in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, l7rule): + """Mark the l7rule as pending create in DB. + + :param l7rule: L7Rule object to be updated + :returns: None + """ + + LOG.debug("Mark PENDING CREATE in DB for l7rule id: %s", + l7rule.id) + self.l7rule_repo.update(db_apis.get_session(), + l7rule.id, + provisioning_status=constants.PENDING_CREATE) + + def revert(self, l7rule, *args, **kwargs): + """Mark the l7rule as broken + + :param l7rule: L7Rule object that failed to update + :returns: None + """ + + LOG.warning("Reverting mark l7rule pending create in DB " + "for l7rule id %s", l7rule.id) + self.task_utils.mark_l7rule_prov_status_error(l7rule.id) + + +class MarkL7RulePendingDeleteInDB(BaseDatabaseTask): + """Mark the l7rule pending delete in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, l7rule): + """Mark the l7rule as pending delete in DB. + + :param l7rule: L7Rule object to be updated + :returns: None + """ + + LOG.debug("Mark PENDING DELETE in DB for l7rule id: %s", + l7rule.id) + self.l7rule_repo.update(db_apis.get_session(), + l7rule.id, + provisioning_status=constants.PENDING_DELETE) + + def revert(self, l7rule, *args, **kwargs): + """Mark the l7rule as broken + + :param l7rule: L7Rule object that failed to update + :returns: None + """ + + LOG.warning("Reverting mark l7rule pending delete in DB " + "for l7rule id %s", l7rule.id) + self.task_utils.mark_l7rule_prov_status_error(l7rule.id) + + +class MarkL7RulePendingUpdateInDB(BaseDatabaseTask): + """Mark the l7rule pending update in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, l7rule): + """Mark the l7rule as pending update in DB. 
+ + :param l7rule: L7Rule object to be updated + :returns: None + """ + + LOG.debug("Mark PENDING UPDATE in DB for l7rule id: %s", + l7rule.id) + self.l7rule_repo.update(db_apis.get_session(), + l7rule.id, + provisioning_status=constants.PENDING_UPDATE) + + def revert(self, l7rule, *args, **kwargs): + """Mark the l7rule as broken + + :param l7rule: L7Rule object that failed to update + :returns: None + """ + + LOG.warning("Reverting mark l7rule pending update in DB " + "for l7rule id %s", l7rule.id) + self.task_utils.mark_l7rule_prov_status_error(l7rule.id) + + +class MarkMemberActiveInDB(BaseDatabaseTask): + """Mark the member ACTIVE in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, member): + """Mark the member ACTIVE in DB. + + :param member: Member object to be updated + :returns: None + """ + + LOG.debug("Mark ACTIVE in DB for member id: %s", member.id) + self.member_repo.update(db_apis.get_session(), + member.id, + provisioning_status=constants.ACTIVE) + + def revert(self, member, *args, **kwargs): + """Mark the member as broken + + :param member: Member object that failed to update + :returns: None + """ + + LOG.warning("Reverting mark member ACTIVE in DB " + "for member id %s", member.id) + self.task_utils.mark_member_prov_status_error(member.id) + + +class MarkMemberPendingCreateInDB(BaseDatabaseTask): + """Mark the member pending create in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, member): + """Mark the member as pending create in DB. + + :param member: Member object to be updated + :returns: None + """ + + LOG.debug("Mark PENDING CREATE in DB for member id: %s", member.id) + self.member_repo.update(db_apis.get_session(), + member.id, + provisioning_status=constants.PENDING_CREATE) + + def revert(self, member, *args, **kwargs): + """Mark the member as broken + + :param member: Member object that failed to update + :returns: None + """ + + LOG.warning("Reverting mark member pending create in DB " + "for member id %s", member.id) + self.task_utils.mark_member_prov_status_error(member.id) + + +class MarkMemberPendingDeleteInDB(BaseDatabaseTask): + """Mark the member pending delete in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, member): + """Mark the member as pending delete in DB. + + :param member: Member object to be updated + :returns: None + """ + + LOG.debug("Mark PENDING DELETE in DB for member id: %s", member.id) + self.member_repo.update(db_apis.get_session(), + member.id, + provisioning_status=constants.PENDING_DELETE) + + def revert(self, member, *args, **kwargs): + """Mark the member as broken + + :param member: Member object that failed to update + :returns: None + """ + + LOG.warning("Reverting mark member pending delete in DB " + "for member id %s", member.id) + self.task_utils.mark_member_prov_status_error(member.id) + + +class MarkMemberPendingUpdateInDB(BaseDatabaseTask): + """Mark the member pending update in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, member): + """Mark the member as pending update in DB. 
+ + :param member: Member object to be updated + :returns: None + """ + + LOG.debug("Mark PENDING UPDATE in DB for member id: %s", + member.id) + self.member_repo.update(db_apis.get_session(), + member.id, + provisioning_status=constants.PENDING_UPDATE) + + def revert(self, member, *args, **kwargs): + """Mark the member as broken + + :param member: Member object that failed to update + :returns: None + """ + + LOG.warning("Reverting mark member pending update in DB " + "for member id %s", member.id) + self.task_utils.mark_member_prov_status_error(member.id) + + +class MarkPoolActiveInDB(BaseDatabaseTask): + """Mark the pool ACTIVE in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, pool): + """Mark the pool ACTIVE in DB. + + :param pool: Pool object to be updated + :returns: None + """ + + LOG.debug("Mark ACTIVE in DB for pool id: %s", + pool.id) + self.pool_repo.update(db_apis.get_session(), + pool.id, + provisioning_status=constants.ACTIVE) + + def revert(self, pool, *args, **kwargs): + """Mark the pool as broken + + :param pool: Pool object that failed to update + :returns: None + """ + + LOG.warning("Reverting mark pool ACTIVE in DB for pool id %s", pool.id) + self.task_utils.mark_pool_prov_status_error(pool.id) + + +class MarkPoolPendingCreateInDB(BaseDatabaseTask): + """Mark the pool pending create in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, pool): + """Mark the pool as pending create in DB. + + :param pool: Pool object to be updated + :returns: None + """ + + LOG.debug("Mark PENDING CREATE in DB for pool id: %s", + pool.id) + self.pool_repo.update(db_apis.get_session(), + pool.id, + provisioning_status=constants.PENDING_CREATE) + + def revert(self, pool, *args, **kwargs): + """Mark the pool as broken + + :param pool: Pool object that failed to update + :returns: None + """ + + LOG.warning("Reverting mark pool pending create in DB " + "for pool id %s", pool.id) + self.task_utils.mark_pool_prov_status_error(pool.id) + + +class MarkPoolPendingDeleteInDB(BaseDatabaseTask): + """Mark the pool pending delete in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, pool): + """Mark the pool as pending delete in DB. + + :param pool: Pool object to be updated + :returns: None + """ + + LOG.debug("Mark PENDING DELETE in DB for pool id: %s", + pool.id) + self.pool_repo.update(db_apis.get_session(), + pool.id, + provisioning_status=constants.PENDING_DELETE) + + def revert(self, pool, *args, **kwargs): + """Mark the pool as broken + + :param pool: Pool object that failed to update + :returns: None + """ + + LOG.warning("Reverting mark pool pending delete in DB " + "for pool id %s", pool.id) + self.task_utils.mark_pool_prov_status_error(pool.id) + + +class MarkPoolPendingUpdateInDB(BaseDatabaseTask): + """Mark the pool pending update in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, pool): + """Mark the pool as pending update in DB. 
+ + :param pool: Pool object to be updated + :returns: None + """ + + LOG.debug("Mark PENDING UPDATE in DB for pool id: %s", + pool.id) + self.pool_repo.update(db_apis.get_session(), + pool.id, + provisioning_status=constants.PENDING_UPDATE) + + def revert(self, pool, *args, **kwargs): + """Mark the pool as broken + + :param pool: Pool object that failed to update + :returns: None + """ + + LOG.warning("Reverting mark pool pending update in DB " + "for pool id %s", pool.id) + self.task_utils.mark_pool_prov_status_error(pool.id) + + +class DecrementHealthMonitorQuota(BaseDatabaseTask): + """Decrements the health monitor quota for a project. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, health_mon): + """Decrements the health monitor quota. + + :param health_mon: The health monitor to decrement the quota on. + :returns: None + """ + + LOG.debug("Decrementing health monitor quota for " + "project: %s ", health_mon.project_id) + + lock_session = db_apis.get_session(autocommit=False) + try: + self.repos.decrement_quota(lock_session, + data_models.HealthMonitor, + health_mon.project_id) + lock_session.commit() + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error('Failed to decrement health monitor quota for ' + 'project: %(proj)s the project may have excess ' + 'quota in use.', {'proj': health_mon.project_id}) + lock_session.rollback() + + def revert(self, health_mon, result, *args, **kwargs): + """Re-apply the quota + + :param health_mon: The health monitor to decrement the quota on. + :returns: None + """ + + LOG.warning('Reverting decrement quota for health monitor on project' + ' %(proj)s Project quota counts may be incorrect.', + {'proj': health_mon.project_id}) + + # Increment the quota back if this task wasn't the failure + if not isinstance(result, failure.Failure): + + try: + session = db_apis.get_session() + lock_session = db_apis.get_session(autocommit=False) + try: + self.repos.check_quota_met(session, + lock_session, + data_models.HealthMonitor, + health_mon.project_id) + lock_session.commit() + except Exception: + lock_session.rollback() + except Exception: + # Don't fail the revert flow + pass + + +class DecrementListenerQuota(BaseDatabaseTask): + """Decrements the listener quota for a project. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, listener): + """Decrements the listener quota. + + :param listener: The listener to decrement the quota on. + :returns: None + """ + + LOG.debug("Decrementing listener quota for " + "project: %s ", listener.project_id) + + lock_session = db_apis.get_session(autocommit=False) + try: + self.repos.decrement_quota(lock_session, + data_models.Listener, + listener.project_id) + lock_session.commit() + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error('Failed to decrement listener quota for project: ' + '%(proj)s the project may have excess quota in use.', + {'proj': listener.project_id}) + lock_session.rollback() + + def revert(self, listener, result, *args, **kwargs): + """Re-apply the quota + + :param listener: The listener to decrement the quota on. 
+ :returns: None + """ + + LOG.warning('Reverting decrement quota for listener on project ' + '%(proj)s Project quota counts may be incorrect.', + {'proj': listener.project_id}) + + # Increment the quota back if this task wasn't the failure + if not isinstance(result, failure.Failure): + + try: + session = db_apis.get_session() + lock_session = db_apis.get_session(autocommit=False) + try: + self.repos.check_quota_met(session, + lock_session, + data_models.Listener, + listener.project_id) + lock_session.commit() + except Exception: + lock_session.rollback() + except Exception: + # Don't fail the revert flow + pass + + +class DecrementLoadBalancerQuota(BaseDatabaseTask): + """Decrements the load balancer quota for a project. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, loadbalancer): + """Decrements the load balancer quota. + + :param loadbalancer: The load balancer to decrement the quota on. + :returns: None + """ + + LOG.debug("Decrementing load balancer quota for " + "project: %s ", loadbalancer.project_id) + + lock_session = db_apis.get_session(autocommit=False) + try: + self.repos.decrement_quota(lock_session, + data_models.LoadBalancer, + loadbalancer.project_id) + lock_session.commit() + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error('Failed to decrement load balancer quota for ' + 'project: %(proj)s the project may have excess ' + 'quota in use.', {'proj': loadbalancer.project_id}) + lock_session.rollback() + + def revert(self, loadbalancer, result, *args, **kwargs): + """Re-apply the quota + + :param loadbalancer: The load balancer to decrement the quota on. + :returns: None + """ + + LOG.warning('Reverting decrement quota for load balancer on project ' + '%(proj)s Project quota counts may be incorrect.', + {'proj': loadbalancer.project_id}) + + # Increment the quota back if this task wasn't the failure + if not isinstance(result, failure.Failure): + + try: + session = db_apis.get_session() + lock_session = db_apis.get_session(autocommit=False) + try: + self.repos.check_quota_met(session, + lock_session, + data_models.LoadBalancer, + loadbalancer.project_id) + lock_session.commit() + except Exception: + lock_session.rollback() + except Exception: + # Don't fail the revert flow + pass + + +class DecrementMemberQuota(BaseDatabaseTask): + """Decrements the member quota for a project. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, member): + """Decrements the member quota. + + :param member: The member to decrement the quota on. + :returns: None + """ + + LOG.debug("Decrementing member quota for " + "project: %s ", member.project_id) + + lock_session = db_apis.get_session(autocommit=False) + try: + self.repos.decrement_quota(lock_session, + data_models.Member, + member.project_id) + lock_session.commit() + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error('Failed to decrement member quota for project: ' + '%(proj)s the project may have excess quota in use.', + {'proj': member.project_id}) + lock_session.rollback() + + def revert(self, member, result, *args, **kwargs): + """Re-apply the quota + + :param member: The member to decrement the quota on. 
+ :returns: None + """ + + LOG.warning('Reverting decrement quota for member on project %(proj)s ' + 'Project quota counts may be incorrect.', + {'proj': member.project_id}) + + # Increment the quota back if this task wasn't the failure + if not isinstance(result, failure.Failure): + + try: + session = db_apis.get_session() + lock_session = db_apis.get_session(autocommit=False) + try: + self.repos.check_quota_met(session, + lock_session, + data_models.Member, + member.project_id) + lock_session.commit() + except Exception: + lock_session.rollback() + except Exception: + # Don't fail the revert flow + pass + + +class DecrementPoolQuota(BaseDatabaseTask): + """Decrements the pool quota for a project. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, pool, pool_child_count): + """Decrements the pool quota. + + :param pool: The pool to decrement the quota on + :returns: None + """ + + LOG.debug("Decrementing pool quota for " + "project: %s ", pool.project_id) + + lock_session = db_apis.get_session(autocommit=False) + try: + self.repos.decrement_quota(lock_session, + data_models.Pool, + pool.project_id) + + # Pools cascade delete members and health monitors + # update the quota for those items as well. + if pool_child_count['HM'] > 0: + self.repos.decrement_quota(lock_session, + data_models.HealthMonitor, + pool.project_id) + if pool_child_count['member'] > 0: + self.repos.decrement_quota( + lock_session, data_models.Member, + pool.project_id, quantity=pool_child_count['member']) + + lock_session.commit() + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error('Failed to decrement pool quota for project: ' + '%(proj)s the project may have excess quota in use.', + {'proj': pool.project_id}) + lock_session.rollback() + + def revert(self, pool, pool_child_count, result, *args, **kwargs): + """Re-apply the quota + + :param project_id: The id of project to decrement the quota on + :returns: None + """ + + LOG.warning('Reverting decrement quota for pool on project %(proj)s ' + 'Project quota counts may be incorrect.', + {'proj': pool.project_id}) + + # Increment the quota back if this task wasn't the failure + if not isinstance(result, failure.Failure): + + # These are all independent to maximize the correction + # in case other quota actions have occurred + try: + session = db_apis.get_session() + lock_session = db_apis.get_session(autocommit=False) + try: + self.repos.check_quota_met(session, + lock_session, + data_models.Pool, + pool.project_id) + lock_session.commit() + except Exception: + lock_session.rollback() + + # Attempt to increment back the health monitor quota + if pool_child_count['HM'] > 0: + lock_session = db_apis.get_session(autocommit=False) + try: + self.repos.check_quota_met(session, + lock_session, + data_models.HealthMonitor, + pool.project_id) + lock_session.commit() + except Exception: + lock_session.rollback() + + # Attempt to increment back the member quota + # This is separate calls to maximize the correction + # should other factors have increased the in use quota + # before this point in the revert flow + for i in six.moves.range(pool_child_count['member']): + lock_session = db_apis.get_session(autocommit=False) + try: + self.repos.check_quota_met(session, + lock_session, + data_models.Member, + pool.project_id) + lock_session.commit() + except Exception: + lock_session.rollback() + except Exception: + # Don't fail the revert flow + pass + + +class CountPoolChildrenForQuota(BaseDatabaseTask): + 
"""Counts the pool child resources for quota management. + + Since the children of pools are cleaned up by the sqlalchemy + cascade delete settings, we need to collect the quota counts + for the child objects early. + + """ + + def execute(self, pool): + """Count the pool child resources for quota management + + :param pool: The pool to count children on + :returns: None + """ + + LOG.debug("Counting pool children for " + "project: %s ", pool.project_id) + + health_mon_count = 1 if pool.health_monitor else 0 + member_count = len(pool.members) + + return {'HM': health_mon_count, 'member': member_count} + + +class UpdatePoolMembersOperatingStatusInDB(BaseDatabaseTask): + """Updates the members of a pool operating status. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, pool, operating_status): + """Update the members of a pool operating status in DB. + + :param pool: Pool object to be updated + :param operating_status: Operating status to set + :returns: None + """ + + LOG.debug("Updating member operating status to %(status)s in DB for " + "pool id: %(pool)s", {'status': operating_status, + 'pool': pool.id}) + self.member_repo.update_pool_members(db_apis.get_session(), + pool.id, + operating_status=operating_status) diff --git a/octavia/controller/worker/v2/tasks/lifecycle_tasks.py b/octavia/controller/worker/v2/tasks/lifecycle_tasks.py new file mode 100644 index 0000000000..291b5677c5 --- /dev/null +++ b/octavia/controller/worker/v2/tasks/lifecycle_tasks.py @@ -0,0 +1,173 @@ +# Copyright 2016 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+
+from taskflow import task
+
+from octavia.controller.worker import task_utils as task_utilities
+
+
+class BaseLifecycleTask(task.Task):
+    """Base task to instantiate common classes."""
+
+    def __init__(self, **kwargs):
+        self.task_utils = task_utilities.TaskUtils()
+        super(BaseLifecycleTask, self).__init__(**kwargs)
+
+
+class AmphoraIDToErrorOnRevertTask(BaseLifecycleTask):
+    """Task to checkpoint Amphora lifecycle milestones."""
+
+    def execute(self, amphora_id):
+        pass
+
+    def revert(self, amphora_id, *args, **kwargs):
+        self.task_utils.mark_amphora_status_error(amphora_id)
+
+
+class AmphoraToErrorOnRevertTask(AmphoraIDToErrorOnRevertTask):
+    """Task to checkpoint Amphora lifecycle milestones."""
+
+    def execute(self, amphora):
+        pass
+
+    def revert(self, amphora, *args, **kwargs):
+        super(AmphoraToErrorOnRevertTask, self).revert(amphora.id)
+
+
+class HealthMonitorToErrorOnRevertTask(BaseLifecycleTask):
+    """Task to set a health monitor to ERROR on revert."""
+
+    def execute(self, health_mon, listeners, loadbalancer):
+        pass
+
+    def revert(self, health_mon, listeners, loadbalancer, *args, **kwargs):
+        self.task_utils.mark_health_mon_prov_status_error(health_mon.pool_id)
+        self.task_utils.mark_pool_prov_status_active(health_mon.pool_id)
+        self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id)
+        for listener in listeners:
+            self.task_utils.mark_listener_prov_status_active(listener.id)
+
+
+class L7PolicyToErrorOnRevertTask(BaseLifecycleTask):
+    """Task to set an l7policy to ERROR on revert."""
+
+    def execute(self, l7policy, listeners, loadbalancer):
+        pass
+
+    def revert(self, l7policy, listeners, loadbalancer, *args, **kwargs):
+        self.task_utils.mark_l7policy_prov_status_error(l7policy.id)
+        self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id)
+        for listener in listeners:
+            self.task_utils.mark_listener_prov_status_active(listener.id)
+
+
+class L7RuleToErrorOnRevertTask(BaseLifecycleTask):
+    """Task to set an l7rule to ERROR on revert."""
+
+    def execute(self, l7rule, listeners, loadbalancer):
+        pass
+
+    def revert(self, l7rule, listeners, loadbalancer, *args, **kwargs):
+        self.task_utils.mark_l7rule_prov_status_error(l7rule.id)
+        self.task_utils.mark_l7policy_prov_status_active(l7rule.l7policy_id)
+        self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id)
+        for listener in listeners:
+            self.task_utils.mark_listener_prov_status_active(listener.id)
+
+
+class ListenerToErrorOnRevertTask(BaseLifecycleTask):
+    """Task to set a listener to ERROR on revert."""
+
+    def execute(self, listener):
+        pass
+
+    def revert(self, listener, *args, **kwargs):
+        self.task_utils.mark_listener_prov_status_error(listener.id)
+        self.task_utils.mark_loadbalancer_prov_status_active(
+            listener.load_balancer.id)
+
+
+class ListenersToErrorOnRevertTask(BaseLifecycleTask):
+    """Task to set listeners to ERROR on revert."""
+
+    def execute(self, listeners, loadbalancer):
+        pass
+
+    def revert(self, listeners, loadbalancer, *args, **kwargs):
+        self.task_utils.mark_loadbalancer_prov_status_active(
+            loadbalancer.id)
+        for listener in listeners:
+            self.task_utils.mark_listener_prov_status_error(listener.id)
+
+
+class LoadBalancerIDToErrorOnRevertTask(BaseLifecycleTask):
+    """Task to set the load balancer to ERROR on revert."""
+
+    def execute(self, loadbalancer_id):
+        pass
+
+    def revert(self, loadbalancer_id, *args, **kwargs):
+        self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer_id)
+
+
+class LoadBalancerToErrorOnRevertTask(LoadBalancerIDToErrorOnRevertTask):
+ """Task to set the load balancer to ERROR on revert.""" + + def execute(self, loadbalancer): + pass + + def revert(self, loadbalancer, *args, **kwargs): + super(LoadBalancerToErrorOnRevertTask, self).revert(loadbalancer.id) + + +class MemberToErrorOnRevertTask(BaseLifecycleTask): + """Task to set a member to ERROR on revert.""" + + def execute(self, member, listeners, loadbalancer, pool): + pass + + def revert(self, member, listeners, loadbalancer, pool, *args, **kwargs): + self.task_utils.mark_member_prov_status_error(member.id) + for listener in listeners: + self.task_utils.mark_listener_prov_status_active(listener.id) + self.task_utils.mark_pool_prov_status_active(pool.id) + self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id) + + +class MembersToErrorOnRevertTask(BaseLifecycleTask): + """Task to set members to ERROR on revert.""" + + def execute(self, members, listeners, loadbalancer, pool): + pass + + def revert(self, members, listeners, loadbalancer, pool, *args, **kwargs): + for m in members: + self.task_utils.mark_member_prov_status_error(m.id) + for listener in listeners: + self.task_utils.mark_listener_prov_status_active(listener.id) + self.task_utils.mark_pool_prov_status_active(pool.id) + self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id) + + +class PoolToErrorOnRevertTask(BaseLifecycleTask): + """Task to set a pool to ERROR on revert.""" + + def execute(self, pool, listeners, loadbalancer): + pass + + def revert(self, pool, listeners, loadbalancer, *args, **kwargs): + self.task_utils.mark_pool_prov_status_error(pool.id) + self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id) + for listener in listeners: + self.task_utils.mark_listener_prov_status_active(listener.id) diff --git a/octavia/controller/worker/v2/tasks/model_tasks.py b/octavia/controller/worker/v2/tasks/model_tasks.py new file mode 100644 index 0000000000..72557cafcb --- /dev/null +++ b/octavia/controller/worker/v2/tasks/model_tasks.py @@ -0,0 +1,41 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from taskflow import task + + +class DeleteModelObject(task.Task): + """Task to delete an object in a model.""" + + def execute(self, object): + + object.delete() + + +class UpdateAttributes(task.Task): + """Task to update an object for changes.""" + + def execute(self, object, update_dict): + """Update an object and its associated resources. + + Note: This relies on the data_model update() methods to handle complex + objects with nested objects (LoadBalancer.vip, + Pool.session_persistence, etc.) + + :param object: The object will be updated. + :param update_dict: The updates dictionary. 
+ :returns: None + """ + object.update(update_dict) diff --git a/octavia/controller/worker/v2/tasks/network_tasks.py b/octavia/controller/worker/v2/tasks/network_tasks.py new file mode 100644 index 0000000000..0f1535f88d --- /dev/null +++ b/octavia/controller/worker/v2/tasks/network_tasks.py @@ -0,0 +1,659 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from oslo_config import cfg +from oslo_log import log as logging +import six +from taskflow import task +from taskflow.types import failure + +from octavia.common import constants +from octavia.common import utils +from octavia.controller.worker import task_utils +from octavia.network import base +from octavia.network import data_models as n_data_models + +LOG = logging.getLogger(__name__) +CONF = cfg.CONF + + +class BaseNetworkTask(task.Task): + """Base task to load drivers common to the tasks.""" + + def __init__(self, **kwargs): + super(BaseNetworkTask, self).__init__(**kwargs) + self._network_driver = None + self.task_utils = task_utils.TaskUtils() + + @property + def network_driver(self): + if self._network_driver is None: + self._network_driver = utils.get_network_driver() + return self._network_driver + + +class CalculateAmphoraDelta(BaseNetworkTask): + + default_provides = constants.DELTA + + def execute(self, loadbalancer, amphora): + LOG.debug("Calculating network delta for amphora id: %s", amphora.id) + + # Figure out what networks we want + # seed with lb network(s) + vrrp_port = self.network_driver.get_port(amphora.vrrp_port_id) + desired_network_ids = {vrrp_port.network_id}.union( + CONF.controller_worker.amp_boot_network_list) + + for pool in loadbalancer.pools: + member_networks = [ + self.network_driver.get_subnet(member.subnet_id).network_id + for member in pool.members + if member.subnet_id + ] + desired_network_ids.update(member_networks) + + nics = self.network_driver.get_plugged_networks(amphora.compute_id) + # assume we don't have two nics in the same network + actual_network_nics = dict((nic.network_id, nic) for nic in nics) + + del_ids = set(actual_network_nics) - desired_network_ids + delete_nics = list( + actual_network_nics[net_id] for net_id in del_ids) + + add_ids = desired_network_ids - set(actual_network_nics) + add_nics = list(n_data_models.Interface( + network_id=net_id) for net_id in add_ids) + delta = n_data_models.Delta( + amphora_id=amphora.id, compute_id=amphora.compute_id, + add_nics=add_nics, delete_nics=delete_nics) + return delta + + +class CalculateDelta(BaseNetworkTask): + """Task to calculate the delta between + + the nics on the amphora and the ones + we need. Returns a list for + plumbing them. + """ + + default_provides = constants.DELTAS + + def execute(self, loadbalancer): + """Compute which NICs need to be plugged + + for the amphora to become operational. 
+ + :param loadbalancer: the loadbalancer to calculate deltas for all + amphorae + :returns: dict of octavia.network.data_models.Delta keyed off amphora + id + """ + + calculate_amp = CalculateAmphoraDelta() + deltas = {} + for amphora in six.moves.filter( + lambda amp: amp.status == constants.AMPHORA_ALLOCATED, + loadbalancer.amphorae): + + delta = calculate_amp.execute(loadbalancer, amphora) + deltas[amphora.id] = delta + return deltas + + +class GetPlumbedNetworks(BaseNetworkTask): + """Task to figure out the NICS on an amphora. + + This will likely move into the amphora driver + :returns: Array of networks + """ + + default_provides = constants.NICS + + def execute(self, amphora): + """Get plumbed networks for the amphora.""" + + LOG.debug("Getting plumbed networks for amphora id: %s", amphora.id) + + return self.network_driver.get_plugged_networks(amphora.compute_id) + + +class PlugNetworks(BaseNetworkTask): + """Task to plug the networks. + + This uses the delta to add all missing networks/nics + """ + + def execute(self, amphora, delta): + """Update the amphora networks for the delta.""" + + LOG.debug("Plug or unplug networks for amphora id: %s", amphora.id) + + if not delta: + LOG.debug("No network deltas for amphora id: %s", amphora.id) + return + + # add nics + for nic in delta.add_nics: + self.network_driver.plug_network(amphora.compute_id, + nic.network_id) + + def revert(self, amphora, delta, *args, **kwargs): + """Handle a failed network plug by removing all nics added.""" + + LOG.warning("Unable to plug networks for amp id %s", amphora.id) + if not delta: + return + + for nic in delta.add_nics: + try: + self.network_driver.unplug_network(amphora.compute_id, + nic.network_id) + except base.NetworkNotFound: + pass + + +class UnPlugNetworks(BaseNetworkTask): + """Task to unplug the networks + + Loop over all nics and unplug them + based on delta + """ + + def execute(self, amphora, delta): + """Unplug the networks.""" + + LOG.debug("Unplug network for amphora") + if not delta: + LOG.debug("No network deltas for amphora id: %s", amphora.id) + return + + for nic in delta.delete_nics: + try: + self.network_driver.unplug_network(amphora.compute_id, + nic.network_id) + except base.NetworkNotFound: + LOG.debug("Network %d not found", nic.network_id) + except Exception: + LOG.exception("Unable to unplug network") + # TODO(xgerman) follow up if that makes sense + + +class GetMemberPorts(BaseNetworkTask): + + def execute(self, loadbalancer, amphora): + vip_port = self.network_driver.get_port(loadbalancer.vip.port_id) + member_ports = [] + interfaces = self.network_driver.get_plugged_networks( + amphora.compute_id) + for interface in interfaces: + port = self.network_driver.get_port(interface.port_id) + if vip_port.network_id == port.network_id: + continue + port.network = self.network_driver.get_network(port.network_id) + for fixed_ip in port.fixed_ips: + if amphora.lb_network_ip == fixed_ip.ip_address: + break + fixed_ip.subnet = self.network_driver.get_subnet( + fixed_ip.subnet_id) + # Only add the port to the list if the IP wasn't the mgmt IP + else: + member_ports.append(port) + return member_ports + + +class HandleNetworkDelta(BaseNetworkTask): + """Task to plug and unplug networks + + Plug or unplug networks based on delta + """ + + def execute(self, amphora, delta): + """Handle network plugging based off deltas.""" + added_ports = {} + added_ports[amphora.id] = [] + for nic in delta.add_nics: + interface = self.network_driver.plug_network(delta.compute_id, + nic.network_id) + 
port = self.network_driver.get_port(interface.port_id) + port.network = self.network_driver.get_network(port.network_id) + for fixed_ip in port.fixed_ips: + fixed_ip.subnet = self.network_driver.get_subnet( + fixed_ip.subnet_id) + added_ports[amphora.id].append(port) + for nic in delta.delete_nics: + try: + self.network_driver.unplug_network(delta.compute_id, + nic.network_id) + except base.NetworkNotFound: + LOG.debug("Network %d not found ", nic.network_id) + except Exception: + LOG.exception("Unable to unplug network") + return added_ports + + def revert(self, result, amphora, delta, *args, **kwargs): + """Handle a network plug or unplug failures.""" + + if isinstance(result, failure.Failure): + return + + if not delta: + return + + LOG.warning("Unable to plug networks for amp id %s", + delta.amphora_id) + + for nic in delta.add_nics: + try: + self.network_driver.unplug_network(delta.compute_id, + nic.network_id) + except Exception: + pass + + +class HandleNetworkDeltas(BaseNetworkTask): + """Task to plug and unplug networks + + Loop through the deltas and plug or unplug + networks based on delta + """ + + def execute(self, deltas): + """Handle network plugging based off deltas.""" + added_ports = {} + for amp_id, delta in six.iteritems(deltas): + added_ports[amp_id] = [] + for nic in delta.add_nics: + interface = self.network_driver.plug_network(delta.compute_id, + nic.network_id) + port = self.network_driver.get_port(interface.port_id) + port.network = self.network_driver.get_network(port.network_id) + for fixed_ip in port.fixed_ips: + fixed_ip.subnet = self.network_driver.get_subnet( + fixed_ip.subnet_id) + added_ports[amp_id].append(port) + for nic in delta.delete_nics: + try: + self.network_driver.unplug_network(delta.compute_id, + nic.network_id) + except base.NetworkNotFound: + LOG.debug("Network %d not found ", nic.network_id) + except Exception: + LOG.exception("Unable to unplug network") + return added_ports + + def revert(self, result, deltas, *args, **kwargs): + """Handle a network plug or unplug failures.""" + + if isinstance(result, failure.Failure): + return + for amp_id, delta in six.iteritems(deltas): + LOG.warning("Unable to plug networks for amp id %s", + delta.amphora_id) + if not delta: + return + + for nic in delta.add_nics: + try: + self.network_driver.unplug_network(delta.compute_id, + nic.network_id) + except base.NetworkNotFound: + pass + + +class PlugVIP(BaseNetworkTask): + """Task to plumb a VIP.""" + + def execute(self, loadbalancer): + """Plumb a vip to an amphora.""" + + LOG.debug("Plumbing VIP for loadbalancer id: %s", loadbalancer.id) + + amps_data = self.network_driver.plug_vip(loadbalancer, + loadbalancer.vip) + return amps_data + + def revert(self, result, loadbalancer, *args, **kwargs): + """Handle a failure to plumb a vip.""" + + if isinstance(result, failure.Failure): + return + LOG.warning("Unable to plug VIP for loadbalancer id %s", + loadbalancer.id) + + try: + # Make sure we have the current port IDs for cleanup + for amp_data in result: + for amphora in six.moves.filter( + # pylint: disable=cell-var-from-loop + lambda amp: amp.id == amp_data.id, + loadbalancer.amphorae): + amphora.vrrp_port_id = amp_data.vrrp_port_id + amphora.ha_port_id = amp_data.ha_port_id + + self.network_driver.unplug_vip(loadbalancer, loadbalancer.vip) + except Exception as e: + LOG.error("Failed to unplug VIP. 
Resources may still " + "be in use from vip: %(vip)s due to error: %(except)s", + {'vip': loadbalancer.vip.ip_address, 'except': e}) + + +class UpdateVIPSecurityGroup(BaseNetworkTask): + """Task to setup SG for LB.""" + + def execute(self, loadbalancer): + """Task to setup SG for LB.""" + + LOG.debug("Setup SG for loadbalancer id: %s", loadbalancer.id) + + self.network_driver.update_vip_sg(loadbalancer, loadbalancer.vip) + + +class GetSubnetFromVIP(BaseNetworkTask): + """Task to plumb a VIP.""" + + def execute(self, loadbalancer): + """Plumb a vip to an amphora.""" + + LOG.debug("Getting subnet for LB: %s", loadbalancer.id) + + return self.network_driver.get_subnet(loadbalancer.vip.subnet_id) + + +class PlugVIPAmpphora(BaseNetworkTask): + """Task to plumb a VIP.""" + + def execute(self, loadbalancer, amphora, subnet): + """Plumb a vip to an amphora.""" + + LOG.debug("Plumbing VIP for amphora id: %s", amphora.id) + + amp_data = self.network_driver.plug_aap_port( + loadbalancer, loadbalancer.vip, amphora, subnet) + return amp_data + + def revert(self, result, loadbalancer, amphora, subnet, *args, **kwargs): + """Handle a failure to plumb a vip.""" + + if isinstance(result, failure.Failure): + return + LOG.warning("Unable to plug VIP for amphora id %s " + "load balancer id %s", + amphora.id, loadbalancer.id) + + try: + amphora.vrrp_port_id = result.vrrp_port_id + amphora.ha_port_id = result.ha_port_id + + self.network_driver.unplug_aap_port(loadbalancer.vip, + amphora, subnet) + except Exception as e: + LOG.error('Failed to unplug AAP port. Resources may still be in ' + 'use for VIP: %s due to error: %s', loadbalancer.vip, e) + + +class UnplugVIP(BaseNetworkTask): + """Task to unplug the vip.""" + + def execute(self, loadbalancer): + """Unplug the vip.""" + + LOG.debug("Unplug vip on amphora") + try: + self.network_driver.unplug_vip(loadbalancer, loadbalancer.vip) + except Exception: + LOG.exception("Unable to unplug vip from load balancer %s", + loadbalancer.id) + + +class AllocateVIP(BaseNetworkTask): + """Task to allocate a VIP.""" + + def execute(self, loadbalancer): + """Allocate a vip to the loadbalancer.""" + + LOG.debug("Allocate_vip port_id %s, subnet_id %s," + "ip_address %s", + loadbalancer.vip.port_id, + loadbalancer.vip.subnet_id, + loadbalancer.vip.ip_address) + return self.network_driver.allocate_vip(loadbalancer) + + def revert(self, result, loadbalancer, *args, **kwargs): + """Handle a failure to allocate vip.""" + + if isinstance(result, failure.Failure): + LOG.exception("Unable to allocate VIP") + return + vip = result + LOG.warning("Deallocating vip %s", vip.ip_address) + try: + self.network_driver.deallocate_vip(vip) + except Exception as e: + LOG.error("Failed to deallocate VIP. Resources may still " + "be in use from vip: %(vip)s due to error: %(except)s", + {'vip': vip.ip_address, 'except': e}) + + +class DeallocateVIP(BaseNetworkTask): + """Task to deallocate a VIP.""" + + def execute(self, loadbalancer): + """Deallocate a VIP.""" + + LOG.debug("Deallocating a VIP %s", loadbalancer.vip.ip_address) + + # NOTE(blogan): this is kind of ugly but sufficient for now. Drivers + # will need access to the load balancer that the vip is/was attached + # to. However the data model serialization for the vip does not give a + # backref to the loadbalancer if accessed through the loadbalancer. 
+ vip = loadbalancer.vip + vip.load_balancer = loadbalancer + self.network_driver.deallocate_vip(vip) + + +class UpdateVIP(BaseNetworkTask): + """Task to update a VIP.""" + + def execute(self, loadbalancer): + LOG.debug("Updating VIP of load_balancer %s.", loadbalancer.id) + + self.network_driver.update_vip(loadbalancer) + + +class UpdateVIPForDelete(BaseNetworkTask): + """Task to update a VIP for listener delete flows.""" + + def execute(self, loadbalancer): + LOG.debug("Updating VIP for listener delete on load_balancer %s.", + loadbalancer.id) + + self.network_driver.update_vip(loadbalancer, for_delete=True) + + +class GetAmphoraNetworkConfigs(BaseNetworkTask): + """Task to retrieve amphora network details.""" + + def execute(self, loadbalancer, amphora=None): + LOG.debug("Retrieving vip network details.") + return self.network_driver.get_network_configs(loadbalancer, + amphora=amphora) + + +class GetAmphoraeNetworkConfigs(BaseNetworkTask): + """Task to retrieve amphorae network details.""" + + def execute(self, loadbalancer): + LOG.debug("Retrieving vip network details.") + return self.network_driver.get_network_configs(loadbalancer) + + +class FailoverPreparationForAmphora(BaseNetworkTask): + """Task to prepare an amphora for failover.""" + + def execute(self, amphora): + LOG.debug("Prepare amphora %s for failover.", amphora.id) + + self.network_driver.failover_preparation(amphora) + + +class RetrievePortIDsOnAmphoraExceptLBNetwork(BaseNetworkTask): + """Task retrieving all the port ids on an amphora, except lb network.""" + + def execute(self, amphora): + LOG.debug("Retrieve all but the lb network port id on amphora %s.", + amphora.id) + + interfaces = self.network_driver.get_plugged_networks( + compute_id=amphora.compute_id) + + ports = [] + for interface_ in interfaces: + if interface_.port_id not in ports: + port = self.network_driver.get_port(port_id=interface_.port_id) + ips = port.fixed_ips + lb_network = False + for ip in ips: + if ip.ip_address == amphora.lb_network_ip: + lb_network = True + if not lb_network: + ports.append(port) + + return ports + + +class PlugPorts(BaseNetworkTask): + """Task to plug neutron ports into a compute instance.""" + + def execute(self, amphora, ports): + for port in ports: + LOG.debug('Plugging port ID: %(port_id)s into compute instance: ' + '%(compute_id)s.', + {'port_id': port.id, 'compute_id': amphora.compute_id}) + self.network_driver.plug_port(amphora, port) + + +class PlugVIPPort(BaseNetworkTask): + """Task to plug a VIP into a compute instance.""" + + def execute(self, amphora, amphorae_network_config): + vrrp_port = amphorae_network_config.get(amphora.id).vrrp_port + LOG.debug('Plugging VIP VRRP port ID: %(port_id)s into compute ' + 'instance: %(compute_id)s.', + {'port_id': vrrp_port.id, 'compute_id': amphora.compute_id}) + self.network_driver.plug_port(amphora, vrrp_port) + + def revert(self, result, amphora, amphorae_network_config, + *args, **kwargs): + vrrp_port = None + try: + vrrp_port = amphorae_network_config.get(amphora.id).vrrp_port + self.network_driver.unplug_port(amphora, vrrp_port) + except Exception: + LOG.warning('Failed to unplug vrrp port: %(port)s from amphora: ' + '%(amp)s', {'port': vrrp_port.id, 'amp': amphora.id}) + + +class WaitForPortDetach(BaseNetworkTask): + """Task to wait for the neutron ports to detach from an amphora.""" + + def execute(self, amphora): + LOG.debug('Waiting for ports to detach from amphora: %(amp_id)s.', + {'amp_id': amphora.id}) + self.network_driver.wait_for_port_detach(amphora) + + +class 
ApplyQos(BaseNetworkTask): + """Apply Quality of Services to the VIP""" + + def _apply_qos_on_vrrp_ports(self, loadbalancer, amps_data, qos_policy_id, + is_revert=False, request_qos_id=None): + """Call network driver to apply QoS Policy on the vrrp ports.""" + if not amps_data: + amps_data = loadbalancer.amphorae + + apply_qos = ApplyQosAmphora() + for amp_data in amps_data: + apply_qos._apply_qos_on_vrrp_port(loadbalancer, amp_data, + qos_policy_id) + + def execute(self, loadbalancer, amps_data=None, update_dict=None): + """Apply qos policy on the vrrp ports which are related with vip.""" + qos_policy_id = loadbalancer.vip.qos_policy_id + if not qos_policy_id and ( + update_dict and ( + 'vip' not in update_dict or + 'qos_policy_id' not in update_dict['vip'])): + return + self._apply_qos_on_vrrp_ports(loadbalancer, amps_data, qos_policy_id) + + def revert(self, result, loadbalancer, amps_data=None, update_dict=None, + *args, **kwargs): + """Handle a failure to apply QoS to VIP""" + request_qos_id = loadbalancer.vip.qos_policy_id + orig_lb = self.task_utils.get_current_loadbalancer_from_db( + loadbalancer.id) + orig_qos_id = orig_lb.vip.qos_policy_id + if request_qos_id != orig_qos_id: + self._apply_qos_on_vrrp_ports(loadbalancer, amps_data, orig_qos_id, + is_revert=True, + request_qos_id=request_qos_id) + + +class ApplyQosAmphora(BaseNetworkTask): + """Apply Quality of Services to the VIP""" + + def _apply_qos_on_vrrp_port(self, loadbalancer, amp_data, qos_policy_id, + is_revert=False, request_qos_id=None): + """Call network driver to apply QoS Policy on the vrrp ports.""" + try: + self.network_driver.apply_qos_on_port(qos_policy_id, + amp_data.vrrp_port_id) + except Exception: + if not is_revert: + raise + LOG.warning('Failed to undo qos policy %(qos_id)s ' + 'on vrrp port: %(port)s from ' + 'amphorae: %(amp)s', + {'qos_id': request_qos_id, + 'port': amp_data.vrrp_port_id, + 'amp': [amp.id for amp in amp_data]}) + + def execute(self, loadbalancer, amp_data=None, update_dict=None): + """Apply qos policy on the vrrp ports which are related with vip.""" + qos_policy_id = loadbalancer.vip.qos_policy_id + if not qos_policy_id and ( + update_dict and ( + 'vip' not in update_dict or + 'qos_policy_id' not in update_dict['vip'])): + return + self._apply_qos_on_vrrp_port(loadbalancer, amp_data, qos_policy_id) + + def revert(self, result, loadbalancer, amp_data=None, update_dict=None, + *args, **kwargs): + """Handle a failure to apply QoS to VIP""" + try: + request_qos_id = loadbalancer.vip.qos_policy_id + orig_lb = self.task_utils.get_current_loadbalancer_from_db( + loadbalancer.id) + orig_qos_id = orig_lb.vip.qos_policy_id + if request_qos_id != orig_qos_id: + self._apply_qos_on_vrrp_port(loadbalancer, amp_data, + orig_qos_id, is_revert=True, + request_qos_id=request_qos_id) + except Exception as e: + LOG.error('Failed to remove QoS policy: %s from port: %s due ' + 'to error: %s', orig_qos_id, amp_data.vrrp_port_id, e) diff --git a/octavia/tests/unit/api/drivers/amphora_driver/v1/__init__.py b/octavia/tests/unit/api/drivers/amphora_driver/v1/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/api/drivers/amphora_driver/v1/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/api/drivers/amphora_driver/test_amphora_driver.py b/octavia/tests/unit/api/drivers/amphora_driver/v1/test_amphora_driver.py similarity index 99% rename from octavia/tests/unit/api/drivers/amphora_driver/test_amphora_driver.py rename to octavia/tests/unit/api/drivers/amphora_driver/v1/test_amphora_driver.py index 856ca6995e..7bc0c7cf25 100644 --- a/octavia/tests/unit/api/drivers/amphora_driver/test_amphora_driver.py +++ b/octavia/tests/unit/api/drivers/amphora_driver/v1/test_amphora_driver.py @@ -18,7 +18,7 @@ from oslo_utils import uuidutils from octavia_lib.api.drivers import data_models as driver_dm from octavia_lib.api.drivers import exceptions -from octavia.api.drivers.amphora_driver import driver +from octavia.api.drivers.amphora_driver.v1 import driver from octavia.common import constants as consts from octavia.network import base as network_base from octavia.tests.unit.api.drivers import sample_data_models diff --git a/octavia/tests/unit/api/drivers/amphora_driver/v2/__init__.py b/octavia/tests/unit/api/drivers/amphora_driver/v2/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/api/drivers/amphora_driver/v2/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/api/drivers/amphora_driver/v2/test_amphora_driver.py b/octavia/tests/unit/api/drivers/amphora_driver/v2/test_amphora_driver.py new file mode 100644 index 0000000000..2d9f3357b3 --- /dev/null +++ b/octavia/tests/unit/api/drivers/amphora_driver/v2/test_amphora_driver.py @@ -0,0 +1,486 @@ +# Copyright 2018 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
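# The v2 driver tests below mirror the v1 tests: create_vip_port() is
# exercised against a mocked network driver (VIP allocation stays a
# synchronous call through octavia.common.utils.get_network_driver), while
# the remaining operations are verified as a single oslo_messaging RPC cast
# whose payload is keyed by octavia.common.constants.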
+import mock + +from oslo_utils import uuidutils + +from octavia_lib.api.drivers import data_models as driver_dm +from octavia_lib.api.drivers import exceptions + +from octavia.api.drivers.amphora_driver.v2 import driver +from octavia.common import constants as consts +from octavia.network import base as network_base +from octavia.tests.unit.api.drivers import sample_data_models +from octavia.tests.unit import base + + +class TestAmphoraDriver(base.TestRpc): + def setUp(self): + super(TestAmphoraDriver, self).setUp() + self.amp_driver = driver.AmphoraProviderDriver() + self.sample_data = sample_data_models.SampleDriverDataModels() + + @mock.patch('octavia.common.utils.get_network_driver') + def test_create_vip_port(self, mock_get_net_driver): + mock_net_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_net_driver + mock_net_driver.allocate_vip.return_value = self.sample_data.db_vip + + provider_vip_dict = self.amp_driver.create_vip_port( + self.sample_data.lb_id, self.sample_data.project_id, + self.sample_data.provider_vip_dict) + + self.assertEqual(self.sample_data.provider_vip_dict, provider_vip_dict) + + @mock.patch('octavia.common.utils.get_network_driver') + def test_create_vip_port_failed(self, mock_get_net_driver): + mock_net_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_net_driver + mock_net_driver.allocate_vip.side_effect = ( + network_base.AllocateVIPException()) + + self.assertRaises(exceptions.DriverError, + self.amp_driver.create_vip_port, + self.sample_data.lb_id, self.sample_data.project_id, + self.sample_data.provider_vip_dict) + + # Load Balancer + @mock.patch('oslo_messaging.RPCClient.cast') + def test_loadbalancer_create(self, mock_cast): + provider_lb = driver_dm.LoadBalancer( + loadbalancer_id=self.sample_data.lb_id) + self.amp_driver.loadbalancer_create(provider_lb) + payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id, + consts.FLAVOR: None} + mock_cast.assert_called_with({}, 'create_load_balancer', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_loadbalancer_delete(self, mock_cast): + provider_lb = driver_dm.LoadBalancer( + loadbalancer_id=self.sample_data.lb_id) + self.amp_driver.loadbalancer_delete(provider_lb) + payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id, + 'cascade': False} + mock_cast.assert_called_with({}, 'delete_load_balancer', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_loadbalancer_failover(self, mock_cast): + self.amp_driver.loadbalancer_failover(self.sample_data.lb_id) + payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id} + mock_cast.assert_called_with({}, 'failover_load_balancer', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_loadbalancer_update(self, mock_cast): + old_provider_lb = driver_dm.LoadBalancer( + loadbalancer_id=self.sample_data.lb_id) + provider_lb = driver_dm.LoadBalancer( + loadbalancer_id=self.sample_data.lb_id, admin_state_up=True) + lb_dict = {'enabled': True} + self.amp_driver.loadbalancer_update(old_provider_lb, provider_lb) + payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id, + consts.LOAD_BALANCER_UPDATES: lb_dict} + mock_cast.assert_called_with({}, 'update_load_balancer', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_loadbalancer_update_name(self, mock_cast): + old_provider_lb = driver_dm.LoadBalancer( + loadbalancer_id=self.sample_data.lb_id) + provider_lb = driver_dm.LoadBalancer( + loadbalancer_id=self.sample_data.lb_id, name='Great LB') + lb_dict = 
{'name': 'Great LB'} + self.amp_driver.loadbalancer_update(old_provider_lb, provider_lb) + payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id, + consts.LOAD_BALANCER_UPDATES: lb_dict} + mock_cast.assert_called_with({}, 'update_load_balancer', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_loadbalancer_update_qos(self, mock_cast): + qos_policy_id = uuidutils.generate_uuid() + old_provider_lb = driver_dm.LoadBalancer( + loadbalancer_id=self.sample_data.lb_id) + provider_lb = driver_dm.LoadBalancer( + loadbalancer_id=self.sample_data.lb_id, + vip_qos_policy_id=qos_policy_id) + lb_dict = {'vip': {'qos_policy_id': qos_policy_id}} + self.amp_driver.loadbalancer_update(old_provider_lb, provider_lb) + payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id, + consts.LOAD_BALANCER_UPDATES: lb_dict} + mock_cast.assert_called_with({}, 'update_load_balancer', **payload) + + # Listener + @mock.patch('oslo_messaging.RPCClient.cast') + def test_listener_create(self, mock_cast): + provider_listener = driver_dm.Listener( + listener_id=self.sample_data.listener1_id) + self.amp_driver.listener_create(provider_listener) + payload = {consts.LISTENER_ID: self.sample_data.listener1_id} + mock_cast.assert_called_with({}, 'create_listener', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_listener_delete(self, mock_cast): + provider_listener = driver_dm.Listener( + listener_id=self.sample_data.listener1_id) + self.amp_driver.listener_delete(provider_listener) + payload = {consts.LISTENER_ID: self.sample_data.listener1_id} + mock_cast.assert_called_with({}, 'delete_listener', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_listener_update(self, mock_cast): + old_provider_listener = driver_dm.Listener( + listener_id=self.sample_data.listener1_id) + provider_listener = driver_dm.Listener( + listener_id=self.sample_data.listener1_id, admin_state_up=False) + listener_dict = {'enabled': False} + self.amp_driver.listener_update(old_provider_listener, + provider_listener) + payload = {consts.LISTENER_ID: self.sample_data.listener1_id, + consts.LISTENER_UPDATES: listener_dict} + mock_cast.assert_called_with({}, 'update_listener', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_listener_update_name(self, mock_cast): + old_provider_listener = driver_dm.Listener( + listener_id=self.sample_data.listener1_id) + provider_listener = driver_dm.Listener( + listener_id=self.sample_data.listener1_id, name='Great Listener') + listener_dict = {'name': 'Great Listener'} + self.amp_driver.listener_update(old_provider_listener, + provider_listener) + payload = {consts.LISTENER_ID: self.sample_data.listener1_id, + consts.LISTENER_UPDATES: listener_dict} + mock_cast.assert_called_with({}, 'update_listener', **payload) + + # Pool + @mock.patch('oslo_messaging.RPCClient.cast') + def test_pool_create(self, mock_cast): + provider_pool = driver_dm.Pool( + pool_id=self.sample_data.pool1_id) + self.amp_driver.pool_create(provider_pool) + payload = {consts.POOL_ID: self.sample_data.pool1_id} + mock_cast.assert_called_with({}, 'create_pool', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_pool_delete(self, mock_cast): + provider_pool = driver_dm.Pool( + pool_id=self.sample_data.pool1_id) + self.amp_driver.pool_delete(provider_pool) + payload = {consts.POOL_ID: self.sample_data.pool1_id} + mock_cast.assert_called_with({}, 'delete_pool', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_pool_update(self, 
mock_cast): + old_provider_pool = driver_dm.Pool( + pool_id=self.sample_data.pool1_id) + provider_pool = driver_dm.Pool( + pool_id=self.sample_data.pool1_id, admin_state_up=True) + pool_dict = {'enabled': True} + self.amp_driver.pool_update(old_provider_pool, provider_pool) + payload = {consts.POOL_ID: self.sample_data.pool1_id, + consts.POOL_UPDATES: pool_dict} + mock_cast.assert_called_with({}, 'update_pool', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_pool_update_name(self, mock_cast): + old_provider_pool = driver_dm.Pool( + pool_id=self.sample_data.pool1_id) + provider_pool = driver_dm.Pool( + pool_id=self.sample_data.pool1_id, name='Great pool', + admin_state_up=True, tls_enabled=True) + pool_dict = {'name': 'Great pool', + 'enabled': True, + 'tls_enabled': True} + self.amp_driver.pool_update(old_provider_pool, provider_pool) + payload = {consts.POOL_ID: self.sample_data.pool1_id, + consts.POOL_UPDATES: pool_dict} + mock_cast.assert_called_with({}, 'update_pool', **payload) + + # Member + @mock.patch('oslo_messaging.RPCClient.cast') + def test_member_create(self, mock_cast): + provider_member = driver_dm.Member( + member_id=self.sample_data.member1_id) + self.amp_driver.member_create(provider_member) + payload = {consts.MEMBER_ID: self.sample_data.member1_id} + mock_cast.assert_called_with({}, 'create_member', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_member_delete(self, mock_cast): + provider_member = driver_dm.Member( + member_id=self.sample_data.member1_id) + self.amp_driver.member_delete(provider_member) + payload = {consts.MEMBER_ID: self.sample_data.member1_id} + mock_cast.assert_called_with({}, 'delete_member', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_member_update(self, mock_cast): + old_provider_member = driver_dm.Member( + member_id=self.sample_data.member1_id) + provider_member = driver_dm.Member( + member_id=self.sample_data.member1_id, admin_state_up=True) + member_dict = {'enabled': True} + self.amp_driver.member_update(old_provider_member, provider_member) + payload = {consts.MEMBER_ID: self.sample_data.member1_id, + consts.MEMBER_UPDATES: member_dict} + mock_cast.assert_called_with({}, 'update_member', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_member_update_name(self, mock_cast): + old_provider_member = driver_dm.Member( + member_id=self.sample_data.member1_id) + provider_member = driver_dm.Member( + member_id=self.sample_data.member1_id, name='Great member') + member_dict = {'name': 'Great member'} + self.amp_driver.member_update(old_provider_member, provider_member) + payload = {consts.MEMBER_ID: self.sample_data.member1_id, + consts.MEMBER_UPDATES: member_dict} + mock_cast.assert_called_with({}, 'update_member', **payload) + + @mock.patch('octavia.db.api.get_session') + @mock.patch('octavia.db.repositories.PoolRepository.get') + @mock.patch('oslo_messaging.RPCClient.cast') + def test_member_batch_update(self, mock_cast, mock_pool_get, mock_session): + mock_pool = mock.MagicMock() + mock_pool.members = self.sample_data.db_pool1_members + mock_pool_get.return_value = mock_pool + + prov_mem_update = driver_dm.Member( + member_id=self.sample_data.member2_id, + pool_id=self.sample_data.pool1_id, admin_state_up=False, + address='192.0.2.17', monitor_address='192.0.2.77', + protocol_port=80, name='updated-member2') + prov_new_member = driver_dm.Member( + member_id=self.sample_data.member3_id, + pool_id=self.sample_data.pool1_id, + address='192.0.2.18', 
monitor_address='192.0.2.28', + protocol_port=80, name='member3') + prov_members = [prov_mem_update, prov_new_member] + + update_mem_dict = {'ip_address': '192.0.2.17', + 'name': 'updated-member2', + 'monitor_address': '192.0.2.77', + 'id': self.sample_data.member2_id, + 'enabled': False, + 'protocol_port': 80, + 'pool_id': self.sample_data.pool1_id} + + self.amp_driver.member_batch_update(prov_members) + + payload = {'old_member_ids': [self.sample_data.member1_id], + 'new_member_ids': [self.sample_data.member3_id], + 'updated_members': [update_mem_dict]} + mock_cast.assert_called_with({}, 'batch_update_members', **payload) + + @mock.patch('octavia.db.api.get_session') + @mock.patch('octavia.db.repositories.PoolRepository.get') + @mock.patch('oslo_messaging.RPCClient.cast') + def test_member_batch_update_no_admin_addr(self, mock_cast, + mock_pool_get, mock_session): + mock_pool = mock.MagicMock() + mock_pool.members = self.sample_data.db_pool1_members + mock_pool_get.return_value = mock_pool + + prov_mem_update = driver_dm.Member( + member_id=self.sample_data.member2_id, + pool_id=self.sample_data.pool1_id, + monitor_address='192.0.2.77', + protocol_port=80, name='updated-member2') + prov_new_member = driver_dm.Member( + member_id=self.sample_data.member3_id, + pool_id=self.sample_data.pool1_id, + address='192.0.2.18', monitor_address='192.0.2.28', + protocol_port=80, name='member3') + prov_members = [prov_mem_update, prov_new_member] + + update_mem_dict = {'name': 'updated-member2', + 'monitor_address': '192.0.2.77', + 'id': self.sample_data.member2_id, + 'protocol_port': 80, + 'pool_id': self.sample_data.pool1_id} + + self.amp_driver.member_batch_update(prov_members) + + payload = {'old_member_ids': [self.sample_data.member1_id], + 'new_member_ids': [self.sample_data.member3_id], + 'updated_members': [update_mem_dict]} + mock_cast.assert_called_with({}, 'batch_update_members', **payload) + + # Health Monitor + @mock.patch('oslo_messaging.RPCClient.cast') + def test_health_monitor_create(self, mock_cast): + provider_HM = driver_dm.HealthMonitor( + healthmonitor_id=self.sample_data.hm1_id) + self.amp_driver.health_monitor_create(provider_HM) + payload = {consts.HEALTH_MONITOR_ID: self.sample_data.hm1_id} + mock_cast.assert_called_with({}, 'create_health_monitor', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_health_monitor_delete(self, mock_cast): + provider_HM = driver_dm.HealthMonitor( + healthmonitor_id=self.sample_data.hm1_id) + self.amp_driver.health_monitor_delete(provider_HM) + payload = {consts.HEALTH_MONITOR_ID: self.sample_data.hm1_id} + mock_cast.assert_called_with({}, 'delete_health_monitor', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_health_monitor_update(self, mock_cast): + old_provider_hm = driver_dm.HealthMonitor( + healthmonitor_id=self.sample_data.hm1_id) + provider_hm = driver_dm.HealthMonitor( + healthmonitor_id=self.sample_data.hm1_id, admin_state_up=True, + max_retries=1, max_retries_down=2) + hm_dict = {'enabled': True, 'rise_threshold': 1, 'fall_threshold': 2} + self.amp_driver.health_monitor_update(old_provider_hm, provider_hm) + payload = {consts.HEALTH_MONITOR_ID: self.sample_data.hm1_id, + consts.HEALTH_MONITOR_UPDATES: hm_dict} + mock_cast.assert_called_with({}, 'update_health_monitor', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_health_monitor_update_name(self, mock_cast): + old_provider_hm = driver_dm.HealthMonitor( + healthmonitor_id=self.sample_data.hm1_id) + provider_hm = 
driver_dm.HealthMonitor( + healthmonitor_id=self.sample_data.hm1_id, name='Great HM') + hm_dict = {'name': 'Great HM'} + self.amp_driver.health_monitor_update(old_provider_hm, provider_hm) + payload = {consts.HEALTH_MONITOR_ID: self.sample_data.hm1_id, + consts.HEALTH_MONITOR_UPDATES: hm_dict} + mock_cast.assert_called_with({}, 'update_health_monitor', **payload) + + # L7 Policy + @mock.patch('oslo_messaging.RPCClient.cast') + def test_l7policy_create(self, mock_cast): + provider_l7policy = driver_dm.L7Policy( + l7policy_id=self.sample_data.l7policy1_id) + self.amp_driver.l7policy_create(provider_l7policy) + payload = {consts.L7POLICY_ID: self.sample_data.l7policy1_id} + mock_cast.assert_called_with({}, 'create_l7policy', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_l7policy_delete(self, mock_cast): + provider_l7policy = driver_dm.L7Policy( + l7policy_id=self.sample_data.l7policy1_id) + self.amp_driver.l7policy_delete(provider_l7policy) + payload = {consts.L7POLICY_ID: self.sample_data.l7policy1_id} + mock_cast.assert_called_with({}, 'delete_l7policy', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_l7policy_update(self, mock_cast): + old_provider_l7policy = driver_dm.L7Policy( + l7policy_id=self.sample_data.l7policy1_id) + provider_l7policy = driver_dm.L7Policy( + l7policy_id=self.sample_data.l7policy1_id, admin_state_up=True) + l7policy_dict = {'enabled': True} + self.amp_driver.l7policy_update(old_provider_l7policy, + provider_l7policy) + payload = {consts.L7POLICY_ID: self.sample_data.l7policy1_id, + consts.L7POLICY_UPDATES: l7policy_dict} + mock_cast.assert_called_with({}, 'update_l7policy', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_l7policy_update_name(self, mock_cast): + old_provider_l7policy = driver_dm.L7Policy( + l7policy_id=self.sample_data.l7policy1_id) + provider_l7policy = driver_dm.L7Policy( + l7policy_id=self.sample_data.l7policy1_id, name='Great L7Policy') + l7policy_dict = {'name': 'Great L7Policy'} + self.amp_driver.l7policy_update(old_provider_l7policy, + provider_l7policy) + payload = {consts.L7POLICY_ID: self.sample_data.l7policy1_id, + consts.L7POLICY_UPDATES: l7policy_dict} + mock_cast.assert_called_with({}, 'update_l7policy', **payload) + + # L7 Rules + @mock.patch('oslo_messaging.RPCClient.cast') + def test_l7rule_create(self, mock_cast): + provider_l7rule = driver_dm.L7Rule( + l7rule_id=self.sample_data.l7rule1_id) + self.amp_driver.l7rule_create(provider_l7rule) + payload = {consts.L7RULE_ID: self.sample_data.l7rule1_id} + mock_cast.assert_called_with({}, 'create_l7rule', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_l7rule_delete(self, mock_cast): + provider_l7rule = driver_dm.L7Rule( + l7rule_id=self.sample_data.l7rule1_id) + self.amp_driver.l7rule_delete(provider_l7rule) + payload = {consts.L7RULE_ID: self.sample_data.l7rule1_id} + mock_cast.assert_called_with({}, 'delete_l7rule', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_l7rule_update(self, mock_cast): + old_provider_l7rule = driver_dm.L7Rule( + l7rule_id=self.sample_data.l7rule1_id) + provider_l7rule = driver_dm.L7Rule( + l7rule_id=self.sample_data.l7rule1_id, admin_state_up=True) + l7rule_dict = {'enabled': True} + self.amp_driver.l7rule_update(old_provider_l7rule, provider_l7rule) + payload = {consts.L7RULE_ID: self.sample_data.l7rule1_id, + consts.L7RULE_UPDATES: l7rule_dict} + mock_cast.assert_called_with({}, 'update_l7rule', **payload) + + 
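    # The *_update tests above and below all encode the same convention: the
    # driver diffs the old and new provider objects and casts only the
    # changed fields, renamed to their Octavia DB-style keys, for example
    # (illustrative mapping, not an exhaustive list):
    #     admin_state_up    -> 'enabled'
    #     max_retries       -> 'rise_threshold'
    #     max_retries_down  -> 'fall_threshold'
    #     vip_qos_policy_id -> vip['qos_policy_id']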
@mock.patch('oslo_messaging.RPCClient.cast') + def test_l7rule_update_invert(self, mock_cast): + old_provider_l7rule = driver_dm.L7Rule( + l7rule_id=self.sample_data.l7rule1_id) + provider_l7rule = driver_dm.L7Rule( + l7rule_id=self.sample_data.l7rule1_id, invert=True) + l7rule_dict = {'invert': True} + self.amp_driver.l7rule_update(old_provider_l7rule, provider_l7rule) + payload = {consts.L7RULE_ID: self.sample_data.l7rule1_id, + consts.L7RULE_UPDATES: l7rule_dict} + mock_cast.assert_called_with({}, 'update_l7rule', **payload) + + # Flavor + def test_get_supported_flavor_metadata(self): + test_schema = { + "properties": { + "test_name": {"description": "Test description"}, + "test_name2": {"description": "Another description"}}} + ref_dict = {"test_name": "Test description", + "test_name2": "Another description"} + + # mock out the supported_flavor_metadata + with mock.patch('octavia.api.drivers.amphora_driver.flavor_schema.' + 'SUPPORTED_FLAVOR_SCHEMA', test_schema): + result = self.amp_driver.get_supported_flavor_metadata() + self.assertEqual(ref_dict, result) + + # Test for bad schema + with mock.patch('octavia.api.drivers.amphora_driver.flavor_schema.' + 'SUPPORTED_FLAVOR_SCHEMA', 'bogus'): + self.assertRaises(exceptions.DriverError, + self.amp_driver.get_supported_flavor_metadata) + + @mock.patch('jsonschema.validators.requests') + def test_validate_flavor(self, mock_validate): + ref_dict = {consts.LOADBALANCER_TOPOLOGY: consts.TOPOLOGY_SINGLE} + self.amp_driver.validate_flavor(ref_dict) + + # Test bad flavor metadata value is bad + ref_dict = {consts.LOADBALANCER_TOPOLOGY: 'bogus'} + self.assertRaises(exceptions.UnsupportedOptionError, + self.amp_driver.validate_flavor, + ref_dict) + + # Test bad flavor metadata key + ref_dict = {'bogus': 'bogus'} + self.assertRaises(exceptions.UnsupportedOptionError, + self.amp_driver.validate_flavor, + ref_dict) + + # Test for bad schema + with mock.patch('octavia.api.drivers.amphora_driver.flavor_schema.' + 'SUPPORTED_FLAVOR_SCHEMA', 'bogus'): + self.assertRaises(exceptions.DriverError, + self.amp_driver.validate_flavor, 'bogus') diff --git a/octavia/tests/unit/controller/healthmanager/test_health_manager.py b/octavia/tests/unit/controller/healthmanager/test_health_manager.py index ec73cb5eb5..1670422614 100644 --- a/octavia/tests/unit/controller/healthmanager/test_health_manager.py +++ b/octavia/tests/unit/controller/healthmanager/test_health_manager.py @@ -45,7 +45,7 @@ class TestHealthManager(base.TestCase): super(TestHealthManager, self).setUp() @mock.patch('octavia.db.api.wait_for_connection') - @mock.patch('octavia.controller.worker.controller_worker.' + @mock.patch('octavia.controller.worker.v1.controller_worker.' 'ControllerWorker.failover_amphora') @mock.patch('octavia.db.repositories.AmphoraHealthRepository.' 'get_stale_amphora') @@ -86,7 +86,7 @@ class TestHealthManager(base.TestCase): self.assertRaises(TestException, hm.health_check) self.assertEqual(4, mock_session.rollback.call_count) - @mock.patch('octavia.controller.worker.controller_worker.' + @mock.patch('octavia.controller.worker.v1.controller_worker.' 'ControllerWorker.failover_amphora') @mock.patch('octavia.db.repositories.AmphoraHealthRepository.' 'get_stale_amphora', return_value=None) @@ -102,7 +102,7 @@ class TestHealthManager(base.TestCase): session_mock.assert_called_once_with(autocommit=False) self.assertFalse(failover_mock.called) - @mock.patch('octavia.controller.worker.controller_worker.' + @mock.patch('octavia.controller.worker.v1.controller_worker.' 
'ControllerWorker.failover_amphora') @mock.patch('octavia.db.repositories.AmphoraHealthRepository.' 'get_stale_amphora', return_value=None) diff --git a/octavia/tests/unit/controller/housekeeping/test_house_keeping.py b/octavia/tests/unit/controller/housekeeping/test_house_keeping.py index e63da3d80c..16926dff0e 100644 --- a/octavia/tests/unit/controller/housekeeping/test_house_keeping.py +++ b/octavia/tests/unit/controller/housekeeping/test_house_keeping.py @@ -219,7 +219,7 @@ class TestCertRotation(base.TestCase): def setUp(self): super(TestCertRotation, self).setUp() - @mock.patch('octavia.controller.worker.controller_worker.' + @mock.patch('octavia.controller.worker.v1.controller_worker.' 'ControllerWorker.amphora_cert_rotation') @mock.patch('octavia.db.repositories.AmphoraRepository.' 'get_cert_expiring_amphora') @@ -239,7 +239,7 @@ class TestCertRotation(base.TestCase): self.assertRaises(TestException, cr.rotate) amp_cert_mock.assert_called_once_with(AMPHORA_ID) - @mock.patch('octavia.controller.worker.controller_worker.' + @mock.patch('octavia.controller.worker.v1.controller_worker.' 'ControllerWorker.amphora_cert_rotation') @mock.patch('octavia.db.repositories.AmphoraRepository.' 'get_cert_expiring_amphora') @@ -259,7 +259,7 @@ class TestCertRotation(base.TestCase): self.assertIsNone(cr.rotate()) amp_cert_mock.assert_called_once_with(AMPHORA_ID) - @mock.patch('octavia.controller.worker.controller_worker.' + @mock.patch('octavia.controller.worker.v1.controller_worker.' 'ControllerWorker.amphora_cert_rotation') @mock.patch('octavia.db.repositories.AmphoraRepository.' 'get_cert_expiring_amphora') diff --git a/octavia/tests/unit/controller/queue/v1/__init__.py b/octavia/tests/unit/controller/queue/v1/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/controller/queue/v1/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
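As context for the queue tests that follow, here is a minimal sketch of how a caller hands work to these consumers over RPC, assuming an oslo.messaging transport is already configured (the helper name and its arguments are illustrative, not part of the patch):

    import oslo_messaging as messaging

    from octavia.common import constants

    def cast_create_load_balancer(transport, lb_id, flavor=None):
        # Fire-and-forget cast on the amphora v2 topic; the payload keys
        # match the constants asserted in the driver and endpoint tests.
        target = messaging.Target(topic=constants.TOPIC_AMPHORA_V2,
                                  fanout=False)
        client = messaging.RPCClient(transport, target=target)
        payload = {constants.LOAD_BALANCER_ID: lb_id,
                   constants.FLAVOR: flavor}
        client.cast({}, 'create_load_balancer', **payload)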
diff --git a/octavia/tests/unit/controller/queue/test_consumer.py b/octavia/tests/unit/controller/queue/v1/test_consumer.py similarity index 94% rename from octavia/tests/unit/controller/queue/test_consumer.py rename to octavia/tests/unit/controller/queue/v1/test_consumer.py index 4e3865a587..b776667255 100644 --- a/octavia/tests/unit/controller/queue/test_consumer.py +++ b/octavia/tests/unit/controller/queue/v1/test_consumer.py @@ -17,8 +17,8 @@ from oslo_config import cfg from oslo_config import fixture as oslo_fixture import oslo_messaging as messaging -from octavia.controller.queue import consumer -from octavia.controller.queue import endpoint +from octavia.controller.queue.v1 import consumer +from octavia.controller.queue.v1 import endpoints from octavia.tests.unit import base @@ -32,7 +32,7 @@ class TestConsumer(base.TestRpc): self.conf = conf.conf @mock.patch.object(messaging, 'Target') - @mock.patch.object(endpoint, 'Endpoint') + @mock.patch.object(endpoints, 'Endpoints') @mock.patch.object(messaging, 'get_rpc_server') def test_consumer_run(self, mock_rpc_server, mock_endpoint, mock_target): mock_rpc_server_rv = mock.Mock() diff --git a/octavia/tests/unit/controller/queue/test_endpoint.py b/octavia/tests/unit/controller/queue/v1/test_endpoints.py similarity index 95% rename from octavia/tests/unit/controller/queue/test_endpoint.py rename to octavia/tests/unit/controller/queue/v1/test_endpoints.py index 9557bfc06b..cc7c15f16a 100644 --- a/octavia/tests/unit/controller/queue/test_endpoint.py +++ b/octavia/tests/unit/controller/queue/v1/test_endpoints.py @@ -17,25 +17,25 @@ from oslo_config import cfg from oslo_config import fixture as oslo_fixture from oslo_utils import uuidutils -from octavia.controller.queue import endpoint -from octavia.controller.worker import controller_worker +from octavia.controller.queue.v1 import endpoints +from octavia.controller.worker.v1 import controller_worker from octavia.tests.unit import base -class TestEndpoint(base.TestCase): +class TestEndpoints(base.TestCase): def setUp(self): - super(TestEndpoint, self).setUp() + super(TestEndpoints, self).setUp() conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) conf.config(octavia_plugins='hot_plug_plugin') mock_class = mock.create_autospec(controller_worker.ControllerWorker) - self.worker_patcher = mock.patch('octavia.controller.queue.endpoint.' - 'stevedore_driver') + self.worker_patcher = mock.patch('octavia.controller.queue.v1.' + 'endpoints.stevedore_driver') self.worker_patcher.start().ControllerWorker = mock_class - self.ep = endpoint.Endpoint() + self.ep = endpoints.Endpoints() self.context = {} self.resource_updates = {} self.resource_id = 1234 diff --git a/octavia/tests/unit/controller/queue/v2/__init__.py b/octavia/tests/unit/controller/queue/v2/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/controller/queue/v2/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
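The new v2 consumer test that follows asserts the wiring sketched below: a Target on the amphora v2 topic, an Endpoints instance, and an RPC server built from them. This is a sketch assuming an already-configured transport; the helper name is illustrative, and the real ConsumerService also manages the run/terminate lifecycle:

    import oslo_messaging as messaging

    from octavia.common import constants
    from octavia.controller.queue.v2 import endpoints

    def build_v2_rpc_server(transport, host):
        # One server per worker host; fanout=False so each cast is handled
        # by a single consumer.
        target = messaging.Target(topic=constants.TOPIC_AMPHORA_V2,
                                  server=host, fanout=False)
        # Endpoints delegates each RPC method to the v2 controller worker.
        return messaging.get_rpc_server(transport, target,
                                        [endpoints.Endpoints()],
                                        executor='threading')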
diff --git a/octavia/tests/unit/controller/queue/v2/test_consumer.py b/octavia/tests/unit/controller/queue/v2/test_consumer.py new file mode 100644 index 0000000000..dddcb15fc8 --- /dev/null +++ b/octavia/tests/unit/controller/queue/v2/test_consumer.py @@ -0,0 +1,72 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +import oslo_messaging as messaging + +from octavia.common import constants +from octavia.controller.queue.v2 import consumer +from octavia.controller.queue.v2 import endpoints +from octavia.tests.unit import base + + +class TestConsumer(base.TestRpc): + + def setUp(self): + super(TestConsumer, self).setUp() + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(host='test-hostname') + self.conf = conf.conf + + @mock.patch.object(messaging, 'Target') + @mock.patch.object(endpoints, 'Endpoints') + @mock.patch.object(messaging, 'get_rpc_server') + def test_consumer_run(self, mock_rpc_server, mock_endpoint, mock_target): + mock_rpc_server_rv = mock.Mock() + mock_rpc_server.return_value = mock_rpc_server_rv + mock_endpoint_rv = mock.Mock() + mock_endpoint.return_value = mock_endpoint_rv + mock_target_rv = mock.Mock() + mock_target.return_value = mock_target_rv + + consumer.ConsumerService(1, self.conf).run() + + mock_target.assert_called_once_with(topic=constants.TOPIC_AMPHORA_V2, + server='test-hostname', + fanout=False) + mock_endpoint.assert_called_once_with() + + @mock.patch.object(messaging, 'get_rpc_server') + def test_consumer_terminate(self, mock_rpc_server): + mock_rpc_server_rv = mock.Mock() + mock_rpc_server.return_value = mock_rpc_server_rv + + cons = consumer.ConsumerService(1, self.conf) + cons.run() + cons.terminate() + mock_rpc_server_rv.stop.assert_called_once_with() + self.assertFalse(mock_rpc_server_rv.wait.called) + + @mock.patch.object(messaging, 'get_rpc_server') + def test_consumer_graceful_terminate(self, mock_rpc_server): + mock_rpc_server_rv = mock.Mock() + mock_rpc_server.return_value = mock_rpc_server_rv + + cons = consumer.ConsumerService(1, self.conf) + cons.run() + cons.terminate(graceful=True) + mock_rpc_server_rv.stop.assert_called_once_with() + mock_rpc_server_rv.wait.assert_called_once_with() diff --git a/octavia/tests/unit/controller/queue/v2/test_endpoints.py b/octavia/tests/unit/controller/queue/v2/test_endpoints.py new file mode 100644 index 0000000000..226f164eff --- /dev/null +++ b/octavia/tests/unit/controller/queue/v2/test_endpoints.py @@ -0,0 +1,182 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_utils import uuidutils + +from octavia.controller.queue.v2 import endpoints +from octavia.controller.worker.v2 import controller_worker +from octavia.tests.unit import base + + +class TestEndpoints(base.TestCase): + + def setUp(self): + super(TestEndpoints, self).setUp() + + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(octavia_plugins='hot_plug_plugin') + + mock_class = mock.create_autospec(controller_worker.ControllerWorker) + self.worker_patcher = mock.patch('octavia.controller.queue.v2.' + 'endpoints.stevedore_driver') + self.worker_patcher.start().ControllerWorker = mock_class + + self.ep = endpoints.Endpoints() + self.context = {} + self.resource_updates = {} + self.resource_id = 1234 + self.server_group_id = 3456 + self.flavor_id = uuidutils.generate_uuid() + + def test_create_load_balancer(self): + self.ep.create_load_balancer(self.context, self.resource_id, + flavor=self.flavor_id) + self.ep.worker.create_load_balancer.assert_called_once_with( + self.resource_id, self.flavor_id) + + def test_create_load_balancer_no_flavor(self): + self.ep.create_load_balancer(self.context, self.resource_id) + self.ep.worker.create_load_balancer.assert_called_once_with( + self.resource_id, None) + + def test_update_load_balancer(self): + self.ep.update_load_balancer(self.context, self.resource_id, + self.resource_updates) + self.ep.worker.update_load_balancer.assert_called_once_with( + self.resource_id, self.resource_updates) + + def test_delete_load_balancer(self): + self.ep.delete_load_balancer(self.context, self.resource_id) + self.ep.worker.delete_load_balancer.assert_called_once_with( + self.resource_id, False) + + def test_failover_load_balancer(self): + self.ep.failover_load_balancer(self.context, self.resource_id) + self.ep.worker.failover_loadbalancer.assert_called_once_with( + self.resource_id) + + def test_failover_amphora(self): + self.ep.failover_amphora(self.context, self.resource_id) + self.ep.worker.failover_amphora.assert_called_once_with( + self.resource_id) + + def test_create_listener(self): + self.ep.create_listener(self.context, self.resource_id) + self.ep.worker.create_listener.assert_called_once_with( + self.resource_id) + + def test_update_listener(self): + self.ep.update_listener(self.context, self.resource_id, + self.resource_updates) + self.ep.worker.update_listener.assert_called_once_with( + self.resource_id, self.resource_updates) + + def test_delete_listener(self): + self.ep.delete_listener(self.context, self.resource_id) + self.ep.worker.delete_listener.assert_called_once_with( + self.resource_id) + + def test_create_pool(self): + self.ep.create_pool(self.context, self.resource_id) + self.ep.worker.create_pool.assert_called_once_with( + self.resource_id) + + def test_update_pool(self): + self.ep.update_pool(self.context, self.resource_id, + self.resource_updates) + self.ep.worker.update_pool.assert_called_once_with( + self.resource_id, self.resource_updates) + + def test_delete_pool(self): + self.ep.delete_pool(self.context, 
self.resource_id) + self.ep.worker.delete_pool.assert_called_once_with( + self.resource_id) + + def test_create_health_monitor(self): + self.ep.create_health_monitor(self.context, self.resource_id) + self.ep.worker.create_health_monitor.assert_called_once_with( + self.resource_id) + + def test_update_health_monitor(self): + self.ep.update_health_monitor(self.context, self.resource_id, + self.resource_updates) + self.ep.worker.update_health_monitor.assert_called_once_with( + self.resource_id, self.resource_updates) + + def test_delete_health_monitor(self): + self.ep.delete_health_monitor(self.context, self.resource_id) + self.ep.worker.delete_health_monitor.assert_called_once_with( + self.resource_id) + + def test_create_member(self): + self.ep.create_member(self.context, self.resource_id) + self.ep.worker.create_member.assert_called_once_with( + self.resource_id) + + def test_update_member(self): + self.ep.update_member(self.context, self.resource_id, + self.resource_updates) + self.ep.worker.update_member.assert_called_once_with( + self.resource_id, self.resource_updates) + + def test_batch_update_members(self): + self.ep.batch_update_members( + self.context, [9], [11], [self.resource_updates]) + self.ep.worker.batch_update_members.assert_called_once_with( + [9], [11], [self.resource_updates]) + + def test_delete_member(self): + self.ep.delete_member(self.context, self.resource_id) + self.ep.worker.delete_member.assert_called_once_with( + self.resource_id) + + def test_create_l7policy(self): + self.ep.create_l7policy(self.context, self.resource_id) + self.ep.worker.create_l7policy.assert_called_once_with( + self.resource_id) + + def test_update_l7policy(self): + self.ep.update_l7policy(self.context, self.resource_id, + self.resource_updates) + self.ep.worker.update_l7policy.assert_called_once_with( + self.resource_id, self.resource_updates) + + def test_delete_l7policy(self): + self.ep.delete_l7policy(self.context, self.resource_id) + self.ep.worker.delete_l7policy.assert_called_once_with( + self.resource_id) + + def test_create_l7rule(self): + self.ep.create_l7rule(self.context, self.resource_id) + self.ep.worker.create_l7rule.assert_called_once_with( + self.resource_id) + + def test_update_l7rule(self): + self.ep.update_l7rule(self.context, self.resource_id, + self.resource_updates) + self.ep.worker.update_l7rule.assert_called_once_with( + self.resource_id, self.resource_updates) + + def test_delete_l7rule(self): + self.ep.delete_l7rule(self.context, self.resource_id) + self.ep.worker.delete_l7rule.assert_called_once_with( + self.resource_id) + + def test_update_amphora_agent_config(self): + self.ep.update_amphora_agent_config(self.context, self.resource_id) + self.ep.worker.update_amphora_agent_config.assert_called_once_with( + self.resource_id) diff --git a/octavia/tests/unit/controller/worker/v1/__init__.py b/octavia/tests/unit/controller/worker/v1/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/controller/worker/v1/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/controller/worker/v1/flows/__init__.py b/octavia/tests/unit/controller/worker/v1/flows/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/controller/worker/v1/flows/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/controller/worker/flows/test_amphora_flows.py b/octavia/tests/unit/controller/worker/v1/flows/test_amphora_flows.py similarity index 99% rename from octavia/tests/unit/controller/worker/flows/test_amphora_flows.py rename to octavia/tests/unit/controller/worker/v1/flows/test_amphora_flows.py index e541f4138c..30b6828c38 100644 --- a/octavia/tests/unit/controller/worker/flows/test_amphora_flows.py +++ b/octavia/tests/unit/controller/worker/v1/flows/test_amphora_flows.py @@ -20,7 +20,7 @@ from taskflow.patterns import linear_flow as flow from octavia.common import constants from octavia.common import data_models -from octavia.controller.worker.flows import amphora_flows +from octavia.controller.worker.v1.flows import amphora_flows import octavia.tests.unit.base as base AUTH_VERSION = '2' diff --git a/octavia/tests/unit/controller/worker/flows/test_health_monitor_flows.py b/octavia/tests/unit/controller/worker/v1/flows/test_health_monitor_flows.py similarity index 97% rename from octavia/tests/unit/controller/worker/flows/test_health_monitor_flows.py rename to octavia/tests/unit/controller/worker/v1/flows/test_health_monitor_flows.py index 00d0755ff0..e63c29fa6e 100644 --- a/octavia/tests/unit/controller/worker/flows/test_health_monitor_flows.py +++ b/octavia/tests/unit/controller/worker/v1/flows/test_health_monitor_flows.py @@ -16,7 +16,7 @@ from taskflow.patterns import linear_flow as flow from octavia.common import constants -from octavia.controller.worker.flows import health_monitor_flows +from octavia.controller.worker.v1.flows import health_monitor_flows import octavia.tests.unit.base as base diff --git a/octavia/tests/unit/controller/worker/flows/test_l7policy_flows.py b/octavia/tests/unit/controller/worker/v1/flows/test_l7policy_flows.py similarity index 97% rename from octavia/tests/unit/controller/worker/flows/test_l7policy_flows.py rename to octavia/tests/unit/controller/worker/v1/flows/test_l7policy_flows.py index 22d762b632..823d5ac854 100644 --- a/octavia/tests/unit/controller/worker/flows/test_l7policy_flows.py +++ b/octavia/tests/unit/controller/worker/v1/flows/test_l7policy_flows.py @@ -16,7 +16,7 @@ from taskflow.patterns import linear_flow as flow from octavia.common import constants -from octavia.controller.worker.flows import l7policy_flows +from octavia.controller.worker.v1.flows import l7policy_flows import octavia.tests.unit.base as base diff --git a/octavia/tests/unit/controller/worker/flows/test_l7rule_flows.py b/octavia/tests/unit/controller/worker/v1/flows/test_l7rule_flows.py similarity index 97% rename from 
octavia/tests/unit/controller/worker/flows/test_l7rule_flows.py rename to octavia/tests/unit/controller/worker/v1/flows/test_l7rule_flows.py index 9003d0480c..36fce25e49 100644 --- a/octavia/tests/unit/controller/worker/flows/test_l7rule_flows.py +++ b/octavia/tests/unit/controller/worker/v1/flows/test_l7rule_flows.py @@ -16,7 +16,7 @@ from taskflow.patterns import linear_flow as flow from octavia.common import constants -from octavia.controller.worker.flows import l7rule_flows +from octavia.controller.worker.v1.flows import l7rule_flows import octavia.tests.unit.base as base diff --git a/octavia/tests/unit/controller/worker/flows/test_listener_flows.py b/octavia/tests/unit/controller/worker/v1/flows/test_listener_flows.py similarity index 98% rename from octavia/tests/unit/controller/worker/flows/test_listener_flows.py rename to octavia/tests/unit/controller/worker/v1/flows/test_listener_flows.py index 2d4723107a..bc82fbb070 100644 --- a/octavia/tests/unit/controller/worker/flows/test_listener_flows.py +++ b/octavia/tests/unit/controller/worker/v1/flows/test_listener_flows.py @@ -16,7 +16,7 @@ import mock from taskflow.patterns import linear_flow as flow from octavia.common import constants -from octavia.controller.worker.flows import listener_flows +from octavia.controller.worker.v1.flows import listener_flows import octavia.tests.unit.base as base diff --git a/octavia/tests/unit/controller/worker/flows/test_load_balancer_flows.py b/octavia/tests/unit/controller/worker/v1/flows/test_load_balancer_flows.py similarity index 99% rename from octavia/tests/unit/controller/worker/flows/test_load_balancer_flows.py rename to octavia/tests/unit/controller/worker/v1/flows/test_load_balancer_flows.py index 5bc5128b31..c0789a4d27 100644 --- a/octavia/tests/unit/controller/worker/flows/test_load_balancer_flows.py +++ b/octavia/tests/unit/controller/worker/v1/flows/test_load_balancer_flows.py @@ -20,7 +20,7 @@ from taskflow.patterns import linear_flow as flow from octavia.common import constants from octavia.common import exceptions -from octavia.controller.worker.flows import load_balancer_flows +from octavia.controller.worker.v1.flows import load_balancer_flows import octavia.tests.unit.base as base diff --git a/octavia/tests/unit/controller/worker/flows/test_member_flows.py b/octavia/tests/unit/controller/worker/v1/flows/test_member_flows.py similarity index 98% rename from octavia/tests/unit/controller/worker/flows/test_member_flows.py rename to octavia/tests/unit/controller/worker/v1/flows/test_member_flows.py index c1edbfcacf..f03c3d126a 100644 --- a/octavia/tests/unit/controller/worker/flows/test_member_flows.py +++ b/octavia/tests/unit/controller/worker/v1/flows/test_member_flows.py @@ -17,7 +17,7 @@ import mock from taskflow.patterns import linear_flow as flow from octavia.common import constants -from octavia.controller.worker.flows import member_flows +from octavia.controller.worker.v1.flows import member_flows import octavia.tests.unit.base as base diff --git a/octavia/tests/unit/controller/worker/flows/test_pool_flows.py b/octavia/tests/unit/controller/worker/v1/flows/test_pool_flows.py similarity index 97% rename from octavia/tests/unit/controller/worker/flows/test_pool_flows.py rename to octavia/tests/unit/controller/worker/v1/flows/test_pool_flows.py index fcc08a2c03..fd9cd8a929 100644 --- a/octavia/tests/unit/controller/worker/flows/test_pool_flows.py +++ b/octavia/tests/unit/controller/worker/v1/flows/test_pool_flows.py @@ -16,7 +16,7 @@ from taskflow.patterns import linear_flow as 
flow from octavia.common import constants -from octavia.controller.worker.flows import pool_flows +from octavia.controller.worker.v1.flows import pool_flows import octavia.tests.unit.base as base diff --git a/octavia/tests/unit/controller/worker/v1/tasks/__init__.py b/octavia/tests/unit/controller/worker/v1/tasks/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/controller/worker/v1/tasks/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/controller/worker/tasks/test_amphora_driver_tasks.py b/octavia/tests/unit/controller/worker/v1/tasks/test_amphora_driver_tasks.py similarity index 99% rename from octavia/tests/unit/controller/worker/tasks/test_amphora_driver_tasks.py rename to octavia/tests/unit/controller/worker/v1/tasks/test_amphora_driver_tasks.py index 02c30a6ac5..5bb70adfb6 100644 --- a/octavia/tests/unit/controller/worker/tasks/test_amphora_driver_tasks.py +++ b/octavia/tests/unit/controller/worker/v1/tasks/test_amphora_driver_tasks.py @@ -24,7 +24,7 @@ from octavia.amphorae.driver_exceptions import exceptions as driver_except from octavia.common import constants from octavia.common import data_models from octavia.common import utils -from octavia.controller.worker.tasks import amphora_driver_tasks +from octavia.controller.worker.v1.tasks import amphora_driver_tasks from octavia.db import repositories as repo import octavia.tests.unit.base as base @@ -60,7 +60,7 @@ _session_mock = mock.MagicMock() @mock.patch('octavia.db.repositories.ListenerRepository.get', return_value=_listener_mock) @mock.patch('octavia.db.api.get_session', return_value=_session_mock) -@mock.patch('octavia.controller.worker.tasks.amphora_driver_tasks.LOG') +@mock.patch('octavia.controller.worker.v1.tasks.amphora_driver_tasks.LOG') @mock.patch('oslo_utils.uuidutils.generate_uuid', return_value=AMP_ID) @mock.patch('stevedore.driver.DriverManager.driver') class TestAmphoraDriverTasks(base.TestCase): diff --git a/octavia/tests/unit/controller/worker/tasks/test_cert_task.py b/octavia/tests/unit/controller/worker/v1/tasks/test_cert_task.py similarity index 96% rename from octavia/tests/unit/controller/worker/tasks/test_cert_task.py rename to octavia/tests/unit/controller/worker/v1/tasks/test_cert_task.py index 6bb9b184ea..43e2a172f9 100644 --- a/octavia/tests/unit/controller/worker/tasks/test_cert_task.py +++ b/octavia/tests/unit/controller/worker/v1/tasks/test_cert_task.py @@ -20,7 +20,7 @@ from oslo_config import cfg from octavia.certificates.common import local from octavia.common import utils -from octavia.controller.worker.tasks import cert_task +from octavia.controller.worker.v1.tasks import cert_task import octavia.tests.unit.base as base CONF = cfg.CONF diff --git a/octavia/tests/unit/controller/worker/tasks/test_compute_tasks.py b/octavia/tests/unit/controller/worker/v1/tasks/test_compute_tasks.py similarity index 99% rename from octavia/tests/unit/controller/worker/tasks/test_compute_tasks.py rename to 
octavia/tests/unit/controller/worker/v1/tasks/test_compute_tasks.py index a89ca172b0..b86e0492e2 100644 --- a/octavia/tests/unit/controller/worker/tasks/test_compute_tasks.py +++ b/octavia/tests/unit/controller/worker/v1/tasks/test_compute_tasks.py @@ -22,7 +22,7 @@ from oslo_utils import uuidutils from octavia.common import constants from octavia.common import exceptions from octavia.common import utils -from octavia.controller.worker.tasks import compute_tasks +from octavia.controller.worker.v1.tasks import compute_tasks from octavia.tests.common import utils as test_utils import octavia.tests.unit.base as base diff --git a/octavia/tests/unit/controller/worker/tasks/test_database_tasks.py b/octavia/tests/unit/controller/worker/v1/tasks/test_database_tasks.py similarity index 99% rename from octavia/tests/unit/controller/worker/tasks/test_database_tasks.py rename to octavia/tests/unit/controller/worker/v1/tasks/test_database_tasks.py index 7611edb621..3f2b1931e5 100644 --- a/octavia/tests/unit/controller/worker/tasks/test_database_tasks.py +++ b/octavia/tests/unit/controller/worker/v1/tasks/test_database_tasks.py @@ -25,7 +25,7 @@ from taskflow.types import failure from octavia.common import constants from octavia.common import data_models from octavia.common import utils -from octavia.controller.worker.tasks import database_tasks +from octavia.controller.worker.v1.tasks import database_tasks from octavia.db import repositories as repo import octavia.tests.unit.base as base @@ -97,7 +97,7 @@ _compute_mock.compute_flavor = COMPUTE_FLAVOR @mock.patch('octavia.db.repositories.ListenerRepository.update') @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') @mock.patch('octavia.db.api.get_session', return_value='TEST') -@mock.patch('octavia.controller.worker.tasks.database_tasks.LOG') +@mock.patch('octavia.controller.worker.v1.tasks.database_tasks.LOG') @mock.patch('oslo_utils.uuidutils.generate_uuid', return_value=AMP_ID) class TestDatabaseTasks(base.TestCase): diff --git a/octavia/tests/unit/controller/worker/tasks/test_database_tasks_quota.py b/octavia/tests/unit/controller/worker/v1/tasks/test_database_tasks_quota.py similarity index 99% rename from octavia/tests/unit/controller/worker/tasks/test_database_tasks_quota.py rename to octavia/tests/unit/controller/worker/v1/tasks/test_database_tasks_quota.py index 40594a6fd1..b376e844c2 100644 --- a/octavia/tests/unit/controller/worker/tasks/test_database_tasks_quota.py +++ b/octavia/tests/unit/controller/worker/v1/tasks/test_database_tasks_quota.py @@ -19,7 +19,7 @@ from taskflow.types import failure from octavia.common import data_models from octavia.common import exceptions -from octavia.controller.worker.tasks import database_tasks +from octavia.controller.worker.v1.tasks import database_tasks import octavia.tests.unit.base as base diff --git a/octavia/tests/unit/controller/worker/tasks/test_lifecycle_tasks.py b/octavia/tests/unit/controller/worker/v1/tasks/test_lifecycle_tasks.py similarity index 99% rename from octavia/tests/unit/controller/worker/tasks/test_lifecycle_tasks.py rename to octavia/tests/unit/controller/worker/v1/tasks/test_lifecycle_tasks.py index 1a3e319bd2..22a25e2c0a 100644 --- a/octavia/tests/unit/controller/worker/tasks/test_lifecycle_tasks.py +++ b/octavia/tests/unit/controller/worker/v1/tasks/test_lifecycle_tasks.py @@ -15,7 +15,7 @@ import mock from oslo_utils import uuidutils -from octavia.controller.worker.tasks import lifecycle_tasks +from octavia.controller.worker.v1.tasks import lifecycle_tasks 
import octavia.tests.unit.base as base diff --git a/octavia/tests/unit/controller/worker/tasks/test_model_tasks.py b/octavia/tests/unit/controller/worker/v1/tasks/test_model_tasks.py similarity index 95% rename from octavia/tests/unit/controller/worker/tasks/test_model_tasks.py rename to octavia/tests/unit/controller/worker/v1/tasks/test_model_tasks.py index 7191e9e0f4..3c50646f3a 100644 --- a/octavia/tests/unit/controller/worker/tasks/test_model_tasks.py +++ b/octavia/tests/unit/controller/worker/v1/tasks/test_model_tasks.py @@ -15,7 +15,7 @@ import mock -from octavia.controller.worker.tasks import model_tasks +from octavia.controller.worker.v1.tasks import model_tasks import octavia.tests.unit.base as base diff --git a/octavia/tests/unit/controller/worker/tasks/test_network_tasks.py b/octavia/tests/unit/controller/worker/v1/tasks/test_network_tasks.py similarity index 99% rename from octavia/tests/unit/controller/worker/tasks/test_network_tasks.py rename to octavia/tests/unit/controller/worker/v1/tasks/test_network_tasks.py index a09daa40be..912c67b4ba 100644 --- a/octavia/tests/unit/controller/worker/tasks/test_network_tasks.py +++ b/octavia/tests/unit/controller/worker/v1/tasks/test_network_tasks.py @@ -21,7 +21,7 @@ from taskflow.types import failure from octavia.common import constants from octavia.common import data_models as o_data_models -from octavia.controller.worker.tasks import network_tasks +from octavia.controller.worker.v1.tasks import network_tasks from octavia.network import base as net_base from octavia.network import data_models from octavia.tests.common import constants as t_constants diff --git a/octavia/tests/unit/controller/worker/test_controller_worker.py b/octavia/tests/unit/controller/worker/v1/test_controller_worker.py similarity index 96% rename from octavia/tests/unit/controller/worker/test_controller_worker.py rename to octavia/tests/unit/controller/worker/v1/test_controller_worker.py index 46d67e660e..a8c3f989a0 100644 --- a/octavia/tests/unit/controller/worker/test_controller_worker.py +++ b/octavia/tests/unit/controller/worker/v1/test_controller_worker.py @@ -21,7 +21,7 @@ from oslo_utils import uuidutils from octavia.common import base_taskflow from octavia.common import constants from octavia.common import data_models -from octavia.controller.worker import controller_worker +from octavia.controller.worker.v1 import controller_worker import octavia.tests.unit.base as base @@ -115,7 +115,7 @@ class TestControllerWorker(base.TestCase): super(TestControllerWorker, self).setUp() - @mock.patch('octavia.controller.worker.flows.' + @mock.patch('octavia.controller.worker.v1.flows.' 'amphora_flows.AmphoraFlows.get_create_amphora_flow', return_value='TEST') def test_create_amphora(self, @@ -150,7 +150,7 @@ class TestControllerWorker(base.TestCase): self.assertEqual(AMP_ID, amp) - @mock.patch('octavia.controller.worker.flows.' + @mock.patch('octavia.controller.worker.v1.flows.' 'amphora_flows.AmphoraFlows.get_delete_amphora_flow', return_value='TEST') def test_delete_amphora(self, @@ -182,7 +182,7 @@ class TestControllerWorker(base.TestCase): _flow_mock.run.assert_called_once_with() - @mock.patch('octavia.controller.worker.flows.' + @mock.patch('octavia.controller.worker.v1.flows.' 'health_monitor_flows.HealthMonitorFlows.' 
'get_create_health_monitor_flow', return_value=_flow_mock) @@ -220,7 +220,7 @@ class TestControllerWorker(base.TestCase): _flow_mock.run.assert_called_once_with() self.assertEqual(2, mock_health_mon_repo_get.call_count) - @mock.patch('octavia.controller.worker.flows.' + @mock.patch('octavia.controller.worker.v1.flows.' 'health_monitor_flows.HealthMonitorFlows.' 'get_delete_health_monitor_flow', return_value=_flow_mock) @@ -256,7 +256,7 @@ class TestControllerWorker(base.TestCase): _flow_mock.run.assert_called_once_with() - @mock.patch('octavia.controller.worker.flows.' + @mock.patch('octavia.controller.worker.v1.flows.' 'health_monitor_flows.HealthMonitorFlows.' 'get_update_health_monitor_flow', return_value=_flow_mock) @@ -296,7 +296,7 @@ class TestControllerWorker(base.TestCase): _flow_mock.run.assert_called_once_with() - @mock.patch('octavia.controller.worker.flows.' + @mock.patch('octavia.controller.worker.v1.flows.' 'listener_flows.ListenerFlows.get_create_listener_flow', return_value=_flow_mock) def test_create_listener(self, @@ -329,7 +329,7 @@ class TestControllerWorker(base.TestCase): _flow_mock.run.assert_called_once_with() self.assertEqual(2, mock_listener_repo_get.call_count) - @mock.patch('octavia.controller.worker.flows.' + @mock.patch('octavia.controller.worker.v1.flows.' 'listener_flows.ListenerFlows.get_delete_listener_flow', return_value=_flow_mock) def test_delete_listener(self, @@ -358,7 +358,7 @@ class TestControllerWorker(base.TestCase): _flow_mock.run.assert_called_once_with() - @mock.patch('octavia.controller.worker.flows.' + @mock.patch('octavia.controller.worker.v1.flows.' 'listener_flows.ListenerFlows.get_update_listener_flow', return_value=_flow_mock) def test_update_listener(self, @@ -393,7 +393,7 @@ class TestControllerWorker(base.TestCase): _flow_mock.run.assert_called_once_with() - @mock.patch('octavia.controller.worker.flows.load_balancer_flows.' + @mock.patch('octavia.controller.worker.v1.flows.load_balancer_flows.' 'LoadBalancerFlows.get_create_load_balancer_flow', return_value=_flow_mock) def test_create_load_balancer_single( @@ -439,7 +439,7 @@ class TestControllerWorker(base.TestCase): mock_eng.run.assert_any_call() self.assertEqual(4, mock_lb_repo_get.call_count) - @mock.patch('octavia.controller.worker.flows.load_balancer_flows.' + @mock.patch('octavia.controller.worker.v1.flows.load_balancer_flows.' 'LoadBalancerFlows.get_create_load_balancer_flow', return_value=_flow_mock) def test_create_load_balancer_active_standby( @@ -484,7 +484,7 @@ class TestControllerWorker(base.TestCase): mock_get_create_load_balancer_flow.return_value, store=store) mock_eng.run.assert_any_call() - @mock.patch('octavia.controller.worker.flows.load_balancer_flows.' + @mock.patch('octavia.controller.worker.v1.flows.load_balancer_flows.' 'LoadBalancerFlows.get_create_load_balancer_flow') def test_create_load_balancer_full_graph_single( self, @@ -530,11 +530,11 @@ class TestControllerWorker(base.TestCase): mock_get_create_load_balancer_flow.return_value, store=store) mock_eng.run.assert_any_call() - @mock.patch('octavia.controller.worker.flows.load_balancer_flows.' + @mock.patch('octavia.controller.worker.v1.flows.load_balancer_flows.' 'LoadBalancerFlows.get_create_load_balancer_flow') - @mock.patch('octavia.controller.worker.flows.load_balancer_flows.' + @mock.patch('octavia.controller.worker.v1.flows.load_balancer_flows.' 'LoadBalancerFlows._create_single_topology') - @mock.patch('octavia.controller.worker.flows.load_balancer_flows.' 
+ @mock.patch('octavia.controller.worker.v1.flows.load_balancer_flows.' 'LoadBalancerFlows._create_active_standby_topology') def test_create_load_balancer_full_graph_active_standby( self, @@ -581,7 +581,7 @@ class TestControllerWorker(base.TestCase): mock_get_create_load_balancer_flow.return_value, store=store) mock_eng.run.assert_any_call() - @mock.patch('octavia.controller.worker.flows.load_balancer_flows.' + @mock.patch('octavia.controller.worker.v1.flows.load_balancer_flows.' 'LoadBalancerFlows.get_delete_load_balancer_flow', return_value=(_flow_mock, {'test': 'test'})) def test_delete_load_balancer_without_cascade(self, @@ -619,7 +619,7 @@ class TestControllerWorker(base.TestCase): ) _flow_mock.run.assert_called_once_with() - @mock.patch('octavia.controller.worker.flows.load_balancer_flows.' + @mock.patch('octavia.controller.worker.v1.flows.load_balancer_flows.' 'LoadBalancerFlows.get_cascade_delete_load_balancer_flow', return_value=(_flow_mock, {'test': 'test'})) def test_delete_load_balancer_with_cascade(self, @@ -657,7 +657,7 @@ class TestControllerWorker(base.TestCase): ) _flow_mock.run.assert_called_once_with() - @mock.patch('octavia.controller.worker.flows.load_balancer_flows.' + @mock.patch('octavia.controller.worker.v1.flows.load_balancer_flows.' 'LoadBalancerFlows.get_update_load_balancer_flow', return_value=_flow_mock) @mock.patch('octavia.db.repositories.ListenerRepository.get_all', @@ -698,7 +698,7 @@ class TestControllerWorker(base.TestCase): _flow_mock.run.assert_called_once_with() - @mock.patch('octavia.controller.worker.flows.' + @mock.patch('octavia.controller.worker.v1.flows.' 'member_flows.MemberFlows.get_create_member_flow', return_value=_flow_mock) def test_create_member(self, @@ -734,7 +734,7 @@ class TestControllerWorker(base.TestCase): _flow_mock.run.assert_called_once_with() self.assertEqual(2, mock_member_repo_get.call_count) - @mock.patch('octavia.controller.worker.flows.' + @mock.patch('octavia.controller.worker.v1.flows.' 'member_flows.MemberFlows.get_delete_member_flow', return_value=_flow_mock) def test_delete_member(self, @@ -768,7 +768,7 @@ class TestControllerWorker(base.TestCase): _flow_mock.run.assert_called_once_with() - @mock.patch('octavia.controller.worker.flows.' + @mock.patch('octavia.controller.worker.v1.flows.' 'member_flows.MemberFlows.get_update_member_flow', return_value=_flow_mock) def test_update_member(self, @@ -805,7 +805,7 @@ class TestControllerWorker(base.TestCase): _flow_mock.run.assert_called_once_with() - @mock.patch('octavia.controller.worker.flows.' + @mock.patch('octavia.controller.worker.v1.flows.' 'member_flows.MemberFlows.get_batch_update_members_flow', return_value=_flow_mock) def test_batch_update_members(self, @@ -837,7 +837,7 @@ class TestControllerWorker(base.TestCase): _flow_mock.run.assert_called_once_with() - @mock.patch('octavia.controller.worker.flows.' + @mock.patch('octavia.controller.worker.v1.flows.' 'pool_flows.PoolFlows.get_create_pool_flow', return_value=_flow_mock) def test_create_pool(self, @@ -871,7 +871,7 @@ class TestControllerWorker(base.TestCase): _flow_mock.run.assert_called_once_with() self.assertEqual(2, mock_pool_repo_get.call_count) - @mock.patch('octavia.controller.worker.flows.' + @mock.patch('octavia.controller.worker.v1.flows.' 'pool_flows.PoolFlows.get_delete_pool_flow', return_value=_flow_mock) def test_delete_pool(self, @@ -903,7 +903,7 @@ class TestControllerWorker(base.TestCase): _flow_mock.run.assert_called_once_with() - @mock.patch('octavia.controller.worker.flows.' 
+ @mock.patch('octavia.controller.worker.v1.flows.' 'pool_flows.PoolFlows.get_update_pool_flow', return_value=_flow_mock) def test_update_pool(self, @@ -938,7 +938,7 @@ class TestControllerWorker(base.TestCase): _flow_mock.run.assert_called_once_with() - @mock.patch('octavia.controller.worker.flows.' + @mock.patch('octavia.controller.worker.v1.flows.' 'l7policy_flows.L7PolicyFlows.get_create_l7policy_flow', return_value=_flow_mock) def test_create_l7policy(self, @@ -972,7 +972,7 @@ class TestControllerWorker(base.TestCase): _flow_mock.run.assert_called_once_with() self.assertEqual(2, mock_l7policy_repo_get.call_count) - @mock.patch('octavia.controller.worker.flows.' + @mock.patch('octavia.controller.worker.v1.flows.' 'l7policy_flows.L7PolicyFlows.get_delete_l7policy_flow', return_value=_flow_mock) def test_delete_l7policy(self, @@ -1004,7 +1004,7 @@ class TestControllerWorker(base.TestCase): _flow_mock.run.assert_called_once_with() - @mock.patch('octavia.controller.worker.flows.' + @mock.patch('octavia.controller.worker.v1.flows.' 'l7policy_flows.L7PolicyFlows.get_update_l7policy_flow', return_value=_flow_mock) def test_update_l7policy(self, @@ -1039,7 +1039,7 @@ class TestControllerWorker(base.TestCase): _flow_mock.run.assert_called_once_with() - @mock.patch('octavia.controller.worker.flows.' + @mock.patch('octavia.controller.worker.v1.flows.' 'l7rule_flows.L7RuleFlows.get_create_l7rule_flow', return_value=_flow_mock) def test_create_l7rule(self, @@ -1074,7 +1074,7 @@ class TestControllerWorker(base.TestCase): _flow_mock.run.assert_called_once_with() self.assertEqual(2, mock_l7rule_repo_get.call_count) - @mock.patch('octavia.controller.worker.flows.' + @mock.patch('octavia.controller.worker.v1.flows.' 'l7rule_flows.L7RuleFlows.get_delete_l7rule_flow', return_value=_flow_mock) def test_delete_l7rule(self, @@ -1107,7 +1107,7 @@ class TestControllerWorker(base.TestCase): _flow_mock.run.assert_called_once_with() - @mock.patch('octavia.controller.worker.flows.' + @mock.patch('octavia.controller.worker.v1.flows.' 'l7rule_flows.L7RuleFlows.get_update_l7rule_flow', return_value=_flow_mock) def test_update_l7rule(self, @@ -1145,7 +1145,7 @@ class TestControllerWorker(base.TestCase): @mock.patch('octavia.db.repositories.FlavorRepository.' 'get_flavor_metadata_dict', return_value={}) - @mock.patch('octavia.controller.worker.flows.' + @mock.patch('octavia.controller.worker.v1.flows.' 'amphora_flows.AmphoraFlows.get_failover_flow', return_value=_flow_mock) @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') @@ -1185,8 +1185,8 @@ class TestControllerWorker(base.TestCase): mock_update.assert_called_with(_db_session, LB_ID, provisioning_status=constants.ACTIVE) - @mock.patch('octavia.controller.worker.controller_worker.ControllerWorker.' - '_perform_amphora_failover') + @mock.patch('octavia.controller.worker.v1.controller_worker.' + 'ControllerWorker._perform_amphora_failover') def test_failover_amp_missing_amp(self, mock_perform_amp_failover, mock_api_get_session, @@ -1208,8 +1208,8 @@ class TestControllerWorker(base.TestCase): mock_perform_amp_failover.assert_not_called() - @mock.patch('octavia.controller.worker.controller_worker.ControllerWorker.' - '_perform_amphora_failover') + @mock.patch('octavia.controller.worker.v1.controller_worker.' 
+ 'ControllerWorker._perform_amphora_failover') @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') def test_failover_amp_flow_exception(self, mock_update, @@ -1232,8 +1232,8 @@ class TestControllerWorker(base.TestCase): mock_update.assert_called_with(_db_session, LB_ID, provisioning_status=constants.ERROR) - @mock.patch('octavia.controller.worker.controller_worker.ControllerWorker.' - '_perform_amphora_failover') + @mock.patch('octavia.controller.worker.v1.controller_worker.' + 'ControllerWorker._perform_amphora_failover') @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') def test_failover_amp_no_lb(self, mock_lb_update, @@ -1287,7 +1287,7 @@ class TestControllerWorker(base.TestCase): mock_delete.assert_called_with(_db_session, amphora_id=AMP_ID) mock_taskflow_load.assert_not_called() - @mock.patch('octavia.controller.worker.' + @mock.patch('octavia.controller.worker.v1.' 'controller_worker.ControllerWorker._perform_amphora_failover') @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') def test_failover_loadbalancer(self, @@ -1336,7 +1336,7 @@ class TestControllerWorker(base.TestCase): @mock.patch('octavia.db.repositories.FlavorRepository.' 'get_flavor_metadata_dict', return_value={}) - @mock.patch('octavia.controller.worker.flows.' + @mock.patch('octavia.controller.worker.v1.flows.' 'amphora_flows.AmphoraFlows.get_failover_flow', return_value=_flow_mock) @mock.patch( @@ -1383,7 +1383,7 @@ class TestControllerWorker(base.TestCase): mock_update.assert_called_with(_db_session, LB_ID, provisioning_status=constants.ACTIVE) - @mock.patch('octavia.controller.worker.flows.' + @mock.patch('octavia.controller.worker.v1.flows.' 'amphora_flows.AmphoraFlows.cert_rotate_amphora_flow', return_value=_flow_mock) def test_amphora_cert_rotation(self, @@ -1412,7 +1412,7 @@ class TestControllerWorker(base.TestCase): @mock.patch('octavia.db.repositories.FlavorRepository.' 'get_flavor_metadata_dict') @mock.patch('octavia.db.repositories.AmphoraRepository.get_lb_for_amphora') - @mock.patch('octavia.controller.worker.flows.' + @mock.patch('octavia.controller.worker.v1.flows.' 'amphora_flows.AmphoraFlows.update_amphora_config_flow', return_value=_flow_mock) def test_update_amphora_agent_config(self, diff --git a/octavia/tests/unit/controller/worker/v2/__init__.py b/octavia/tests/unit/controller/worker/v2/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/controller/worker/v2/flows/__init__.py b/octavia/tests/unit/controller/worker/v2/flows/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/flows/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/controller/worker/v2/flows/test_amphora_flows.py b/octavia/tests/unit/controller/worker/v2/flows/test_amphora_flows.py new file mode 100644 index 0000000000..0996afc9f4 --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/flows/test_amphora_flows.py @@ -0,0 +1,422 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import mock +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from taskflow.patterns import linear_flow as flow + +from octavia.common import constants +from octavia.common import data_models +from octavia.controller.worker.v2.flows import amphora_flows +import octavia.tests.unit.base as base + +AUTH_VERSION = '2' + + +# NOTE: We patch the get_network_driver for all the calls so we don't +# inadvertently make real calls. 
+@mock.patch('octavia.common.utils.get_network_driver') +class TestAmphoraFlows(base.TestCase): + + def setUp(self): + super(TestAmphoraFlows, self).setUp() + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + self.conf.config( + group="controller_worker", + amphora_driver='amphora_haproxy_rest_driver') + self.conf.config(group="nova", enable_anti_affinity=False) + self.AmpFlow = amphora_flows.AmphoraFlows() + self.amp1 = data_models.Amphora(id=1) + self.amp2 = data_models.Amphora(id=2) + self.amp3 = data_models.Amphora(id=3, status=constants.DELETED) + self.lb = data_models.LoadBalancer( + id=4, amphorae=[self.amp1, self.amp2, self.amp3]) + + def test_get_create_amphora_flow(self, mock_get_net_driver): + + amp_flow = self.AmpFlow.get_create_amphora_flow() + + self.assertIsInstance(amp_flow, flow.Flow) + + self.assertIn(constants.AMPHORA, amp_flow.provides) + self.assertIn(constants.AMPHORA_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_ID, amp_flow.provides) + self.assertIn(constants.SERVER_PEM, amp_flow.provides) + + self.assertEqual(5, len(amp_flow.provides)) + self.assertEqual(2, len(amp_flow.requires)) + + def test_get_create_amphora_flow_cert(self, mock_get_net_driver): + self.AmpFlow = amphora_flows.AmphoraFlows() + + amp_flow = self.AmpFlow.get_create_amphora_flow() + + self.assertIsInstance(amp_flow, flow.Flow) + + self.assertIn(constants.AMPHORA, amp_flow.provides) + self.assertIn(constants.AMPHORA_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_ID, amp_flow.provides) + + self.assertEqual(5, len(amp_flow.provides)) + self.assertEqual(2, len(amp_flow.requires)) + + def test_get_create_amphora_for_lb_flow(self, mock_get_net_driver): + + amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow( + 'SOMEPREFIX', constants.ROLE_STANDALONE) + + self.assertIsInstance(amp_flow, flow.Flow) + + self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) + + self.assertIn(constants.AMPHORA, amp_flow.provides) + self.assertIn(constants.AMPHORA_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides) + self.assertIn(constants.SERVER_PEM, amp_flow.provides) + + self.assertEqual(5, len(amp_flow.provides)) + self.assertEqual(3, len(amp_flow.requires)) + + def test_get_cert_create_amphora_for_lb_flow(self, mock_get_net_driver): + + self.AmpFlow = amphora_flows.AmphoraFlows() + + amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow( + 'SOMEPREFIX', constants.ROLE_STANDALONE) + + self.assertIsInstance(amp_flow, flow.Flow) + + self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) + + self.assertIn(constants.AMPHORA, amp_flow.provides) + self.assertIn(constants.AMPHORA_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides) + self.assertIn(constants.SERVER_PEM, amp_flow.provides) + + self.assertEqual(5, len(amp_flow.provides)) + self.assertEqual(3, len(amp_flow.requires)) + + def test_get_cert_master_create_amphora_for_lb_flow( + self, mock_get_net_driver): + + self.AmpFlow = amphora_flows.AmphoraFlows() + + amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow( + 'SOMEPREFIX', constants.ROLE_MASTER) + + self.assertIsInstance(amp_flow, flow.Flow) + + self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) + + self.assertIn(constants.AMPHORA, amp_flow.provides) + self.assertIn(constants.AMPHORA_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_ID, amp_flow.provides) + 
self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides) + self.assertIn(constants.SERVER_PEM, amp_flow.provides) + + self.assertEqual(5, len(amp_flow.provides)) + self.assertEqual(3, len(amp_flow.requires)) + + def test_get_cert_master_rest_anti_affinity_create_amphora_for_lb_flow( + self, mock_get_net_driver): + + self.conf.config(group="nova", enable_anti_affinity=True) + + self.AmpFlow = amphora_flows.AmphoraFlows() + amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow( + 'SOMEPREFIX', constants.ROLE_MASTER) + + self.assertIsInstance(amp_flow, flow.Flow) + self.assertIn(constants.AMPHORA_ID, amp_flow.provides) + self.assertIn(constants.SERVER_GROUP_ID, amp_flow.requires) + self.assertIn(constants.COMPUTE_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides) + self.assertIn(constants.SERVER_PEM, amp_flow.provides) + + self.assertEqual(5, len(amp_flow.provides)) + self.assertEqual(4, len(amp_flow.requires)) + self.conf.config(group="nova", enable_anti_affinity=False) + + def test_get_cert_backup_create_amphora_for_lb_flow( + self, mock_get_net_driver): + self.AmpFlow = amphora_flows.AmphoraFlows() + + amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow( + 'SOMEPREFIX', constants.ROLE_BACKUP) + + self.assertIsInstance(amp_flow, flow.Flow) + + self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) + + self.assertIn(constants.AMPHORA, amp_flow.provides) + self.assertIn(constants.AMPHORA_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides) + self.assertIn(constants.SERVER_PEM, amp_flow.provides) + + self.assertEqual(5, len(amp_flow.provides)) + self.assertEqual(3, len(amp_flow.requires)) + + def test_get_cert_bogus_create_amphora_for_lb_flow( + self, mock_get_net_driver): + self.AmpFlow = amphora_flows.AmphoraFlows() + + amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow( + 'SOMEPREFIX', 'BOGUS_ROLE') + + self.assertIsInstance(amp_flow, flow.Flow) + + self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) + + self.assertIn(constants.AMPHORA, amp_flow.provides) + self.assertIn(constants.AMPHORA_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides) + self.assertIn(constants.SERVER_PEM, amp_flow.provides) + + self.assertEqual(5, len(amp_flow.provides)) + self.assertEqual(3, len(amp_flow.requires)) + + def test_get_cert_backup_rest_anti_affinity_create_amphora_for_lb_flow( + self, mock_get_net_driver): + self.conf.config(group="nova", enable_anti_affinity=True) + + self.AmpFlow = amphora_flows.AmphoraFlows() + amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow( + 'SOMEPREFIX', constants.ROLE_BACKUP) + + self.assertIsInstance(amp_flow, flow.Flow) + self.assertIn(constants.AMPHORA_ID, amp_flow.provides) + self.assertIn(constants.SERVER_GROUP_ID, amp_flow.requires) + self.assertIn(constants.COMPUTE_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides) + self.assertIn(constants.SERVER_PEM, amp_flow.provides) + + self.assertEqual(5, len(amp_flow.provides)) + self.assertEqual(4, len(amp_flow.requires)) + self.conf.config(group="nova", enable_anti_affinity=False) + + def test_get_delete_amphora_flow(self, mock_get_net_driver): + + amp_flow = self.AmpFlow.get_delete_amphora_flow() + + self.assertIsInstance(amp_flow, flow.Flow) + + self.assertIn(constants.AMPHORA, amp_flow.requires) + + self.assertEqual(0, len(amp_flow.provides)) + self.assertEqual(1, 
len(amp_flow.requires)) + + def test_allocate_amp_to_lb_decider(self, mock_get_net_driver): + history = mock.MagicMock() + values = mock.MagicMock(side_effect=[['TEST'], [None]]) + history.values = values + result = self.AmpFlow._allocate_amp_to_lb_decider(history) + self.assertTrue(result) + result = self.AmpFlow._allocate_amp_to_lb_decider(history) + self.assertFalse(result) + + def test_create_new_amp_for_lb_decider(self, mock_get_net_driver): + history = mock.MagicMock() + values = mock.MagicMock(side_effect=[[None], ['TEST']]) + history.values = values + result = self.AmpFlow._create_new_amp_for_lb_decider(history) + self.assertTrue(result) + result = self.AmpFlow._create_new_amp_for_lb_decider(history) + self.assertFalse(result) + + def test_get_failover_flow_allocated(self, mock_get_net_driver): + + amp_flow = self.AmpFlow.get_failover_flow( + load_balancer=self.lb) + + self.assertIsInstance(amp_flow, flow.Flow) + + self.assertIn(constants.FAILED_AMPHORA, amp_flow.requires) + self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) + + self.assertIn(constants.AMP_DATA, amp_flow.provides) + self.assertIn(constants.AMPHORA, amp_flow.provides) + self.assertIn(constants.AMPHORA_ID, amp_flow.provides) + self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides) + self.assertIn(constants.COMPUTE_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides) + self.assertIn(constants.LISTENERS, amp_flow.provides) + self.assertIn(constants.LOADBALANCER, amp_flow.provides) + + self.assertEqual(4, len(amp_flow.requires)) + self.assertEqual(12, len(amp_flow.provides)) + + amp_flow = self.AmpFlow.get_failover_flow( + role=constants.ROLE_MASTER, load_balancer=self.lb) + + self.assertIsInstance(amp_flow, flow.Flow) + + self.assertIn(constants.FAILED_AMPHORA, amp_flow.requires) + self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) + + self.assertIn(constants.AMP_DATA, amp_flow.provides) + self.assertIn(constants.AMPHORA, amp_flow.provides) + self.assertIn(constants.AMPHORA_ID, amp_flow.provides) + self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides) + self.assertIn(constants.COMPUTE_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides) + self.assertIn(constants.LISTENERS, amp_flow.provides) + self.assertIn(constants.LOADBALANCER, amp_flow.provides) + + self.assertEqual(4, len(amp_flow.requires)) + self.assertEqual(12, len(amp_flow.provides)) + + amp_flow = self.AmpFlow.get_failover_flow( + role=constants.ROLE_BACKUP, load_balancer=self.lb) + + self.assertIsInstance(amp_flow, flow.Flow) + + self.assertIn(constants.FAILED_AMPHORA, amp_flow.requires) + self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) + + self.assertIn(constants.AMP_DATA, amp_flow.provides) + self.assertIn(constants.AMPHORA, amp_flow.provides) + self.assertIn(constants.AMPHORA_ID, amp_flow.provides) + self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides) + self.assertIn(constants.COMPUTE_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides) + self.assertIn(constants.LISTENERS, amp_flow.provides) + self.assertIn(constants.LOADBALANCER, amp_flow.provides) + + self.assertEqual(4, len(amp_flow.requires)) + self.assertEqual(12, len(amp_flow.provides)) + + amp_flow = self.AmpFlow.get_failover_flow( + role='BOGUSROLE', load_balancer=self.lb) + + self.assertIsInstance(amp_flow, flow.Flow) + + self.assertIn(constants.FAILED_AMPHORA, amp_flow.requires) + self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) + 
+ self.assertIn(constants.AMP_DATA, amp_flow.provides) + self.assertIn(constants.AMPHORA, amp_flow.provides) + self.assertIn(constants.AMPHORA_ID, amp_flow.provides) + self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides) + self.assertIn(constants.COMPUTE_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides) + self.assertIn(constants.LISTENERS, amp_flow.provides) + self.assertIn(constants.LOADBALANCER, amp_flow.provides) + + self.assertEqual(4, len(amp_flow.requires)) + self.assertEqual(12, len(amp_flow.provides)) + + def test_get_failover_flow_spare(self, mock_get_net_driver): + + amp_flow = self.AmpFlow.get_failover_flow() + + self.assertIsInstance(amp_flow, flow.Flow) + + self.assertIn(constants.FAILED_AMPHORA, amp_flow.requires) + + self.assertEqual(1, len(amp_flow.requires)) + self.assertEqual(0, len(amp_flow.provides)) + + def test_cert_rotate_amphora_flow(self, mock_get_net_driver): + self.AmpFlow = amphora_flows.AmphoraFlows() + + amp_rotate_flow = self.AmpFlow.cert_rotate_amphora_flow() + self.assertIsInstance(amp_rotate_flow, flow.Flow) + + self.assertIn(constants.SERVER_PEM, amp_rotate_flow.provides) + self.assertIn(constants.AMPHORA, amp_rotate_flow.requires) + + self.assertEqual(1, len(amp_rotate_flow.provides)) + self.assertEqual(2, len(amp_rotate_flow.requires)) + + def test_get_vrrp_subflow(self, mock_get_net_driver): + vrrp_subflow = self.AmpFlow.get_vrrp_subflow('123') + + self.assertIsInstance(vrrp_subflow, flow.Flow) + + self.assertIn(constants.LOADBALANCER, vrrp_subflow.provides) + + self.assertIn(constants.LOADBALANCER, vrrp_subflow.requires) + + self.assertEqual(2, len(vrrp_subflow.provides)) + self.assertEqual(1, len(vrrp_subflow.requires)) + + def test_get_post_map_lb_subflow(self, mock_get_net_driver): + + self.AmpFlow = amphora_flows.AmphoraFlows() + + amp_flow = self.AmpFlow._get_post_map_lb_subflow( + 'SOMEPREFIX', constants.ROLE_MASTER) + + self.assertIsInstance(amp_flow, flow.Flow) + + self.assertIn(constants.FLAVOR, amp_flow.requires) + self.assertIn(constants.AMPHORA_ID, amp_flow.requires) + self.assertIn(constants.AMPHORA, amp_flow.provides) + + self.assertEqual(1, len(amp_flow.provides)) + self.assertEqual(2, len(amp_flow.requires)) + + amp_flow = self.AmpFlow._get_post_map_lb_subflow( + 'SOMEPREFIX', constants.ROLE_BACKUP) + + self.assertIsInstance(amp_flow, flow.Flow) + + self.assertIn(constants.FLAVOR, amp_flow.requires) + self.assertIn(constants.AMPHORA_ID, amp_flow.requires) + self.assertIn(constants.AMPHORA, amp_flow.provides) + + self.assertEqual(1, len(amp_flow.provides)) + self.assertEqual(2, len(amp_flow.requires)) + + amp_flow = self.AmpFlow._get_post_map_lb_subflow( + 'SOMEPREFIX', constants.ROLE_STANDALONE) + + self.assertIsInstance(amp_flow, flow.Flow) + + self.assertIn(constants.FLAVOR, amp_flow.requires) + self.assertIn(constants.AMPHORA_ID, amp_flow.requires) + self.assertIn(constants.AMPHORA, amp_flow.provides) + + self.assertEqual(1, len(amp_flow.provides)) + self.assertEqual(2, len(amp_flow.requires)) + + amp_flow = self.AmpFlow._get_post_map_lb_subflow( + 'SOMEPREFIX', 'BOGUS_ROLE') + + self.assertIsInstance(amp_flow, flow.Flow) + + self.assertIn(constants.FLAVOR, amp_flow.requires) + self.assertIn(constants.AMPHORA_ID, amp_flow.requires) + self.assertIn(constants.AMPHORA, amp_flow.provides) + + self.assertEqual(1, len(amp_flow.provides)) + self.assertEqual(2, len(amp_flow.requires)) + + def test_update_amphora_config_flow(self, mock_get_net_driver): + + amp_flow = 
self.AmpFlow.update_amphora_config_flow() + + self.assertIsInstance(amp_flow, flow.Flow) + + self.assertIn(constants.AMPHORA, amp_flow.requires) + self.assertIn(constants.FLAVOR, amp_flow.requires) + + self.assertEqual(2, len(amp_flow.requires)) + self.assertEqual(0, len(amp_flow.provides)) diff --git a/octavia/tests/unit/controller/worker/v2/flows/test_health_monitor_flows.py b/octavia/tests/unit/controller/worker/v2/flows/test_health_monitor_flows.py new file mode 100644 index 0000000000..d1fb280fe2 --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/flows/test_health_monitor_flows.py @@ -0,0 +1,72 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from taskflow.patterns import linear_flow as flow + +from octavia.common import constants +from octavia.controller.worker.v2.flows import health_monitor_flows +import octavia.tests.unit.base as base + + +class TestHealthMonitorFlows(base.TestCase): + + def setUp(self): + self.HealthMonitorFlow = health_monitor_flows.HealthMonitorFlows() + + super(TestHealthMonitorFlows, self).setUp() + + def test_get_create_health_monitor_flow(self): + + health_mon_flow = (self.HealthMonitorFlow. + get_create_health_monitor_flow()) + + self.assertIsInstance(health_mon_flow, flow.Flow) + + self.assertIn(constants.LISTENERS, health_mon_flow.requires) + self.assertIn(constants.LOADBALANCER, health_mon_flow.requires) + self.assertIn(constants.POOL, health_mon_flow.requires) + + self.assertEqual(4, len(health_mon_flow.requires)) + self.assertEqual(0, len(health_mon_flow.provides)) + + def test_get_delete_health_monitor_flow(self): + + health_mon_flow = (self.HealthMonitorFlow. + get_delete_health_monitor_flow()) + + self.assertIsInstance(health_mon_flow, flow.Flow) + + self.assertIn(constants.HEALTH_MON, health_mon_flow.requires) + self.assertIn(constants.LISTENERS, health_mon_flow.requires) + self.assertIn(constants.LOADBALANCER, health_mon_flow.requires) + self.assertIn(constants.POOL, health_mon_flow.requires) + + self.assertEqual(4, len(health_mon_flow.requires)) + self.assertEqual(0, len(health_mon_flow.provides)) + + def test_get_update_health_monitor_flow(self): + + health_mon_flow = (self.HealthMonitorFlow. 
+ get_update_health_monitor_flow()) + + self.assertIsInstance(health_mon_flow, flow.Flow) + + self.assertIn(constants.LISTENERS, health_mon_flow.requires) + self.assertIn(constants.LOADBALANCER, health_mon_flow.requires) + self.assertIn(constants.HEALTH_MON, health_mon_flow.requires) + self.assertIn(constants.UPDATE_DICT, health_mon_flow.requires) + + self.assertEqual(5, len(health_mon_flow.requires)) + self.assertEqual(0, len(health_mon_flow.provides)) diff --git a/octavia/tests/unit/controller/worker/v2/flows/test_l7policy_flows.py b/octavia/tests/unit/controller/worker/v2/flows/test_l7policy_flows.py new file mode 100644 index 0000000000..2e462690c0 --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/flows/test_l7policy_flows.py @@ -0,0 +1,67 @@ +# Copyright 2016 Blue Box, an IBM Company +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from taskflow.patterns import linear_flow as flow + +from octavia.common import constants +from octavia.controller.worker.v2.flows import l7policy_flows +import octavia.tests.unit.base as base + + +class TestL7PolicyFlows(base.TestCase): + + def setUp(self): + self.L7PolicyFlow = l7policy_flows.L7PolicyFlows() + + super(TestL7PolicyFlows, self).setUp() + + def test_get_create_l7policy_flow(self): + + l7policy_flow = self.L7PolicyFlow.get_create_l7policy_flow() + + self.assertIsInstance(l7policy_flow, flow.Flow) + + self.assertIn(constants.LISTENERS, l7policy_flow.requires) + self.assertIn(constants.LOADBALANCER, l7policy_flow.requires) + + self.assertEqual(3, len(l7policy_flow.requires)) + self.assertEqual(0, len(l7policy_flow.provides)) + + def test_get_delete_l7policy_flow(self): + + l7policy_flow = self.L7PolicyFlow.get_delete_l7policy_flow() + + self.assertIsInstance(l7policy_flow, flow.Flow) + + self.assertIn(constants.LISTENERS, l7policy_flow.requires) + self.assertIn(constants.LOADBALANCER, l7policy_flow.requires) + self.assertIn(constants.L7POLICY, l7policy_flow.requires) + + self.assertEqual(3, len(l7policy_flow.requires)) + self.assertEqual(0, len(l7policy_flow.provides)) + + def test_get_update_l7policy_flow(self): + + l7policy_flow = self.L7PolicyFlow.get_update_l7policy_flow() + + self.assertIsInstance(l7policy_flow, flow.Flow) + + self.assertIn(constants.L7POLICY, l7policy_flow.requires) + self.assertIn(constants.LISTENERS, l7policy_flow.requires) + self.assertIn(constants.LOADBALANCER, l7policy_flow.requires) + self.assertIn(constants.UPDATE_DICT, l7policy_flow.requires) + + self.assertEqual(4, len(l7policy_flow.requires)) + self.assertEqual(0, len(l7policy_flow.provides)) diff --git a/octavia/tests/unit/controller/worker/v2/flows/test_l7rule_flows.py b/octavia/tests/unit/controller/worker/v2/flows/test_l7rule_flows.py new file mode 100644 index 0000000000..fa77a42a42 --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/flows/test_l7rule_flows.py @@ -0,0 +1,67 @@ +# Copyright 2016 Blue Box, an IBM Company +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance 
with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from taskflow.patterns import linear_flow as flow + +from octavia.common import constants +from octavia.controller.worker.v2.flows import l7rule_flows +import octavia.tests.unit.base as base + + +class TestL7RuleFlows(base.TestCase): + + def setUp(self): + self.L7RuleFlow = l7rule_flows.L7RuleFlows() + + super(TestL7RuleFlows, self).setUp() + + def test_get_create_l7rule_flow(self): + + l7rule_flow = self.L7RuleFlow.get_create_l7rule_flow() + + self.assertIsInstance(l7rule_flow, flow.Flow) + + self.assertIn(constants.LISTENERS, l7rule_flow.requires) + self.assertIn(constants.LOADBALANCER, l7rule_flow.requires) + + self.assertEqual(4, len(l7rule_flow.requires)) + self.assertEqual(0, len(l7rule_flow.provides)) + + def test_get_delete_l7rule_flow(self): + + l7rule_flow = self.L7RuleFlow.get_delete_l7rule_flow() + + self.assertIsInstance(l7rule_flow, flow.Flow) + + self.assertIn(constants.LISTENERS, l7rule_flow.requires) + self.assertIn(constants.LOADBALANCER, l7rule_flow.requires) + self.assertIn(constants.L7RULE, l7rule_flow.requires) + + self.assertEqual(4, len(l7rule_flow.requires)) + self.assertEqual(0, len(l7rule_flow.provides)) + + def test_get_update_l7rule_flow(self): + + l7rule_flow = self.L7RuleFlow.get_update_l7rule_flow() + + self.assertIsInstance(l7rule_flow, flow.Flow) + + self.assertIn(constants.L7RULE, l7rule_flow.requires) + self.assertIn(constants.LISTENERS, l7rule_flow.requires) + self.assertIn(constants.LOADBALANCER, l7rule_flow.requires) + self.assertIn(constants.UPDATE_DICT, l7rule_flow.requires) + + self.assertEqual(5, len(l7rule_flow.requires)) + self.assertEqual(0, len(l7rule_flow.provides)) diff --git a/octavia/tests/unit/controller/worker/v2/flows/test_listener_flows.py b/octavia/tests/unit/controller/worker/v2/flows/test_listener_flows.py new file mode 100644 index 0000000000..70f7d352cd --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/flows/test_listener_flows.py @@ -0,0 +1,90 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +import mock +from taskflow.patterns import linear_flow as flow + +from octavia.common import constants +from octavia.controller.worker.v2.flows import listener_flows +import octavia.tests.unit.base as base + + +# NOTE: We patch the get_network_driver for all the calls so we don't +# inadvertently make real calls. 
+@mock.patch('octavia.common.utils.get_network_driver') +class TestListenerFlows(base.TestCase): + + def setUp(self): + self.ListenerFlow = listener_flows.ListenerFlows() + + super(TestListenerFlows, self).setUp() + + def test_get_create_listener_flow(self, mock_get_net_driver): + + listener_flow = self.ListenerFlow.get_create_listener_flow() + + self.assertIsInstance(listener_flow, flow.Flow) + + self.assertIn(constants.LOADBALANCER, listener_flow.requires) + self.assertIn(constants.LISTENERS, listener_flow.requires) + + self.assertEqual(2, len(listener_flow.requires)) + self.assertEqual(0, len(listener_flow.provides)) + + def test_get_delete_listener_flow(self, mock_get_net_driver): + + listener_flow = self.ListenerFlow.get_delete_listener_flow() + + self.assertIsInstance(listener_flow, flow.Flow) + + self.assertIn(constants.LISTENER, listener_flow.requires) + self.assertIn(constants.LOADBALANCER, listener_flow.requires) + + self.assertEqual(2, len(listener_flow.requires)) + self.assertEqual(0, len(listener_flow.provides)) + + def test_get_delete_listener_internal_flow(self, mock_get_net_driver): + listener_flow = self.ListenerFlow.get_delete_listener_internal_flow( + 'test-listener') + + self.assertIsInstance(listener_flow, flow.Flow) + + self.assertIn('test-listener', listener_flow.requires) + self.assertIn(constants.LOADBALANCER, listener_flow.requires) + + self.assertEqual(2, len(listener_flow.requires)) + self.assertEqual(0, len(listener_flow.provides)) + + def test_get_update_listener_flow(self, mock_get_net_driver): + + listener_flow = self.ListenerFlow.get_update_listener_flow() + + self.assertIsInstance(listener_flow, flow.Flow) + + self.assertIn(constants.LISTENER, listener_flow.requires) + self.assertIn(constants.LOADBALANCER, listener_flow.requires) + self.assertIn(constants.UPDATE_DICT, listener_flow.requires) + self.assertIn(constants.LISTENERS, listener_flow.requires) + + self.assertEqual(4, len(listener_flow.requires)) + self.assertEqual(0, len(listener_flow.provides)) + + def test_get_create_all_listeners_flow(self, mock_get_net_driver): + listeners_flow = self.ListenerFlow.get_create_all_listeners_flow() + self.assertIsInstance(listeners_flow, flow.Flow) + self.assertIn(constants.LOADBALANCER, listeners_flow.requires) + self.assertIn(constants.LOADBALANCER_ID, listeners_flow.requires) + self.assertIn(constants.LOADBALANCER, listeners_flow.provides) + self.assertEqual(2, len(listeners_flow.requires)) + self.assertEqual(2, len(listeners_flow.provides)) diff --git a/octavia/tests/unit/controller/worker/v2/flows/test_load_balancer_flows.py b/octavia/tests/unit/controller/worker/v2/flows/test_load_balancer_flows.py new file mode 100644 index 0000000000..8f0825ab15 --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/flows/test_load_balancer_flows.py @@ -0,0 +1,227 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +import mock +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from taskflow.patterns import linear_flow as flow + +from octavia.common import constants +from octavia.common import exceptions +from octavia.controller.worker.v2.flows import load_balancer_flows +import octavia.tests.unit.base as base + + +# NOTE: We patch the get_network_driver for all the calls so we don't +# inadvertently make real calls. +@mock.patch('octavia.common.utils.get_network_driver') +class TestLoadBalancerFlows(base.TestCase): + + def setUp(self): + super(TestLoadBalancerFlows, self).setUp() + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + self.conf.config( + group="controller_worker", + amphora_driver='amphora_haproxy_rest_driver') + self.conf.config(group="nova", enable_anti_affinity=False) + self.LBFlow = load_balancer_flows.LoadBalancerFlows() + + def test_get_create_load_balancer_flow(self, mock_get_net_driver): + amp_flow = self.LBFlow.get_create_load_balancer_flow( + constants.TOPOLOGY_SINGLE) + self.assertIsInstance(amp_flow, flow.Flow) + self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) + self.assertIn(constants.AMPHORA, amp_flow.provides) + self.assertIn(constants.AMPHORA_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides) + + def test_get_create_active_standby_load_balancer_flow( + self, mock_get_net_driver): + amp_flow = self.LBFlow.get_create_load_balancer_flow( + constants.TOPOLOGY_ACTIVE_STANDBY) + self.assertIsInstance(amp_flow, flow.Flow) + self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) + self.assertIn(constants.AMPHORA, amp_flow.provides) + self.assertIn(constants.AMPHORA_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides) + + def test_get_create_anti_affinity_active_standby_load_balancer_flow( + self, mock_get_net_driver): + self.conf.config(group="nova", enable_anti_affinity=True) + + self._LBFlow = load_balancer_flows.LoadBalancerFlows() + amp_flow = self._LBFlow.get_create_load_balancer_flow( + constants.TOPOLOGY_ACTIVE_STANDBY) + self.assertIsInstance(amp_flow, flow.Flow) + self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) + self.assertIn(constants.SERVER_GROUP_ID, amp_flow.provides) + self.assertIn(constants.AMPHORA, amp_flow.provides) + self.assertIn(constants.AMPHORA_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides) + self.conf.config(group="nova", enable_anti_affinity=False) + + def test_get_create_bogus_topology_load_balancer_flow( + self, mock_get_net_driver): + self.assertRaises(exceptions.InvalidTopology, + self.LBFlow.get_create_load_balancer_flow, + 'BOGUS') + + def test_get_delete_load_balancer_flow(self, mock_get_net_driver): + lb_mock = mock.Mock() + listener_mock = mock.Mock() + listener_mock.id = '123' + lb_mock.listeners = [listener_mock] + + lb_flow, store = self.LBFlow.get_delete_load_balancer_flow(lb_mock) + + self.assertIsInstance(lb_flow, flow.Flow) + + self.assertIn(constants.LOADBALANCER, lb_flow.requires) + self.assertIn(constants.SERVER_GROUP_ID, lb_flow.requires) + + self.assertEqual(0, len(lb_flow.provides)) + self.assertEqual(2, len(lb_flow.requires)) + + def test_get_delete_load_balancer_flow_cascade(self, mock_get_net_driver): + lb_mock = mock.Mock() + listener_mock = mock.Mock() + listener_mock.id = '123' + lb_mock.listeners 
= [listener_mock] + pool_mock = mock.Mock() + pool_mock.id = '345' + lb_mock.pools = [pool_mock] + l7_mock = mock.Mock() + l7_mock.id = '678' + listener_mock.l7policies = [l7_mock] + + lb_flow, store = self.LBFlow.get_cascade_delete_load_balancer_flow( + lb_mock) + + self.assertIsInstance(lb_flow, flow.Flow) + self.assertEqual({'listener_123': listener_mock, + 'pool345': pool_mock}, store) + + self.assertIn(constants.LOADBALANCER, lb_flow.requires) + + self.assertEqual(1, len(lb_flow.provides)) + self.assertEqual(4, len(lb_flow.requires)) + + def test_get_update_load_balancer_flow(self, mock_get_net_driver): + + lb_flow = self.LBFlow.get_update_load_balancer_flow() + + self.assertIsInstance(lb_flow, flow.Flow) + + self.assertIn(constants.LOADBALANCER, lb_flow.requires) + + self.assertEqual(0, len(lb_flow.provides)) + self.assertEqual(3, len(lb_flow.requires)) + + def test_get_post_lb_amp_association_flow(self, mock_get_net_driver): + amp_flow = self.LBFlow.get_post_lb_amp_association_flow( + '123', constants.TOPOLOGY_SINGLE) + + self.assertIsInstance(amp_flow, flow.Flow) + + self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) + self.assertIn(constants.UPDATE_DICT, amp_flow.requires) + self.assertIn(constants.LOADBALANCER, amp_flow.provides) + + self.assertEqual(1, len(amp_flow.provides)) + self.assertEqual(2, len(amp_flow.requires)) + + # Test Active/Standby path + amp_flow = self.LBFlow.get_post_lb_amp_association_flow( + '123', constants.TOPOLOGY_ACTIVE_STANDBY) + + self.assertIsInstance(amp_flow, flow.Flow) + + self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) + self.assertIn(constants.UPDATE_DICT, amp_flow.requires) + self.assertIn(constants.LOADBALANCER, amp_flow.provides) + + self.assertEqual(2, len(amp_flow.provides)) + self.assertEqual(2, len(amp_flow.requires)) + + # Test mark_active=False + amp_flow = self.LBFlow.get_post_lb_amp_association_flow( + '123', constants.TOPOLOGY_ACTIVE_STANDBY) + + self.assertIsInstance(amp_flow, flow.Flow) + + self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) + self.assertIn(constants.UPDATE_DICT, amp_flow.requires) + self.assertIn(constants.LOADBALANCER, amp_flow.provides) + + self.assertEqual(2, len(amp_flow.provides)) + self.assertEqual(2, len(amp_flow.requires)) + + def test_get_create_load_balancer_flows_single_listeners( + self, mock_get_net_driver): + create_flow = ( + self.LBFlow.get_create_load_balancer_flow( + constants.TOPOLOGY_SINGLE, True + ) + ) + self.assertIsInstance(create_flow, flow.Flow) + self.assertIn(constants.LOADBALANCER_ID, create_flow.requires) + self.assertIn(constants.UPDATE_DICT, create_flow.requires) + + self.assertIn(constants.LISTENERS, create_flow.provides) + self.assertIn(constants.AMPHORA, create_flow.provides) + self.assertIn(constants.AMPHORA_ID, create_flow.provides) + self.assertIn(constants.COMPUTE_ID, create_flow.provides) + self.assertIn(constants.COMPUTE_OBJ, create_flow.provides) + self.assertIn(constants.LOADBALANCER, create_flow.provides) + self.assertIn(constants.DELTAS, create_flow.provides) + self.assertIn(constants.ADDED_PORTS, create_flow.provides) + self.assertIn(constants.VIP, create_flow.provides) + self.assertIn(constants.AMP_DATA, create_flow.provides) + self.assertIn(constants.AMPHORA_NETWORK_CONFIG, create_flow.provides) + + self.assertEqual(4, len(create_flow.requires)) + self.assertEqual(13, len(create_flow.provides), + create_flow.provides) + + def test_get_create_load_balancer_flows_active_standby_listeners( + self, mock_get_net_driver): + create_flow = ( + 
self.LBFlow.get_create_load_balancer_flow( + constants.TOPOLOGY_ACTIVE_STANDBY, True + ) + ) + self.assertIsInstance(create_flow, flow.Flow) + self.assertIn(constants.LOADBALANCER_ID, create_flow.requires) + self.assertIn(constants.UPDATE_DICT, create_flow.requires) + + self.assertIn(constants.LISTENERS, create_flow.provides) + self.assertIn(constants.AMPHORA, create_flow.provides) + self.assertIn(constants.AMPHORA_ID, create_flow.provides) + self.assertIn(constants.COMPUTE_ID, create_flow.provides) + self.assertIn(constants.COMPUTE_OBJ, create_flow.provides) + self.assertIn(constants.LOADBALANCER, create_flow.provides) + self.assertIn(constants.DELTAS, create_flow.provides) + self.assertIn(constants.ADDED_PORTS, create_flow.provides) + self.assertIn(constants.VIP, create_flow.provides) + self.assertIn(constants.AMP_DATA, create_flow.provides) + self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, + create_flow.provides) + + self.assertEqual(4, len(create_flow.requires)) + self.assertEqual(14, len(create_flow.provides), + create_flow.provides) diff --git a/octavia/tests/unit/controller/worker/v2/flows/test_member_flows.py b/octavia/tests/unit/controller/worker/v2/flows/test_member_flows.py new file mode 100644 index 0000000000..ac2c7d71ef --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/flows/test_member_flows.py @@ -0,0 +1,88 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import mock +from taskflow.patterns import linear_flow as flow + +from octavia.common import constants +from octavia.controller.worker.v2.flows import member_flows +import octavia.tests.unit.base as base + + +# NOTE: We patch the get_network_driver for all the calls so we don't +# inadvertently make real calls. 
+@mock.patch('octavia.common.utils.get_network_driver') +class TestMemberFlows(base.TestCase): + + def setUp(self): + self.MemberFlow = member_flows.MemberFlows() + + super(TestMemberFlows, self).setUp() + + def test_get_create_member_flow(self, mock_get_net_driver): + + member_flow = self.MemberFlow.get_create_member_flow() + + self.assertIsInstance(member_flow, flow.Flow) + + self.assertIn(constants.LISTENERS, member_flow.requires) + self.assertIn(constants.LOADBALANCER, member_flow.requires) + self.assertIn(constants.POOL, member_flow.requires) + + self.assertEqual(4, len(member_flow.requires)) + self.assertEqual(2, len(member_flow.provides)) + + def test_get_delete_member_flow(self, mock_get_net_driver): + + member_flow = self.MemberFlow.get_delete_member_flow() + + self.assertIsInstance(member_flow, flow.Flow) + + self.assertIn(constants.MEMBER, member_flow.requires) + self.assertIn(constants.LISTENERS, member_flow.requires) + self.assertIn(constants.LOADBALANCER, member_flow.requires) + self.assertIn(constants.POOL, member_flow.requires) + + self.assertEqual(4, len(member_flow.requires)) + self.assertEqual(0, len(member_flow.provides)) + + def test_get_update_member_flow(self, mock_get_net_driver): + + member_flow = self.MemberFlow.get_update_member_flow() + + self.assertIsInstance(member_flow, flow.Flow) + + self.assertIn(constants.MEMBER, member_flow.requires) + self.assertIn(constants.LISTENERS, member_flow.requires) + self.assertIn(constants.LOADBALANCER, member_flow.requires) + self.assertIn(constants.POOL, member_flow.requires) + self.assertIn(constants.UPDATE_DICT, member_flow.requires) + + self.assertEqual(5, len(member_flow.requires)) + self.assertEqual(0, len(member_flow.provides)) + + def test_get_batch_update_members_flow(self, mock_get_net_driver): + + member_flow = self.MemberFlow.get_batch_update_members_flow( + [], [], []) + + self.assertIsInstance(member_flow, flow.Flow) + + self.assertIn(constants.LISTENERS, member_flow.requires) + self.assertIn(constants.LOADBALANCER, member_flow.requires) + self.assertIn(constants.POOL, member_flow.requires) + + self.assertEqual(3, len(member_flow.requires)) + self.assertEqual(2, len(member_flow.provides)) diff --git a/octavia/tests/unit/controller/worker/v2/flows/test_pool_flows.py b/octavia/tests/unit/controller/worker/v2/flows/test_pool_flows.py new file mode 100644 index 0000000000..2b9d7377e3 --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/flows/test_pool_flows.py @@ -0,0 +1,77 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +from taskflow.patterns import linear_flow as flow + +from octavia.common import constants +from octavia.controller.worker.v2.flows import pool_flows +import octavia.tests.unit.base as base + + +class TestPoolFlows(base.TestCase): + + def setUp(self): + self.PoolFlow = pool_flows.PoolFlows() + + super(TestPoolFlows, self).setUp() + + def test_get_create_pool_flow(self): + + pool_flow = self.PoolFlow.get_create_pool_flow() + + self.assertIsInstance(pool_flow, flow.Flow) + + self.assertIn(constants.LISTENERS, pool_flow.requires) + self.assertIn(constants.LOADBALANCER, pool_flow.requires) + + self.assertEqual(3, len(pool_flow.requires)) + self.assertEqual(0, len(pool_flow.provides)) + + def test_get_delete_pool_flow(self): + + pool_flow = self.PoolFlow.get_delete_pool_flow() + + self.assertIsInstance(pool_flow, flow.Flow) + + self.assertIn(constants.LISTENERS, pool_flow.requires) + self.assertIn(constants.LOADBALANCER, pool_flow.requires) + self.assertIn(constants.POOL, pool_flow.requires) + + self.assertEqual(3, len(pool_flow.requires)) + self.assertEqual(1, len(pool_flow.provides)) + + def test_get_delete_pool_flow_internal(self): + + pool_flow = self.PoolFlow.get_delete_pool_flow_internal('test') + + self.assertIsInstance(pool_flow, flow.Flow) + self.assertIn('test', pool_flow.requires) + + self.assertEqual(1, len(pool_flow.requires)) + self.assertEqual(1, len(pool_flow.provides)) + + def test_get_update_pool_flow(self): + + pool_flow = self.PoolFlow.get_update_pool_flow() + + self.assertIsInstance(pool_flow, flow.Flow) + + self.assertIn(constants.POOL, pool_flow.requires) + self.assertIn(constants.LISTENERS, pool_flow.requires) + self.assertIn(constants.LOADBALANCER, pool_flow.requires) + self.assertIn(constants.UPDATE_DICT, pool_flow.requires) + + self.assertEqual(4, len(pool_flow.requires)) + self.assertEqual(0, len(pool_flow.provides)) diff --git a/octavia/tests/unit/controller/worker/v2/tasks/__init__.py b/octavia/tests/unit/controller/worker/v2/tasks/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/tasks/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py b/octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py new file mode 100644 index 0000000000..eb68c17e90 --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py @@ -0,0 +1,672 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from cryptography import fernet +import mock +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_utils import uuidutils +from taskflow.types import failure + +from octavia.amphorae.driver_exceptions import exceptions as driver_except +from octavia.common import constants +from octavia.common import data_models +from octavia.common import utils +from octavia.controller.worker.v2.tasks import amphora_driver_tasks +from octavia.db import repositories as repo +import octavia.tests.unit.base as base + + +AMP_ID = uuidutils.generate_uuid() +COMPUTE_ID = uuidutils.generate_uuid() +LISTENER_ID = uuidutils.generate_uuid() +LB_ID = uuidutils.generate_uuid() +CONN_MAX_RETRIES = 10 +CONN_RETRY_INTERVAL = 6 +FAKE_CONFIG_FILE = 'fake config file' + +_amphora_mock = mock.MagicMock() +_amphora_mock.id = AMP_ID +_amphora_mock.status = constants.AMPHORA_ALLOCATED +_load_balancer_mock = mock.MagicMock() +_load_balancer_mock.id = LB_ID +_listener_mock = mock.MagicMock() +_listener_mock.id = LISTENER_ID +_load_balancer_mock.listeners = [_listener_mock] +_vip_mock = mock.MagicMock() +_load_balancer_mock.vip = _vip_mock +_LB_mock = mock.MagicMock() +_amphorae_mock = [_amphora_mock] +_network_mock = mock.MagicMock() +_port_mock = mock.MagicMock() +_ports_mock = [_port_mock] +_session_mock = mock.MagicMock() + + +@mock.patch('octavia.db.repositories.AmphoraRepository.update') +@mock.patch('octavia.db.repositories.ListenerRepository.update') +@mock.patch('octavia.db.repositories.ListenerRepository.get', + return_value=_listener_mock) +@mock.patch('octavia.db.api.get_session', return_value=_session_mock) +@mock.patch('octavia.controller.worker.v2.tasks.amphora_driver_tasks.LOG') +@mock.patch('oslo_utils.uuidutils.generate_uuid', return_value=AMP_ID) +@mock.patch('stevedore.driver.DriverManager.driver') +class TestAmphoraDriverTasks(base.TestCase): + + def setUp(self): + + _LB_mock.amphorae = [_amphora_mock] + _LB_mock.id = LB_ID + conf = oslo_fixture.Config(cfg.CONF) + conf.config(group="haproxy_amphora", + active_connection_max_retries=CONN_MAX_RETRIES) + conf.config(group="haproxy_amphora", + active_connection_rety_interval=CONN_RETRY_INTERVAL) + conf.config(group="controller_worker", + loadbalancer_topology=constants.TOPOLOGY_SINGLE) + super(TestAmphoraDriverTasks, self).setUp() + + def test_amp_listener_update(self, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_update): + + timeout_dict = {constants.REQ_CONN_TIMEOUT: 1, + constants.REQ_READ_TIMEOUT: 2, + constants.CONN_MAX_RETRIES: 3, + constants.CONN_RETRY_INTERVAL: 4} + + amp_list_update_obj = amphora_driver_tasks.AmpListenersUpdate() + amp_list_update_obj.execute([_listener_mock], 0, + [_amphora_mock], timeout_dict) + + mock_driver.update_amphora_listeners.assert_called_once_with( + [_listener_mock], 0, [_amphora_mock], timeout_dict) + + mock_driver.update_amphora_listeners.side_effect = Exception('boom') + + amp_list_update_obj.execute([_listener_mock], 0, + [_amphora_mock], timeout_dict) + + mock_amphora_repo_update.assert_called_once_with( + _session_mock, AMP_ID, status=constants.ERROR) + + def test_listener_update(self, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_update): + + listener_update_obj = 
amphora_driver_tasks.ListenersUpdate() + listener_update_obj.execute(_load_balancer_mock, [_listener_mock]) + + mock_driver.update.assert_called_once_with(_listener_mock, _vip_mock) + + # Test the revert + amp = listener_update_obj.revert(_load_balancer_mock) + repo.ListenerRepository.update.assert_called_once_with( + _session_mock, + id=LISTENER_ID, + provisioning_status=constants.ERROR) + self.assertIsNone(amp) + + # Test the revert with exception + repo.ListenerRepository.update.reset_mock() + mock_listener_repo_update.side_effect = Exception('fail') + amp = listener_update_obj.revert(_load_balancer_mock) + repo.ListenerRepository.update.assert_called_once_with( + _session_mock, + id=LISTENER_ID, + provisioning_status=constants.ERROR) + self.assertIsNone(amp) + + def test_listeners_update(self, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_update): + listeners_update_obj = amphora_driver_tasks.ListenersUpdate() + listeners = [data_models.Listener(id='listener1'), + data_models.Listener(id='listener2')] + vip = data_models.Vip(ip_address='10.0.0.1') + lb = data_models.LoadBalancer(id='lb1', listeners=listeners, vip=vip) + listeners_update_obj.execute(lb, listeners) + mock_driver.update.assert_has_calls([mock.call(listeners[0], vip), + mock.call(listeners[1], vip)]) + self.assertEqual(2, mock_driver.update.call_count) + self.assertIsNotNone(listeners[0].load_balancer) + self.assertIsNotNone(listeners[1].load_balancer) + + # Test the revert + amp = listeners_update_obj.revert(lb) + expected_db_calls = [mock.call(_session_mock, + id=listeners[0].id, + provisioning_status=constants.ERROR), + mock.call(_session_mock, + id=listeners[1].id, + provisioning_status=constants.ERROR)] + repo.ListenerRepository.update.has_calls(expected_db_calls) + self.assertEqual(2, repo.ListenerRepository.update.call_count) + self.assertIsNone(amp) + + def test_listener_stop(self, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_update): + + listener_stop_obj = amphora_driver_tasks.ListenerStop() + listener_stop_obj.execute(_load_balancer_mock, _listener_mock) + + mock_driver.stop.assert_called_once_with(_listener_mock, _vip_mock) + + # Test the revert + amp = listener_stop_obj.revert(_listener_mock) + repo.ListenerRepository.update.assert_called_once_with( + _session_mock, + id=LISTENER_ID, + provisioning_status=constants.ERROR) + self.assertIsNone(amp) + + # Test the revert with exception + repo.ListenerRepository.update.reset_mock() + mock_listener_repo_update.side_effect = Exception('fail') + amp = listener_stop_obj.revert(_listener_mock) + repo.ListenerRepository.update.assert_called_once_with( + _session_mock, + id=LISTENER_ID, + provisioning_status=constants.ERROR) + self.assertIsNone(amp) + + def test_listener_start(self, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_update): + + listener_start_obj = amphora_driver_tasks.ListenerStart() + listener_start_obj.execute(_load_balancer_mock, _listener_mock) + + mock_driver.start.assert_called_once_with(_listener_mock, _vip_mock) + + # Test the revert + amp = listener_start_obj.revert(_listener_mock) + repo.ListenerRepository.update.assert_called_once_with( + _session_mock, + id=LISTENER_ID, + provisioning_status=constants.ERROR) + self.assertIsNone(amp) + + # Test the revert 
with exception + repo.ListenerRepository.update.reset_mock() + mock_listener_repo_update.side_effect = Exception('fail') + amp = listener_start_obj.revert(_listener_mock) + repo.ListenerRepository.update.assert_called_once_with( + _session_mock, + id=LISTENER_ID, + provisioning_status=constants.ERROR) + self.assertIsNone(amp) + + def test_listener_delete(self, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_update): + + listener_delete_obj = amphora_driver_tasks.ListenerDelete() + listener_delete_obj.execute(_load_balancer_mock, _listener_mock) + + mock_driver.delete.assert_called_once_with(_listener_mock, _vip_mock) + + # Test the revert + amp = listener_delete_obj.revert(_listener_mock) + repo.ListenerRepository.update.assert_called_once_with( + _session_mock, + id=LISTENER_ID, + provisioning_status=constants.ERROR) + self.assertIsNone(amp) + + # Test the revert with exception + repo.ListenerRepository.update.reset_mock() + mock_listener_repo_update.side_effect = Exception('fail') + amp = listener_delete_obj.revert(_listener_mock) + repo.ListenerRepository.update.assert_called_once_with( + _session_mock, + id=LISTENER_ID, + provisioning_status=constants.ERROR) + self.assertIsNone(amp) + + def test_amphora_get_info(self, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_update): + + amphora_get_info_obj = amphora_driver_tasks.AmphoraGetInfo() + amphora_get_info_obj.execute(_amphora_mock) + + mock_driver.get_info.assert_called_once_with( + _amphora_mock) + + def test_amphora_get_diagnostics(self, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_update): + + amphora_get_diagnostics_obj = (amphora_driver_tasks. + AmphoraGetDiagnostics()) + amphora_get_diagnostics_obj.execute(_amphora_mock) + + mock_driver.get_diagnostics.assert_called_once_with( + _amphora_mock) + + def test_amphora_finalize(self, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_update): + + amphora_finalize_obj = amphora_driver_tasks.AmphoraFinalize() + amphora_finalize_obj.execute(_amphora_mock) + + mock_driver.finalize_amphora.assert_called_once_with( + _amphora_mock) + + # Test revert + amp = amphora_finalize_obj.revert(None, _amphora_mock) + repo.AmphoraRepository.update.assert_called_once_with( + _session_mock, + id=AMP_ID, + status=constants.ERROR) + self.assertIsNone(amp) + + # Test revert with exception + repo.AmphoraRepository.update.reset_mock() + mock_amphora_repo_update.side_effect = Exception('fail') + amp = amphora_finalize_obj.revert(None, _amphora_mock) + repo.AmphoraRepository.update.assert_called_once_with( + _session_mock, + id=AMP_ID, + status=constants.ERROR) + self.assertIsNone(amp) + + def test_amphora_post_network_plug(self, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_update): + + amphora_post_network_plug_obj = (amphora_driver_tasks. + AmphoraPostNetworkPlug()) + amphora_post_network_plug_obj.execute(_amphora_mock, _ports_mock) + + (mock_driver.post_network_plug. 
+ assert_called_once_with)(_amphora_mock, _port_mock) + + # Test revert + amp = amphora_post_network_plug_obj.revert(None, _amphora_mock) + repo.AmphoraRepository.update.assert_called_once_with( + _session_mock, + id=AMP_ID, + status=constants.ERROR) + + self.assertIsNone(amp) + + # Test revert with exception + repo.AmphoraRepository.update.reset_mock() + mock_amphora_repo_update.side_effect = Exception('fail') + amp = amphora_post_network_plug_obj.revert(None, _amphora_mock) + repo.AmphoraRepository.update.assert_called_once_with( + _session_mock, + id=AMP_ID, + status=constants.ERROR) + + self.assertIsNone(amp) + + def test_amphorae_post_network_plug(self, mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_update): + mock_driver.get_network.return_value = _network_mock + _amphora_mock.id = AMP_ID + _amphora_mock.compute_id = COMPUTE_ID + _LB_mock.amphorae = [_amphora_mock] + amphora_post_network_plug_obj = (amphora_driver_tasks. + AmphoraePostNetworkPlug()) + + port_mock = mock.Mock() + _deltas_mock = {_amphora_mock.id: [port_mock]} + + amphora_post_network_plug_obj.execute(_LB_mock, _deltas_mock) + + (mock_driver.post_network_plug. + assert_called_once_with(_amphora_mock, port_mock)) + + # Test revert + amp = amphora_post_network_plug_obj.revert(None, _LB_mock, + _deltas_mock) + repo.AmphoraRepository.update.assert_called_once_with( + _session_mock, + id=AMP_ID, + status=constants.ERROR) + + self.assertIsNone(amp) + + # Test revert with exception + repo.AmphoraRepository.update.reset_mock() + mock_amphora_repo_update.side_effect = Exception('fail') + amp = amphora_post_network_plug_obj.revert(None, _LB_mock, + _deltas_mock) + repo.AmphoraRepository.update.assert_called_once_with( + _session_mock, + id=AMP_ID, + status=constants.ERROR) + + self.assertIsNone(amp) + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') + def test_amphora_post_vip_plug(self, + mock_loadbalancer_repo_update, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_update): + + amphorae_net_config_mock = mock.Mock() + amphora_post_vip_plug_obj = amphora_driver_tasks.AmphoraPostVIPPlug() + amphora_post_vip_plug_obj.execute(_amphora_mock, + _LB_mock, + amphorae_net_config_mock) + + mock_driver.post_vip_plug.assert_called_once_with( + _amphora_mock, _LB_mock, amphorae_net_config_mock) + + # Test revert + amp = amphora_post_vip_plug_obj.revert(None, _amphora_mock, _LB_mock) + repo.AmphoraRepository.update.assert_called_once_with( + _session_mock, + id=AMP_ID, + status=constants.ERROR) + repo.LoadBalancerRepository.update.assert_called_once_with( + _session_mock, + id=LB_ID, + provisioning_status=constants.ERROR) + + self.assertIsNone(amp) + + # Test revert with repo exceptions + repo.AmphoraRepository.update.reset_mock() + repo.LoadBalancerRepository.update.reset_mock() + mock_amphora_repo_update.side_effect = Exception('fail') + mock_loadbalancer_repo_update.side_effect = Exception('fail') + amp = amphora_post_vip_plug_obj.revert(None, _amphora_mock, _LB_mock) + repo.AmphoraRepository.update.assert_called_once_with( + _session_mock, + id=AMP_ID, + status=constants.ERROR) + repo.LoadBalancerRepository.update.assert_called_once_with( + _session_mock, + id=LB_ID, + provisioning_status=constants.ERROR) + + self.assertIsNone(amp) + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') + def 
test_amphorae_post_vip_plug(self, + mock_loadbalancer_repo_update, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_update): + + amphorae_net_config_mock = mock.Mock() + amphora_post_vip_plug_obj = amphora_driver_tasks.AmphoraePostVIPPlug() + amphora_post_vip_plug_obj.execute(_LB_mock, + amphorae_net_config_mock) + + mock_driver.post_vip_plug.assert_called_once_with( + _amphora_mock, _LB_mock, amphorae_net_config_mock) + + # Test revert + amp = amphora_post_vip_plug_obj.revert(None, _LB_mock) + repo.LoadBalancerRepository.update.assert_called_once_with( + _session_mock, + id=LB_ID, + provisioning_status=constants.ERROR) + + self.assertIsNone(amp) + + # Test revert with exception + repo.LoadBalancerRepository.update.reset_mock() + mock_loadbalancer_repo_update.side_effect = Exception('fail') + amp = amphora_post_vip_plug_obj.revert(None, _LB_mock) + repo.LoadBalancerRepository.update.assert_called_once_with( + _session_mock, + id=LB_ID, + provisioning_status=constants.ERROR) + + self.assertIsNone(amp) + + def test_amphora_cert_upload(self, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_update): + key = utils.get_six_compatible_server_certs_key_passphrase() + fer = fernet.Fernet(key) + pem_file_mock = fer.encrypt( + utils.get_six_compatible_value('test-pem-file')) + amphora_cert_upload_mock = amphora_driver_tasks.AmphoraCertUpload() + amphora_cert_upload_mock.execute(_amphora_mock, pem_file_mock) + + mock_driver.upload_cert_amp.assert_called_once_with( + _amphora_mock, fer.decrypt(pem_file_mock)) + + def test_amphora_update_vrrp_interface(self, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_update): + _LB_mock.amphorae = _amphorae_mock + + timeout_dict = {constants.CONN_MAX_RETRIES: CONN_MAX_RETRIES, + constants.CONN_RETRY_INTERVAL: CONN_RETRY_INTERVAL} + + amphora_update_vrrp_interface_obj = ( + amphora_driver_tasks.AmphoraUpdateVRRPInterface()) + amphora_update_vrrp_interface_obj.execute(_LB_mock) + mock_driver.get_vrrp_interface.assert_called_once_with( + _amphora_mock, timeout_dict=timeout_dict) + + # Test revert + mock_driver.reset_mock() + + _LB_mock.amphorae = _amphorae_mock + amphora_update_vrrp_interface_obj.revert("BADRESULT", _LB_mock) + mock_amphora_repo_update.assert_called_with(_session_mock, + _amphora_mock.id, + vrrp_interface=None) + + mock_driver.reset_mock() + mock_amphora_repo_update.reset_mock() + + failure_obj = failure.Failure.from_exception(Exception("TESTEXCEPT")) + amphora_update_vrrp_interface_obj.revert(failure_obj, _LB_mock) + self.assertFalse(mock_amphora_repo_update.called) + + # Test revert with exception + mock_driver.reset_mock() + mock_amphora_repo_update.reset_mock() + mock_amphora_repo_update.side_effect = Exception('fail') + + _LB_mock.amphorae = _amphorae_mock + amphora_update_vrrp_interface_obj.revert("BADRESULT", _LB_mock) + mock_amphora_repo_update.assert_called_with(_session_mock, + _amphora_mock.id, + vrrp_interface=None) + + def test_amphora_vrrp_update(self, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_update): + amphorae_network_config = mock.MagicMock() + amphora_vrrp_update_obj = ( + amphora_driver_tasks.AmphoraVRRPUpdate()) + 
amphora_vrrp_update_obj.execute(_LB_mock, amphorae_network_config) + mock_driver.update_vrrp_conf.assert_called_once_with( + _LB_mock, amphorae_network_config) + + def test_amphora_vrrp_stop(self, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_update): + amphora_vrrp_stop_obj = ( + amphora_driver_tasks.AmphoraVRRPStop()) + amphora_vrrp_stop_obj.execute(_LB_mock) + mock_driver.stop_vrrp_service.assert_called_once_with(_LB_mock) + + def test_amphora_vrrp_start(self, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_update): + amphora_vrrp_start_obj = ( + amphora_driver_tasks.AmphoraVRRPStart()) + amphora_vrrp_start_obj.execute(_LB_mock) + mock_driver.start_vrrp_service.assert_called_once_with(_LB_mock) + + def test_amphora_compute_connectivity_wait(self, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_update): + amp_compute_conn_wait_obj = ( + amphora_driver_tasks.AmphoraComputeConnectivityWait()) + amp_compute_conn_wait_obj.execute(_amphora_mock) + mock_driver.get_info.assert_called_once_with(_amphora_mock) + + mock_driver.get_info.side_effect = driver_except.TimeOutException() + self.assertRaises(driver_except.TimeOutException, + amp_compute_conn_wait_obj.execute, _amphora_mock) + mock_amphora_repo_update.assert_called_once_with( + _session_mock, AMP_ID, status=constants.ERROR) + + @mock.patch('octavia.amphorae.backends.agent.agent_jinja_cfg.' + 'AgentJinjaTemplater.build_agent_config') + def test_amphora_config_update(self, + mock_build_config, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_update): + mock_build_config.return_value = FAKE_CONFIG_FILE + amp_config_update_obj = amphora_driver_tasks.AmphoraConfigUpdate() + mock_driver.update_amphora_agent_config.side_effect = [ + None, None, driver_except.AmpDriverNotImplementedError, + driver_except.TimeOutException] + # With Flavor + flavor = {constants.LOADBALANCER_TOPOLOGY: + constants.TOPOLOGY_ACTIVE_STANDBY} + amp_config_update_obj.execute(_amphora_mock, flavor) + mock_build_config.assert_called_once_with( + _amphora_mock.id, constants.TOPOLOGY_ACTIVE_STANDBY) + mock_driver.update_amphora_agent_config.assert_called_once_with( + _amphora_mock, FAKE_CONFIG_FILE) + # With no Flavor + mock_driver.reset_mock() + mock_build_config.reset_mock() + amp_config_update_obj.execute(_amphora_mock, None) + mock_build_config.assert_called_once_with( + _amphora_mock.id, constants.TOPOLOGY_SINGLE) + mock_driver.update_amphora_agent_config.assert_called_once_with( + _amphora_mock, FAKE_CONFIG_FILE) + # With amphora that does not support config update + mock_driver.reset_mock() + mock_build_config.reset_mock() + amp_config_update_obj.execute(_amphora_mock, flavor) + mock_build_config.assert_called_once_with( + _amphora_mock.id, constants.TOPOLOGY_ACTIVE_STANDBY) + mock_driver.update_amphora_agent_config.assert_called_once_with( + _amphora_mock, FAKE_CONFIG_FILE) + # With an unknown exception + mock_driver.reset_mock() + mock_build_config.reset_mock() + self.assertRaises(driver_except.TimeOutException, + amp_config_update_obj.execute, + _amphora_mock, flavor) diff --git a/octavia/tests/unit/controller/worker/v2/tasks/test_cert_task.py 
b/octavia/tests/unit/controller/worker/v2/tasks/test_cert_task.py new file mode 100644 index 0000000000..42ef37da1f --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/tasks/test_cert_task.py @@ -0,0 +1,47 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from cryptography import fernet +import mock + +from oslo_config import cfg + +from octavia.certificates.common import local +from octavia.common import utils +from octavia.controller.worker.v2.tasks import cert_task +import octavia.tests.unit.base as base + +CONF = cfg.CONF + + +class TestCertTasks(base.TestCase): + + @mock.patch('stevedore.driver.DriverManager.driver') + def test_execute(self, mock_driver): + key = utils.get_six_compatible_server_certs_key_passphrase() + fer = fernet.Fernet(key) + dummy_cert = local.LocalCert( + utils.get_six_compatible_value('test_cert'), + utils.get_six_compatible_value('test_key')) + mock_driver.generate_cert_key_pair.side_effect = [dummy_cert] + c = cert_task.GenerateServerPEMTask() + pem = c.execute('123') + self.assertEqual( + fer.decrypt(pem), + dummy_cert.get_certificate() + + dummy_cert.get_private_key() + ) + mock_driver.generate_cert_key_pair.assert_called_once_with( + cn='123', validity=CONF.certificates.cert_validity_time) diff --git a/octavia/tests/unit/controller/worker/v2/tasks/test_compute_tasks.py b/octavia/tests/unit/controller/worker/v2/tasks/test_compute_tasks.py new file mode 100644 index 0000000000..c31ea07322 --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/tasks/test_compute_tasks.py @@ -0,0 +1,466 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +from cryptography import fernet +import mock +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_utils import uuidutils + +from octavia.common import constants +from octavia.common import exceptions +from octavia.common import utils +from octavia.controller.worker.v2.tasks import compute_tasks +from octavia.tests.common import utils as test_utils +import octavia.tests.unit.base as base + + +AMP_FLAVOR_ID = '10' +AMP_IMAGE_ID = '11' +AMP_IMAGE_TAG = 'glance_tag' +AMP_SSH_KEY_NAME = None +AMP_NET = [uuidutils.generate_uuid()] +AMP_SEC_GROUPS = [] +AMP_WAIT = 12 +AMPHORA_ID = uuidutils.generate_uuid() +COMPUTE_ID = uuidutils.generate_uuid() +LB_NET_IP = '192.0.2.1' +PORT_ID = uuidutils.generate_uuid() +SERVER_GRPOUP_ID = uuidutils.generate_uuid() + + +class TestException(Exception): + + def __init__(self, value): + self.value = value + + def __str__(self): + return repr(self.value) + +_amphora_mock = mock.MagicMock() +_amphora_mock.id = AMPHORA_ID +_amphora_mock.compute_id = COMPUTE_ID +_load_balancer_mock = mock.MagicMock() +_load_balancer_mock.amphorae = [_amphora_mock] +_port = mock.MagicMock() +_port.id = PORT_ID + + +class TestComputeTasks(base.TestCase): + + def setUp(self): + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + self.conf.config( + group="controller_worker", amp_flavor_id=AMP_FLAVOR_ID) + self.conf.config( + group="controller_worker", amp_image_id=AMP_IMAGE_ID) + self.conf.config( + group="controller_worker", amp_image_tag=AMP_IMAGE_TAG) + self.conf.config( + group="controller_worker", amp_ssh_key_name=AMP_SSH_KEY_NAME) + self.conf.config( + group="controller_worker", amp_boot_network_list=AMP_NET) + self.conf.config( + group="controller_worker", amp_active_wait_sec=AMP_WAIT) + self.conf.config( + group="controller_worker", amp_secgroup_list=AMP_SEC_GROUPS) + self.conf.config(group="controller_worker", amp_image_owner_id='') + + _amphora_mock.id = AMPHORA_ID + _amphora_mock.status = constants.AMPHORA_ALLOCATED + + logging_mock = mock.MagicMock() + compute_tasks.LOG = logging_mock + + super(TestComputeTasks, self).setUp() + + @mock.patch('jinja2.Environment.get_template') + @mock.patch('octavia.amphorae.backends.agent.' + 'agent_jinja_cfg.AgentJinjaTemplater.' 
+ 'build_agent_config', return_value='test_conf') + @mock.patch('stevedore.driver.DriverManager.driver') + def test_compute_create(self, mock_driver, mock_conf, mock_jinja): + + image_owner_id = uuidutils.generate_uuid() + self.conf.config( + group="controller_worker", amp_image_owner_id=image_owner_id) + + createcompute = compute_tasks.ComputeCreate() + + mock_driver.build.return_value = COMPUTE_ID + # Test execute() + compute_id = createcompute.execute(_amphora_mock.id, ports=[_port], + server_group_id=SERVER_GRPOUP_ID) + + # Validate that the build method was called properly + mock_driver.build.assert_called_once_with( + name="amphora-" + _amphora_mock.id, + amphora_flavor=AMP_FLAVOR_ID, + image_id=AMP_IMAGE_ID, + image_tag=AMP_IMAGE_TAG, + image_owner=image_owner_id, + key_name=AMP_SSH_KEY_NAME, + sec_groups=AMP_SEC_GROUPS, + network_ids=AMP_NET, + port_ids=[PORT_ID], + config_drive_files={'/etc/octavia/' + 'amphora-agent.conf': 'test_conf'}, + user_data=None, + server_group_id=SERVER_GRPOUP_ID) + + # Make sure it returns the expected compute_id + self.assertEqual(COMPUTE_ID, compute_id) + + # Test that a build exception is raised + createcompute = compute_tasks.ComputeCreate() + + self.assertRaises(TypeError, + createcompute.execute, + _amphora_mock, config_drive_files='test_cert') + + # Test revert() + + _amphora_mock.compute_id = COMPUTE_ID + + createcompute = compute_tasks.ComputeCreate() + createcompute.revert(compute_id, _amphora_mock.id) + + # Validate that the delete method was called properly + mock_driver.delete.assert_called_once_with( + COMPUTE_ID) + + # Test that a delete exception is not raised + + createcompute.revert(COMPUTE_ID, _amphora_mock.id) + + @mock.patch('jinja2.Environment.get_template') + @mock.patch('octavia.amphorae.backends.agent.' + 'agent_jinja_cfg.AgentJinjaTemplater.' + 'build_agent_config', return_value='test_conf') + @mock.patch('octavia.common.jinja.' + 'user_data_jinja_cfg.UserDataJinjaCfg.' 
+ 'build_user_data_config', return_value='test_conf') + @mock.patch('stevedore.driver.DriverManager.driver') + def test_compute_create_user_data(self, mock_driver, + mock_ud_conf, mock_conf, mock_jinja): + + self.conf.config( + group="controller_worker", user_data_config_drive=True) + mock_ud_conf.return_value = 'test_ud_conf' + createcompute = compute_tasks.ComputeCreate() + + mock_driver.build.return_value = COMPUTE_ID + # Test execute() + compute_id = createcompute.execute(_amphora_mock.id, ports=[_port]) + + # Validate that the build method was called properly + mock_driver.build.assert_called_once_with( + name="amphora-" + _amphora_mock.id, + amphora_flavor=AMP_FLAVOR_ID, + image_id=AMP_IMAGE_ID, + image_tag=AMP_IMAGE_TAG, + image_owner='', + key_name=AMP_SSH_KEY_NAME, + sec_groups=AMP_SEC_GROUPS, + network_ids=AMP_NET, + port_ids=[PORT_ID], + config_drive_files=None, + user_data='test_ud_conf', + server_group_id=None) + + # Make sure it returns the expected compute_id + self.assertEqual(COMPUTE_ID, compute_id) + + # Test that a build exception is raised + createcompute = compute_tasks.ComputeCreate() + + self.assertRaises(TypeError, + createcompute.execute, + _amphora_mock, config_drive_files='test_cert') + + # Test revert() + + _amphora_mock.compute_id = COMPUTE_ID + + createcompute = compute_tasks.ComputeCreate() + createcompute.revert(compute_id, _amphora_mock.id) + + # Validate that the delete method was called properly + mock_driver.delete.assert_called_once_with( + COMPUTE_ID) + + # Test that a delete exception is not raised + + createcompute.revert(COMPUTE_ID, _amphora_mock.id) + + @mock.patch('jinja2.Environment.get_template') + @mock.patch('octavia.amphorae.backends.agent.' + 'agent_jinja_cfg.AgentJinjaTemplater.' + 'build_agent_config', return_value='test_conf') + @mock.patch('stevedore.driver.DriverManager.driver') + def test_compute_create_without_ssh_access(self, mock_driver, + mock_conf, mock_jinja): + + createcompute = compute_tasks.ComputeCreate() + + mock_driver.build.return_value = COMPUTE_ID + self.conf.config( + group="controller_worker", amp_ssh_access_allowed=False) + self.conf.config( + group="controller_worker", user_data_config_drive=False) + + # Test execute() + compute_id = createcompute.execute(_amphora_mock.id, ports=[_port], + server_group_id=SERVER_GRPOUP_ID) + + # Validate that the build method was called properly + mock_driver.build.assert_called_once_with( + name="amphora-" + _amphora_mock.id, + amphora_flavor=AMP_FLAVOR_ID, + image_id=AMP_IMAGE_ID, + image_tag=AMP_IMAGE_TAG, + image_owner='', + key_name=None, + sec_groups=AMP_SEC_GROUPS, + network_ids=AMP_NET, + port_ids=[PORT_ID], + config_drive_files={'/etc/octavia/' + 'amphora-agent.conf': 'test_conf'}, + user_data=None, + server_group_id=SERVER_GRPOUP_ID) + + self.assertEqual(COMPUTE_ID, compute_id) + + # Test that a build exception is raised + createcompute = compute_tasks.ComputeCreate() + + self.assertRaises(TypeError, + createcompute.execute, + _amphora_mock, config_drive_files='test_cert') + + # Test revert() + + _amphora_mock.compute_id = COMPUTE_ID + + createcompute = compute_tasks.ComputeCreate() + createcompute.revert(compute_id, _amphora_mock.id) + + # Validate that the delete method was called properly + mock_driver.delete.assert_called_once_with( + COMPUTE_ID) + + # Test that a delete exception is not raised + + createcompute.revert(COMPUTE_ID, _amphora_mock.id) + + @mock.patch('jinja2.Environment.get_template') + @mock.patch('octavia.amphorae.backends.agent.' 
+ 'agent_jinja_cfg.AgentJinjaTemplater.' + 'build_agent_config', return_value='test_conf') + @mock.patch('stevedore.driver.DriverManager.driver') + def test_compute_create_cert(self, mock_driver, mock_conf, mock_jinja): + createcompute = compute_tasks.CertComputeCreate() + key = utils.get_six_compatible_server_certs_key_passphrase() + fer = fernet.Fernet(key) + + mock_driver.build.return_value = COMPUTE_ID + path = '/etc/octavia/certs/ca_01.pem' + self.useFixture(test_utils.OpenFixture(path, 'test')) + # Test execute() + test_cert = fer.encrypt( + utils.get_six_compatible_value('test_cert') + ) + compute_id = createcompute.execute(_amphora_mock.id, test_cert, + server_group_id=SERVER_GRPOUP_ID + ) + + # Validate that the build method was called properly + mock_driver.build.assert_called_once_with( + name="amphora-" + _amphora_mock.id, + amphora_flavor=AMP_FLAVOR_ID, + image_id=AMP_IMAGE_ID, + image_tag=AMP_IMAGE_TAG, + image_owner='', + key_name=AMP_SSH_KEY_NAME, + sec_groups=AMP_SEC_GROUPS, + network_ids=AMP_NET, + port_ids=[], + user_data=None, + config_drive_files={ + '/etc/octavia/certs/server.pem': fer.decrypt(test_cert), + '/etc/octavia/certs/client_ca.pem': 'test', + '/etc/octavia/amphora-agent.conf': 'test_conf'}, + server_group_id=SERVER_GRPOUP_ID) + + self.assertEqual(COMPUTE_ID, compute_id) + + # Test that a build exception is raised + self.useFixture(test_utils.OpenFixture(path, 'test')) + + createcompute = compute_tasks.ComputeCreate() + self.assertRaises(TypeError, + createcompute.execute, + _amphora_mock, + config_drive_files=test_cert) + + # Test revert() + + _amphora_mock.compute_id = COMPUTE_ID + + createcompute = compute_tasks.ComputeCreate() + createcompute.revert(compute_id, _amphora_mock.id) + + # Validate that the delete method was called properly + mock_driver.delete.assert_called_once_with(COMPUTE_ID) + + # Test that a delete exception is not raised + + createcompute.revert(COMPUTE_ID, _amphora_mock.id) + + @mock.patch('octavia.controller.worker.amphora_rate_limit' + '.AmphoraBuildRateLimit.remove_from_build_req_queue') + @mock.patch('stevedore.driver.DriverManager.driver') + @mock.patch('time.sleep') + def test_compute_wait(self, + mock_time_sleep, + mock_driver, + mock_remove_from_build_queue): + + self.conf.config(group='haproxy_amphora', build_rate_limit=5) + _amphora_mock.compute_id = COMPUTE_ID + _amphora_mock.status = constants.ACTIVE + _amphora_mock.lb_network_ip = LB_NET_IP + + mock_driver.get_amphora.return_value = _amphora_mock, None + + computewait = compute_tasks.ComputeActiveWait() + computewait.execute(COMPUTE_ID, AMPHORA_ID) + + mock_driver.get_amphora.assert_called_once_with(COMPUTE_ID) + + _amphora_mock.status = constants.DELETED + + self.assertRaises(exceptions.ComputeWaitTimeoutException, + computewait.execute, + _amphora_mock, AMPHORA_ID) + + @mock.patch('octavia.controller.worker.amphora_rate_limit' + '.AmphoraBuildRateLimit.remove_from_build_req_queue') + @mock.patch('stevedore.driver.DriverManager.driver') + @mock.patch('time.sleep') + def test_compute_wait_error_status(self, + mock_time_sleep, + mock_driver, + mock_remove_from_build_queue): + + self.conf.config(group='haproxy_amphora', build_rate_limit=5) + _amphora_mock.compute_id = COMPUTE_ID + _amphora_mock.status = constants.ACTIVE + _amphora_mock.lb_network_ip = LB_NET_IP + + mock_driver.get_amphora.return_value = _amphora_mock, None + + computewait = compute_tasks.ComputeActiveWait() + computewait.execute(COMPUTE_ID, AMPHORA_ID) + + 
mock_driver.get_amphora.assert_called_once_with(COMPUTE_ID) + + _amphora_mock.status = constants.ERROR + + self.assertRaises(exceptions.ComputeBuildException, + computewait.execute, + _amphora_mock, AMPHORA_ID) + + @mock.patch('octavia.controller.worker.amphora_rate_limit' + '.AmphoraBuildRateLimit.remove_from_build_req_queue') + @mock.patch('stevedore.driver.DriverManager.driver') + @mock.patch('time.sleep') + def test_compute_wait_skipped(self, + mock_time_sleep, + mock_driver, + mock_remove_from_build_queue): + _amphora_mock.compute_id = COMPUTE_ID + _amphora_mock.status = constants.ACTIVE + _amphora_mock.lb_network_ip = LB_NET_IP + + mock_driver.get_amphora.return_value = _amphora_mock, None + + computewait = compute_tasks.ComputeActiveWait() + computewait.execute(COMPUTE_ID, AMPHORA_ID) + + mock_driver.get_amphora.assert_called_once_with(COMPUTE_ID) + mock_remove_from_build_queue.assert_not_called() + + @mock.patch('stevedore.driver.DriverManager.driver') + def test_delete_amphorae_on_load_balancer(self, mock_driver): + + delete_amps = compute_tasks.DeleteAmphoraeOnLoadBalancer() + delete_amps.execute(_load_balancer_mock) + + mock_driver.delete.assert_called_once_with(COMPUTE_ID) + + @mock.patch('stevedore.driver.DriverManager.driver') + def test_compute_delete(self, mock_driver): + + delete_compute = compute_tasks.ComputeDelete() + delete_compute.execute(_amphora_mock) + + mock_driver.delete.assert_called_once_with(COMPUTE_ID) + + @mock.patch('stevedore.driver.DriverManager.driver') + def test_nova_server_group_create(self, mock_driver): + nova_sever_group_obj = compute_tasks.NovaServerGroupCreate() + + server_group_test_id = '6789' + fake_server_group = mock.MagicMock() + fake_server_group.id = server_group_test_id + fake_server_group.policy = 'anti-affinity' + mock_driver.create_server_group.return_value = fake_server_group + + # Test execute() + sg_id = nova_sever_group_obj.execute('123') + + # Validate that the build method was called properly + mock_driver.create_server_group.assert_called_once_with( + 'octavia-lb-123', 'anti-affinity') + + # Make sure it returns the expected server group_id + self.assertEqual(server_group_test_id, sg_id) + + # Test revert() + nova_sever_group_obj.revert(sg_id) + + # Validate that the delete_server_group method was called properly + mock_driver.delete_server_group.assert_called_once_with(sg_id) + + # Test revert with exception + mock_driver.reset_mock() + mock_driver.delete_server_group.side_effect = Exception('DelSGExcept') + nova_sever_group_obj.revert(sg_id) + mock_driver.delete_server_group.assert_called_once_with(sg_id) + + @mock.patch('stevedore.driver.DriverManager.driver') + def test_nova_server_group_delete_with_sever_group_id(self, mock_driver): + nova_sever_group_obj = compute_tasks.NovaServerGroupDelete() + sg_id = '6789' + nova_sever_group_obj.execute(sg_id) + mock_driver.delete_server_group.assert_called_once_with(sg_id) + + @mock.patch('stevedore.driver.DriverManager.driver') + def test_nova_server_group_delete_with_None(self, mock_driver): + nova_sever_group_obj = compute_tasks.NovaServerGroupDelete() + sg_id = None + nova_sever_group_obj.execute(sg_id) + self.assertFalse(mock_driver.delete_server_group.called, sg_id) diff --git a/octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks.py b/octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks.py new file mode 100644 index 0000000000..60bff76235 --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks.py @@ -0,0 +1,2727 @@ +# 
Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import random + +from cryptography import fernet +import mock +from oslo_db import exception as odb_exceptions +from oslo_utils import uuidutils +from sqlalchemy.orm import exc +from taskflow.types import failure + +from octavia.common import constants +from octavia.common import data_models +from octavia.common import utils +from octavia.controller.worker.v2.tasks import database_tasks +from octavia.db import repositories as repo +import octavia.tests.unit.base as base + + +AMP_ID = uuidutils.generate_uuid() +COMPUTE_ID = uuidutils.generate_uuid() +LB_ID = uuidutils.generate_uuid() +SERVER_GROUP_ID = uuidutils.generate_uuid() +LB_NET_IP = '192.0.2.2' +LISTENER_ID = uuidutils.generate_uuid() +POOL_ID = uuidutils.generate_uuid() +HM_ID = uuidutils.generate_uuid() +MEMBER_ID = uuidutils.generate_uuid() +PORT_ID = uuidutils.generate_uuid() +SUBNET_ID = uuidutils.generate_uuid() +VRRP_PORT_ID = uuidutils.generate_uuid() +HA_PORT_ID = uuidutils.generate_uuid() +L7POLICY_ID = uuidutils.generate_uuid() +L7RULE_ID = uuidutils.generate_uuid() +VIP_IP = '192.0.5.2' +VRRP_IP = '192.0.5.3' +HA_IP = '192.0.5.4' +AMP_ROLE = 'FAKE_ROLE' +VRRP_ID = random.randrange(255) +VRRP_PRIORITY = random.randrange(100) +CACHED_ZONE = 'zone1' +IMAGE_ID = uuidutils.generate_uuid() +COMPUTE_FLAVOR = uuidutils.generate_uuid() + +_amphora_mock = mock.MagicMock() +_amphora_mock.id = AMP_ID +_amphora_mock.compute_id = COMPUTE_ID +_amphora_mock.lb_network_ip = LB_NET_IP +_amphora_mock.vrrp_ip = VRRP_IP +_amphora_mock.ha_ip = HA_IP +_amphora_mock.ha_port_id = HA_PORT_ID +_amphora_mock.vrrp_port_id = VRRP_PORT_ID +_amphora_mock.role = AMP_ROLE +_amphora_mock.vrrp_id = VRRP_ID +_amphora_mock.vrrp_priority = VRRP_PRIORITY +_amphorae = [_amphora_mock] +_loadbalancer_mock = mock.MagicMock() +_loadbalancer_mock.id = LB_ID +_loadbalancer_mock.amphorae = [_amphora_mock] +_pool_mock = mock.MagicMock() +_pool_mock.id = POOL_ID +_l7policy_mock = mock.MagicMock() +_l7policy_mock.id = L7POLICY_ID +_l7rule_mock = mock.MagicMock() +_l7rule_mock.id = L7RULE_ID +_listener_mock = mock.MagicMock() +_listener_mock.id = LISTENER_ID +_tf_failure_mock = mock.Mock(spec=failure.Failure) +_vip_mock = mock.MagicMock() +_vip_mock.port_id = PORT_ID +_vip_mock.subnet_id = SUBNET_ID +_vip_mock.ip_address = VIP_IP +_vrrp_group_mock = mock.MagicMock() +_cert_mock = mock.MagicMock() +_compute_mock = mock.MagicMock() +_compute_mock.lb_network_ip = LB_NET_IP +_compute_mock.cached_zone = CACHED_ZONE +_compute_mock.image_id = IMAGE_ID +_compute_mock.compute_flavor = COMPUTE_FLAVOR + + +@mock.patch('octavia.db.repositories.AmphoraRepository.delete') +@mock.patch('octavia.db.repositories.AmphoraRepository.update') +@mock.patch('octavia.db.repositories.ListenerRepository.update') +@mock.patch('octavia.db.repositories.LoadBalancerRepository.update') +@mock.patch('octavia.db.api.get_session', return_value='TEST') 
+@mock.patch('octavia.controller.worker.v2.tasks.database_tasks.LOG') +@mock.patch('oslo_utils.uuidutils.generate_uuid', return_value=AMP_ID) +class TestDatabaseTasks(base.TestCase): + + def setUp(self): + + self.health_mon_mock = mock.MagicMock() + self.health_mon_mock.id = HM_ID + self.health_mon_mock.pool_id = POOL_ID + + self.listener_mock = mock.MagicMock() + self.listener_mock.id = LISTENER_ID + + self.loadbalancer_mock = mock.MagicMock() + self.loadbalancer_mock.id = LB_ID + + self.member_mock = mock.MagicMock() + self.member_mock.id = MEMBER_ID + + self.pool_mock = mock.MagicMock() + self.pool_mock.id = POOL_ID + self.pool_mock.health_monitor = self.health_mon_mock + + self.l7policy_mock = mock.MagicMock() + self.l7policy_mock.id = L7POLICY_ID + + self.l7rule_mock = mock.MagicMock() + self.l7rule_mock.id = L7RULE_ID + self.l7rule_mock.l7policy = self.l7policy_mock + + super(TestDatabaseTasks, self).setUp() + + @mock.patch('octavia.db.repositories.AmphoraRepository.create', + return_value=_amphora_mock) + def test_create_amphora_in_db(self, + mock_create, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + create_amp_in_db = database_tasks.CreateAmphoraInDB() + amp_id = create_amp_in_db.execute() + + repo.AmphoraRepository.create.assert_called_once_with( + 'TEST', + id=AMP_ID, + status=constants.PENDING_CREATE, + cert_busy=False) + + self.assertEqual(_amphora_mock.id, amp_id) + + # Test the revert + create_amp_in_db.revert(_tf_failure_mock) + self.assertFalse(mock_amphora_repo_delete.called) + + mock_amphora_repo_delete.reset_mock() + create_amp_in_db.revert(result='AMP') + self.assertTrue(mock_amphora_repo_delete.called) + mock_amphora_repo_delete.assert_called_once_with( + 'TEST', + id='AMP') + + # Test revert with exception + mock_amphora_repo_delete.reset_mock() + mock_amphora_repo_delete.side_effect = Exception('fail') + create_amp_in_db.revert(result='AMP') + self.assertTrue(mock_amphora_repo_delete.called) + mock_amphora_repo_delete.assert_called_once_with( + 'TEST', + id='AMP') + + @mock.patch('octavia.db.repositories.ListenerRepository.delete') + def test_delete_listener_in_db(self, + mock_listener_repo_delete, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + delete_listener = database_tasks.DeleteListenerInDB() + delete_listener.execute(_listener_mock) + + repo.ListenerRepository.delete.assert_called_once_with( + 'TEST', + id=LISTENER_ID) + + @mock.patch('octavia.db.repositories.HealthMonitorRepository.update') + @mock.patch('octavia.db.repositories.HealthMonitorRepository.delete') + def test_delete_health_monitor_in_db(self, + mock_health_mon_repo_delete, + mock_health_mon_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + delete_health_mon = database_tasks.DeleteHealthMonitorInDB() + delete_health_mon.execute(self.health_mon_mock) + + repo.HealthMonitorRepository.delete.assert_called_once_with( + 'TEST', id=HM_ID) + + # Test the revert + mock_health_mon_repo_delete.reset_mock() + delete_health_mon.revert(self.health_mon_mock) + + repo.HealthMonitorRepository.update.assert_called_once_with( + 'TEST', id=HM_ID, provisioning_status=constants.ERROR) + + # Test Not Found Exception + 
mock_health_mon_repo_delete.reset_mock() + mock_health_mon_repo_delete.side_effect = [exc.NoResultFound()] + delete_health_mon.execute(self.health_mon_mock) + + repo.HealthMonitorRepository.delete.assert_called_once_with( + 'TEST', id=HM_ID) + + @mock.patch('octavia.db.repositories.HealthMonitorRepository.update') + @mock.patch('octavia.db.repositories.HealthMonitorRepository.delete') + def test_delete_health_monitor_in_db_by_pool(self, + mock_health_mon_repo_delete, + mock_health_mon_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + delete_health_mon = database_tasks.DeleteHealthMonitorInDBByPool() + delete_health_mon.execute(self.pool_mock) + + repo.HealthMonitorRepository.delete.assert_called_once_with( + 'TEST', + id=HM_ID) + + # Test the revert + mock_health_mon_repo_delete.reset_mock() + delete_health_mon.revert(self.pool_mock) + + repo.HealthMonitorRepository.update.assert_called_once_with( + 'TEST', id=HM_ID, provisioning_status=constants.ERROR) + +# TODO(johnsom) fix once provisioning status added +# repo.HealthMonitorRepository.update.assert_called_once_with( +# 'TEST', +# POOL_ID, +# provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.MemberRepository.delete') + def test_delete_member_in_db(self, + mock_member_repo_delete, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + delete_member = database_tasks.DeleteMemberInDB() + delete_member.execute(self.member_mock) + + repo.MemberRepository.delete.assert_called_once_with( + 'TEST', + id=MEMBER_ID) + + # Test the revert + + mock_member_repo_delete.reset_mock() + delete_member.revert(self.member_mock) + +# TODO(johnsom) Fix +# repo.MemberRepository.delete.assert_called_once_with( +# 'TEST', +# MEMBER_ID) + + @mock.patch('octavia.db.repositories.PoolRepository.delete') + def test_delete_pool_in_db(self, + mock_pool_repo_delete, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + delete_pool = database_tasks.DeletePoolInDB() + delete_pool.execute(_pool_mock) + + repo.PoolRepository.delete.assert_called_once_with( + 'TEST', + id=POOL_ID) + + # Test the revert + + mock_pool_repo_delete.reset_mock() + delete_pool.revert(_pool_mock) + +# TODO(johnsom) Fix +# repo.PoolRepository.update.assert_called_once_with( +# 'TEST', +# POOL_ID, +# operating_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.L7PolicyRepository.delete') + def test_delete_l7policy_in_db(self, + mock_l7policy_repo_delete, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + delete_l7policy = database_tasks.DeleteL7PolicyInDB() + delete_l7policy.execute(_l7policy_mock) + + repo.L7PolicyRepository.delete.assert_called_once_with( + 'TEST', + id=L7POLICY_ID) + + # Test the revert + + mock_l7policy_repo_delete.reset_mock() + delete_l7policy.revert(_l7policy_mock) + +# TODO(sbalukoff) Fix +# repo.ListenerRepository.update.assert_called_once_with( +# 'TEST', +# LISTENER_ID, +# operating_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.L7RuleRepository.delete') + def test_delete_l7rule_in_db(self, + 
mock_l7rule_repo_delete, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + delete_l7rule = database_tasks.DeleteL7RuleInDB() + delete_l7rule.execute(_l7rule_mock) + + repo.L7RuleRepository.delete.assert_called_once_with( + 'TEST', + id=L7RULE_ID) + + # Test the revert + + mock_l7rule_repo_delete.reset_mock() + delete_l7rule.revert(_l7rule_mock) + +# TODO(sbalukoff) Fix +# repo.ListenerRepository.update.assert_called_once_with( +# 'TEST', +# LISTENER_ID, +# operating_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.AmphoraRepository.get', + return_value=_amphora_mock) + def test_reload_amphora(self, + mock_amp_get, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + reload_amp = database_tasks.ReloadAmphora() + amp = reload_amp.execute(AMP_ID) + + repo.AmphoraRepository.get.assert_called_once_with( + 'TEST', + id=AMP_ID) + + self.assertEqual(_amphora_mock, amp) + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get', + return_value=_loadbalancer_mock) + def test_reload_load_balancer(self, + mock_lb_get, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + reload_lb = database_tasks.ReloadLoadBalancer() + lb = reload_lb.execute(LB_ID) + + repo.LoadBalancerRepository.get.assert_called_once_with( + 'TEST', + id=LB_ID) + + self.assertEqual(_loadbalancer_mock, lb) + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get', + return_value=_loadbalancer_mock) + @mock.patch('octavia.db.repositories.VipRepository.update') + def test_update_vip_after_allocation(self, + mock_vip_update, + mock_loadbalancer_get, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + update_vip = database_tasks.UpdateVIPAfterAllocation() + loadbalancer = update_vip.execute(LB_ID, _vip_mock) + + self.assertEqual(_loadbalancer_mock, loadbalancer) + mock_vip_update.assert_called_once_with('TEST', + LB_ID, + port_id=PORT_ID, + subnet_id=SUBNET_ID, + ip_address=VIP_IP) + mock_loadbalancer_get.assert_called_once_with('TEST', + id=LB_ID) + + def test_update_amphora_vip_data(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + update_amp_vip_data = database_tasks.UpdateAmphoraeVIPData() + update_amp_vip_data.execute(_amphorae) + + mock_amphora_repo_update.assert_called_once_with( + 'TEST', + AMP_ID, + vrrp_ip=VRRP_IP, + ha_ip=HA_IP, + vrrp_port_id=VRRP_PORT_ID, + ha_port_id=HA_PORT_ID, + vrrp_id=1) + + def test_update_amphora_vip_data2(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + update_amp_vip_data2 = database_tasks.UpdateAmphoraVIPData() + update_amp_vip_data2.execute(_amphorae[0]) + + mock_amphora_repo_update.assert_called_once_with( + 'TEST', + AMP_ID, + vrrp_ip=VRRP_IP, + ha_ip=HA_IP, + vrrp_port_id=VRRP_PORT_ID, + ha_port_id=HA_PORT_ID, + vrrp_id=1) + + def test_update_amp_failover_details(self, + mock_generate_uuid, + mock_LOG, + 
mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + update_amp_fo_details = database_tasks.UpdateAmpFailoverDetails() + update_amp_fo_details.execute(_amphora_mock, _amphora_mock) + + mock_amphora_repo_update.assert_called_once_with( + 'TEST', + AMP_ID, + vrrp_ip=VRRP_IP, + ha_ip=HA_IP, + vrrp_port_id=VRRP_PORT_ID, + ha_port_id=HA_PORT_ID, + vrrp_id=VRRP_ID) + + @mock.patch('octavia.db.repositories.AmphoraRepository.associate') + def test_associate_failover_amphora_with_lb_id( + self, + mock_associate, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + assoc_fo_amp_lb_id = database_tasks.AssociateFailoverAmphoraWithLBID() + assoc_fo_amp_lb_id.execute(AMP_ID, LB_ID) + + mock_associate.assert_called_once_with('TEST', + load_balancer_id=LB_ID, + amphora_id=AMP_ID) + + # Test revert + assoc_fo_amp_lb_id.revert(AMP_ID) + + mock_amphora_repo_update.assert_called_once_with('TEST', + AMP_ID, + loadbalancer_id=None) + + # Test revert with exception + mock_amphora_repo_update.reset_mock() + mock_amphora_repo_update.side_effect = Exception('fail') + + assoc_fo_amp_lb_id.revert(AMP_ID) + + mock_amphora_repo_update.assert_called_once_with('TEST', + AMP_ID, + loadbalancer_id=None) + + @mock.patch('octavia.db.repositories.AmphoraRepository.' + 'allocate_and_associate', + side_effect=[_amphora_mock, None]) + def test_map_loadbalancer_to_amphora(self, + mock_allocate_and_associate, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + map_lb_to_amp = database_tasks.MapLoadbalancerToAmphora() + amp_id = map_lb_to_amp.execute(self.loadbalancer_mock.id) + + repo.AmphoraRepository.allocate_and_associate.assert_called_once_with( + 'TEST', + LB_ID) + + self.assertEqual(_amphora_mock.id, amp_id) + + amp_id = map_lb_to_amp.execute(self.loadbalancer_mock.id) + + self.assertIsNone(amp_id) + + # Test revert + map_lb_to_amp.revert(None, self.loadbalancer_mock.id) + repo.LoadBalancerRepository.update.assert_called_once_with( + 'TEST', + id=LB_ID, + provisioning_status=constants.ERROR) + + # Test revert with exception + repo.LoadBalancerRepository.update.reset_mock() + mock_loadbalancer_repo_update.side_effect = Exception('fail') + map_lb_to_amp.revert(None, self.loadbalancer_mock.id) + repo.LoadBalancerRepository.update.assert_called_once_with( + 'TEST', + id=LB_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.AmphoraRepository.get', + return_value=_amphora_mock) + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get', + return_value=_loadbalancer_mock) + def test_mark_lb_amphorae_deleted_in_db(self, + mock_loadbalancer_repo_get, + mock_amphora_repo_get, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_amp_deleted_in_db = (database_tasks. 
+ MarkLBAmphoraeDeletedInDB()) + mark_amp_deleted_in_db.execute(_loadbalancer_mock) + + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', + id=AMP_ID, + status=constants.DELETED) + + @mock.patch('octavia.db.repositories.AmphoraRepository.get', + return_value=_amphora_mock) + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get', + return_value=_loadbalancer_mock) + def test_mark_amphora_allocated_in_db(self, + mock_loadbalancer_repo_get, + mock_amphora_repo_get, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_amp_allocated_in_db = (database_tasks. + MarkAmphoraAllocatedInDB()) + mark_amp_allocated_in_db.execute(_amphora_mock, + self.loadbalancer_mock.id) + + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', + AMP_ID, + status=constants.AMPHORA_ALLOCATED, + compute_id=COMPUTE_ID, + lb_network_ip=LB_NET_IP, + load_balancer_id=LB_ID) + + # Test the revert + + mock_amphora_repo_update.reset_mock() + mark_amp_allocated_in_db.revert(None, _amphora_mock, + self.loadbalancer_mock.id) + + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', + id=AMP_ID, + status=constants.ERROR) + + # Test the revert with exception + + mock_amphora_repo_update.reset_mock() + mock_amphora_repo_update.side_effect = Exception('fail') + mark_amp_allocated_in_db.revert(None, _amphora_mock, + self.loadbalancer_mock.id) + + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', + id=AMP_ID, + status=constants.ERROR) + + def test_mark_amphora_booting_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_amp_booting_in_db = database_tasks.MarkAmphoraBootingInDB() + mark_amp_booting_in_db.execute(_amphora_mock.id, + _amphora_mock.compute_id) + + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', + AMP_ID, + status=constants.AMPHORA_BOOTING, + compute_id=COMPUTE_ID) + + # Test the revert + + mock_amphora_repo_update.reset_mock() + mark_amp_booting_in_db.revert(None, _amphora_mock.id, + _amphora_mock.compute_id) + + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', + AMP_ID, + status=constants.ERROR, + compute_id=COMPUTE_ID) + + # Test the revert with exception + + mock_amphora_repo_update.reset_mock() + mock_amphora_repo_update.side_effect = Exception('fail') + mark_amp_booting_in_db.revert(None, _amphora_mock.id, + _amphora_mock.compute_id) + + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', + AMP_ID, + status=constants.ERROR, + compute_id=COMPUTE_ID) + + def test_mark_amphora_deleted_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_amp_deleted_in_db = database_tasks.MarkAmphoraDeletedInDB() + mark_amp_deleted_in_db.execute(_amphora_mock) + + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', + AMP_ID, + status=constants.DELETED) + + # Test the revert + mock_amphora_repo_update.reset_mock() + mark_amp_deleted_in_db.revert(_amphora_mock) + + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', + id=AMP_ID, + status=constants.ERROR) + + # Test the revert with exception + mock_amphora_repo_update.reset_mock() + mock_amphora_repo_update.side_effect = Exception('fail') + 
mark_amp_deleted_in_db.revert(_amphora_mock) + + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', + id=AMP_ID, + status=constants.ERROR) + + def test_mark_amphora_pending_delete_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_amp_pending_delete_in_db = (database_tasks. + MarkAmphoraPendingDeleteInDB()) + mark_amp_pending_delete_in_db.execute(_amphora_mock) + + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', + AMP_ID, + status=constants.PENDING_DELETE) + + # Test the revert + mock_amphora_repo_update.reset_mock() + mark_amp_pending_delete_in_db.revert(_amphora_mock) + + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', + id=AMP_ID, + status=constants.ERROR) + + # Test the revert with exception + mock_amphora_repo_update.reset_mock() + mock_amphora_repo_update.side_effect = Exception('fail') + + mark_amp_pending_delete_in_db.revert(_amphora_mock) + + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', + id=AMP_ID, + status=constants.ERROR) + + def test_mark_amphora_pending_update_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_amp_pending_update_in_db = (database_tasks. + MarkAmphoraPendingUpdateInDB()) + mark_amp_pending_update_in_db.execute(_amphora_mock) + + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', + AMP_ID, + status=constants.PENDING_UPDATE) + + # Test the revert + mock_amphora_repo_update.reset_mock() + mark_amp_pending_update_in_db.revert(_amphora_mock) + + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', + id=AMP_ID, + status=constants.ERROR) + + # Test the revert with exception + mock_amphora_repo_update.reset_mock() + mock_amphora_repo_update.side_effect = Exception('fail') + mark_amp_pending_update_in_db.revert(_amphora_mock) + + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', + id=AMP_ID, + status=constants.ERROR) + + def test_mark_amphora_ready_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + _amphora_mock.lb_network_ip = LB_NET_IP + + mark_amp_ready_in_db = database_tasks.MarkAmphoraReadyInDB() + mark_amp_ready_in_db.execute(_amphora_mock) + + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', + AMP_ID, + status=constants.AMPHORA_READY, + compute_id=COMPUTE_ID, + lb_network_ip=LB_NET_IP) + + # Test the revert + + mock_amphora_repo_update.reset_mock() + mark_amp_ready_in_db.revert(_amphora_mock) + + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', + AMP_ID, + status=constants.ERROR, + compute_id=COMPUTE_ID, + lb_network_ip=LB_NET_IP) + + # Test the revert with exception + + mock_amphora_repo_update.reset_mock() + mock_amphora_repo_update.side_effect = Exception('fail') + mark_amp_ready_in_db.revert(_amphora_mock) + + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', + AMP_ID, + status=constants.ERROR, + compute_id=COMPUTE_ID, + lb_network_ip=LB_NET_IP) + + @mock.patch('octavia.db.repositories.AmphoraRepository.get') + def test_update_amphora_info(self, + mock_amphora_repo_get, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + 
mock_amphora_repo_update, + mock_amphora_repo_delete): + + update_amphora_info = database_tasks.UpdateAmphoraInfo() + update_amphora_info.execute(AMP_ID, _compute_mock) + + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', + AMP_ID, + lb_network_ip=LB_NET_IP, + cached_zone=CACHED_ZONE, + image_id=IMAGE_ID, + compute_flavor=COMPUTE_FLAVOR) + + repo.AmphoraRepository.get.assert_called_once_with( + 'TEST', + id=AMP_ID) + + def test_mark_listener_active_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_listener_active = database_tasks.MarkListenerActiveInDB() + mark_listener_active.execute(self.listener_mock) + + repo.ListenerRepository.update.assert_called_once_with( + 'TEST', + LISTENER_ID, + provisioning_status=constants.ACTIVE) + + # Test the revert + mock_listener_repo_update.reset_mock() + mark_listener_active.revert(self.listener_mock) + + repo.ListenerRepository.update.assert_called_once_with( + 'TEST', + id=LISTENER_ID, + provisioning_status=constants.ERROR) + + # Test the revert + mock_listener_repo_update.reset_mock() + mock_listener_repo_update.side_effect = Exception('fail') + mark_listener_active.revert(self.listener_mock) + + repo.ListenerRepository.update.assert_called_once_with( + 'TEST', + id=LISTENER_ID, + provisioning_status=constants.ERROR) + + def test_mark_listener_deleted_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_listener_deleted = database_tasks.MarkListenerDeletedInDB() + mark_listener_deleted.execute(self.listener_mock) + + repo.ListenerRepository.update.assert_called_once_with( + 'TEST', + LISTENER_ID, + provisioning_status=constants.DELETED) + + # Test the revert + mock_listener_repo_update.reset_mock() + mark_listener_deleted.revert(self.listener_mock) + + repo.ListenerRepository.update.assert_called_once_with( + 'TEST', + id=LISTENER_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_listener_repo_update.reset_mock() + mock_listener_repo_update.side_effect = Exception('fail') + mark_listener_deleted.revert(self.listener_mock) + + repo.ListenerRepository.update.assert_called_once_with( + 'TEST', + id=LISTENER_ID, + provisioning_status=constants.ERROR) + + def test_mark_listener_pending_deleted_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_listener_pending_delete = (database_tasks. 
+ MarkListenerPendingDeleteInDB()) + mark_listener_pending_delete.execute(self.listener_mock) + + repo.ListenerRepository.update.assert_called_once_with( + 'TEST', + LISTENER_ID, + provisioning_status=constants.PENDING_DELETE) + + # Test the revert + mock_listener_repo_update.reset_mock() + mark_listener_pending_delete.revert(self.listener_mock) + + repo.ListenerRepository.update.assert_called_once_with( + 'TEST', + id=LISTENER_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_listener_repo_update.reset_mock() + mock_listener_repo_update.side_effect = Exception('fail') + mark_listener_pending_delete.revert(self.listener_mock) + + repo.ListenerRepository.update.assert_called_once_with( + 'TEST', + id=LISTENER_ID, + provisioning_status=constants.ERROR) + + def test_mark_lb_and_listeners_active_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_lb_and_listeners_active = (database_tasks. + MarkLBAndListenersActiveInDB()) + mark_lb_and_listeners_active.execute(self.loadbalancer_mock, + [self.listener_mock]) + + repo.ListenerRepository.update.assert_called_once_with( + 'TEST', + LISTENER_ID, + provisioning_status=constants.ACTIVE) + repo.LoadBalancerRepository.update.assert_called_once_with( + 'TEST', + LB_ID, + provisioning_status=constants.ACTIVE) + + # Test the revert + mock_loadbalancer_repo_update.reset_mock() + mock_listener_repo_update.reset_mock() + + mark_lb_and_listeners_active.revert(self.loadbalancer_mock, + [self.listener_mock]) + + repo.ListenerRepository.update.assert_called_once_with( + 'TEST', + id=LISTENER_ID, + provisioning_status=constants.ERROR) + repo.LoadBalancerRepository.update.assert_called_once_with( + 'TEST', + id=LB_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exceptions + mock_loadbalancer_repo_update.reset_mock() + mock_loadbalancer_repo_update.side_effect = Exception('fail') + mock_listener_repo_update.reset_mock() + mock_listener_repo_update.side_effect = Exception('fail') + + mark_lb_and_listeners_active.revert(self.loadbalancer_mock, + [self.listener_mock]) + + repo.ListenerRepository.update.assert_called_once_with( + 'TEST', + id=LISTENER_ID, + provisioning_status=constants.ERROR) + repo.LoadBalancerRepository.update.assert_called_once_with( + 'TEST', + id=LB_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.common.tls_utils.cert_parser.get_cert_expiration', + return_value=_cert_mock) + def test_update_amphora_db_cert_exp(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete, + mock_get_cert_exp): + + update_amp_cert = database_tasks.UpdateAmphoraDBCertExpiration() + key = utils.get_six_compatible_server_certs_key_passphrase() + fer = fernet.Fernet(key) + _pem_mock = fer.encrypt( + utils.get_six_compatible_value('test_cert') + ) + update_amp_cert.execute(_amphora_mock.id, _pem_mock) + + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', + AMP_ID, + cert_expiration=_cert_mock) + + def test_update_amphora_cert_busy_to_false(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + amp_cert_busy_to_F = database_tasks.UpdateAmphoraCertBusyToFalse() + 
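+        # Executing the task should issue a single AmphoraRepository.update
+        # that clears the cert_busy flag.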
amp_cert_busy_to_F.execute(_amphora_mock) + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', + AMP_ID, + cert_busy=False) + + def test_mark_LB_active_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_loadbalancer_active = database_tasks.MarkLBActiveInDB() + mark_loadbalancer_active.execute(self.loadbalancer_mock) + + repo.LoadBalancerRepository.update.assert_called_once_with( + 'TEST', + LB_ID, + provisioning_status=constants.ACTIVE) + self.assertEqual(0, repo.ListenerRepository.update.call_count) + + # Test the revert + mock_loadbalancer_repo_update.reset_mock() + mark_loadbalancer_active.revert(self.loadbalancer_mock) + + repo.LoadBalancerRepository.update.assert_called_once_with( + 'TEST', + id=LB_ID, + provisioning_status=constants.ERROR) + self.assertEqual(0, repo.ListenerRepository.update.call_count) + + # Test the revert with exception + mock_loadbalancer_repo_update.reset_mock() + mock_loadbalancer_repo_update.side_effect = Exception('fail') + mark_loadbalancer_active.revert(self.loadbalancer_mock) + + repo.LoadBalancerRepository.update.assert_called_once_with( + 'TEST', + id=LB_ID, + provisioning_status=constants.ERROR) + self.assertEqual(0, repo.ListenerRepository.update.call_count) + + def test_mark_LB_active_in_db_and_listeners(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + listeners = [data_models.Listener(id='listener1'), + data_models.Listener(id='listener2')] + lb = data_models.LoadBalancer(id=LB_ID, listeners=listeners) + mark_lb_active = database_tasks.MarkLBActiveInDB(mark_subobjects=True) + mark_lb_active.execute(lb) + + repo.LoadBalancerRepository.update.assert_called_once_with( + 'TEST', + lb.id, + provisioning_status=constants.ACTIVE) + self.assertEqual(2, repo.ListenerRepository.update.call_count) + repo.ListenerRepository.update.has_calls( + [mock.call('TEST', listeners[0].id, + provisioning_status=constants.ACTIVE), + mock.call('TEST', listeners[1].id, + provisioning_status=constants.ACTIVE)]) + + mock_loadbalancer_repo_update.reset_mock() + mock_listener_repo_update.reset_mock() + mark_lb_active.revert(lb) + + repo.LoadBalancerRepository.update.assert_called_once_with( + 'TEST', + id=lb.id, + provisioning_status=constants.ERROR) + self.assertEqual(2, repo.ListenerRepository.update.call_count) + repo.ListenerRepository.update.has_calls( + [mock.call('TEST', listeners[0].id, + provisioning_status=constants.ERROR), + mock.call('TEST', listeners[1].id, + provisioning_status=constants.ERROR)]) + + @mock.patch('octavia.db.repositories.PoolRepository.update') + @mock.patch('octavia.db.repositories.MemberRepository.update') + @mock.patch('octavia.db.repositories.HealthMonitorRepository.update') + @mock.patch('octavia.db.repositories.L7PolicyRepository.update') + @mock.patch('octavia.db.repositories.L7RuleRepository.update') + def test_mark_LB_active_in_db_full_graph(self, + mock_l7r_repo_update, + mock_l7p_repo_update, + mock_hm_repo_update, + mock_member_repo_update, + mock_pool_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + unused_pool = data_models.Pool(id='unused_pool') + members1 = [data_models.Member(id='member1'), + 
data_models.Member(id='member2')] + health_monitor = data_models.HealthMonitor(id='hm1') + default_pool = data_models.Pool(id='default_pool', + members=members1, + health_monitor=health_monitor) + listener1 = data_models.Listener(id='listener1', + default_pool=default_pool) + members2 = [data_models.Member(id='member3'), + data_models.Member(id='member4')] + redirect_pool = data_models.Pool(id='redirect_pool', + members=members2) + l7rules = [data_models.L7Rule(id='rule1')] + redirect_policy = data_models.L7Policy(id='redirect_policy', + redirect_pool=redirect_pool, + l7rules=l7rules) + l7policies = [redirect_policy] + listener2 = data_models.Listener(id='listener2', + l7policies=l7policies) + listener2.l7policies = l7policies + listeners = [listener1, listener2] + pools = [default_pool, redirect_pool, unused_pool] + + lb = data_models.LoadBalancer(id=LB_ID, listeners=listeners, + pools=pools) + mark_lb_active = database_tasks.MarkLBActiveInDB(mark_subobjects=True) + mark_lb_active.execute(lb) + + repo.LoadBalancerRepository.update.assert_called_once_with( + 'TEST', + lb.id, + provisioning_status=constants.ACTIVE) + self.assertEqual(2, repo.ListenerRepository.update.call_count) + repo.ListenerRepository.update.has_calls( + [mock.call('TEST', listeners[0].id, + provisioning_status=constants.ACTIVE), + mock.call('TEST', listeners[1].id, + provisioning_status=constants.ACTIVE)]) + self.assertEqual(2, repo.PoolRepository.update.call_count) + repo.PoolRepository.update.has_calls( + [mock.call('TEST', default_pool.id, + provisioning_status=constants.ACTIVE), + mock.call('TEST', redirect_pool.id, + provisioning_status=constants.ACTIVE)]) + self.assertEqual(4, repo.MemberRepository.update.call_count) + repo.MemberRepository.update.has_calls( + [mock.call('TEST', members1[0].id, + provisioning_status=constants.ACTIVE), + mock.call('TEST', members1[1].id, + provisioning_status=constants.ACTIVE), + mock.call('TEST', members2[0].id, + provisioning_status=constants.ACTIVE), + mock.call('TEST', members2[1].id, + provisioning_status=constants.ACTIVE)]) + self.assertEqual(1, repo.HealthMonitorRepository.update.call_count) + repo.HealthMonitorRepository.update.has_calls( + [mock.call('TEST', health_monitor.id, + provisioning_status=constants.ACTIVE)]) + self.assertEqual(1, repo.L7PolicyRepository.update.call_count) + repo.L7PolicyRepository.update.has_calls( + [mock.call('TEST', l7policies[0].id, + provisioning_status=constants.ACTIVE)]) + self.assertEqual(1, repo.L7RuleRepository.update.call_count) + repo.L7RuleRepository.update.has_calls( + [mock.call('TEST', l7rules[0].id, + provisioning_status=constants.ACTIVE)]) + + mock_loadbalancer_repo_update.reset_mock() + mock_listener_repo_update.reset_mock() + mock_pool_repo_update.reset_mock() + mock_member_repo_update.reset_mock() + mock_hm_repo_update.reset_mock() + mock_l7p_repo_update.reset_mock() + mock_l7r_repo_update.reset_mock() + mark_lb_active.revert(lb) + + repo.LoadBalancerRepository.update.assert_called_once_with( + 'TEST', + id=lb.id, + provisioning_status=constants.ERROR) + self.assertEqual(2, repo.ListenerRepository.update.call_count) + repo.ListenerRepository.update.has_calls( + [mock.call('TEST', listeners[0].id, + provisioning_status=constants.ERROR), + mock.call('TEST', listeners[1].id, + provisioning_status=constants.ERROR)]) + self.assertEqual(2, repo.PoolRepository.update.call_count) + repo.PoolRepository.update.has_calls( + [mock.call('TEST', default_pool.id, + provisioning_status=constants.ERROR), + mock.call('TEST', redirect_pool.id, 
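+            # NOTE: Mock.has_calls() is not an assertion; accessing it only
+            # creates a child mock. assert_has_calls() is the verifying API.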
+ provisioning_status=constants.ERROR)]) + self.assertEqual(4, repo.MemberRepository.update.call_count) + repo.MemberRepository.update.has_calls( + [mock.call('TEST', members1[0].id, + provisioning_status=constants.ERROR), + mock.call('TEST', members1[1].id, + provisioning_status=constants.ERROR), + mock.call('TEST', members2[0].id, + provisioning_status=constants.ERROR), + mock.call('TEST', members2[1].id, + provisioning_status=constants.ERROR)]) + self.assertEqual(1, repo.HealthMonitorRepository.update.call_count) + repo.HealthMonitorRepository.update.has_calls( + [mock.call('TEST', health_monitor.id, + provisioning_status=constants.ERROR)]) + self.assertEqual(1, repo.L7PolicyRepository.update.call_count) + repo.L7PolicyRepository.update.has_calls( + [mock.call('TEST', l7policies[0].id, + provisioning_status=constants.ERROR)]) + self.assertEqual(1, repo.L7RuleRepository.update.call_count) + repo.L7RuleRepository.update.has_calls( + [mock.call('TEST', l7rules[0].id, + provisioning_status=constants.ERROR)]) + + def test_mark_LB_deleted_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_loadbalancer_deleted = database_tasks.MarkLBDeletedInDB() + mark_loadbalancer_deleted.execute(self.loadbalancer_mock) + + repo.LoadBalancerRepository.update.assert_called_once_with( + 'TEST', + LB_ID, + provisioning_status=constants.DELETED) + + # Test the revert + mock_loadbalancer_repo_update.reset_mock() + mark_loadbalancer_deleted.revert(self.loadbalancer_mock) + + repo.LoadBalancerRepository.update.assert_called_once_with( + 'TEST', + id=LB_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_loadbalancer_repo_update.reset_mock() + mock_loadbalancer_repo_update.side_effect = Exception('fail') + mark_loadbalancer_deleted.revert(self.loadbalancer_mock) + + repo.LoadBalancerRepository.update.assert_called_once_with( + 'TEST', + id=LB_ID, + provisioning_status=constants.ERROR) + + def test_mark_LB_pending_deleted_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_loadbalancer_pending_delete = (database_tasks. 
+ MarkLBPendingDeleteInDB()) + mark_loadbalancer_pending_delete.execute(self.loadbalancer_mock) + + repo.LoadBalancerRepository.update.assert_called_once_with( + 'TEST', + LB_ID, + provisioning_status=constants.PENDING_DELETE) + + # Test the revert + mock_loadbalancer_repo_update.reset_mock() + mark_loadbalancer_pending_delete.revert(self.loadbalancer_mock) + + repo.LoadBalancerRepository.update.assert_called_once_with( + 'TEST', + id=LB_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_loadbalancer_repo_update.reset_mock() + mock_loadbalancer_repo_update.side_effect = Exception('fail') + mark_loadbalancer_pending_delete.revert(self.loadbalancer_mock) + + repo.LoadBalancerRepository.update.assert_called_once_with( + 'TEST', + id=LB_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.HealthMonitorRepository.update') + def test_update_health_monitor_in_db(self, + mock_health_mon_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + update_health_mon = database_tasks.UpdateHealthMonInDB() + update_health_mon.execute(self.health_mon_mock, + {'delay': 1, 'timeout': 2}) + + repo.HealthMonitorRepository.update.assert_called_once_with( + 'TEST', + HM_ID, + delay=1, timeout=2) + + # Test the revert + mock_health_mon_repo_update.reset_mock() + update_health_mon.revert(self.health_mon_mock) + + repo.HealthMonitorRepository.update.assert_called_once_with( + 'TEST', + HM_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_health_mon_repo_update.reset_mock() + mock_health_mon_repo_update.side_effect = Exception('fail') + update_health_mon.revert(self.health_mon_mock) + + repo.HealthMonitorRepository.update.assert_called_once_with( + 'TEST', + HM_ID, + provisioning_status=constants.ERROR) + + def test_update_load_balancer_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + update_load_balancer = database_tasks.UpdateLoadbalancerInDB() + update_load_balancer.execute(self.loadbalancer_mock, + {'name': 'test', 'description': 'test2'}) + + repo.LoadBalancerRepository.update.assert_called_once_with( + 'TEST', + LB_ID, + name='test', description='test2') + + # Test the revert + mock_loadbalancer_repo_update.reset_mock() + update_load_balancer.revert(self.loadbalancer_mock) + + repo.LoadBalancerRepository.update.assert_called_once_with( + 'TEST', + id=LB_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_loadbalancer_repo_update.reset_mock() + mock_loadbalancer_repo_update.side_effect = Exception('fail') + update_load_balancer.revert(self.loadbalancer_mock) + + repo.LoadBalancerRepository.update.assert_called_once_with( + 'TEST', + id=LB_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.VipRepository.update') + def test_update_vip_in_db_during_update_loadbalancer(self, + mock_vip_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_lb_update, + mock_listener_update, + mock_amphora_update, + mock_amphora_delete): + + self.loadbalancer_mock.vip.load_balancer_id = LB_ID + update_load_balancer = database_tasks.UpdateLoadbalancerInDB() + update_load_balancer.execute(self.loadbalancer_mock, + {'name': 'test', + 'description': 'test2', + 'vip': {'qos_policy_id': 
'fool'}}) + + repo.LoadBalancerRepository.update.assert_called_once_with( + 'TEST', + LB_ID, + name='test', description='test2') + + repo.VipRepository.update.assert_called_once_with('TEST', LB_ID, + qos_policy_id='fool') + + def test_update_listener_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + update_listener = database_tasks.UpdateListenerInDB() + update_listener.execute(self.listener_mock, + {'name': 'test', 'description': 'test2'}) + + repo.ListenerRepository.update.assert_called_once_with( + 'TEST', + LISTENER_ID, + name='test', description='test2') + + # Test the revert + mock_listener_repo_update.reset_mock() + update_listener.revert(self.listener_mock) + repo.ListenerRepository.update.assert_called_once_with( + 'TEST', + id=LISTENER_ID, + provisioning_status=constants.ERROR) + + # Test the revert + mock_listener_repo_update.reset_mock() + mock_listener_repo_update.side_effect = Exception('fail') + update_listener.revert(self.listener_mock) + repo.ListenerRepository.update.assert_called_once_with( + 'TEST', + id=LISTENER_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.MemberRepository.update') + def test_update_member_in_db(self, + mock_member_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + update_member = database_tasks.UpdateMemberInDB() + update_member.execute(self.member_mock, + {'weight': 1, 'ip_address': '10.1.0.0'}) + + repo.MemberRepository.update.assert_called_once_with( + 'TEST', + MEMBER_ID, + weight=1, ip_address='10.1.0.0') + + # Test the revert + mock_member_repo_update.reset_mock() + update_member.revert(self.member_mock) + + repo.MemberRepository.update.assert_called_once_with( + 'TEST', + MEMBER_ID, + provisioning_status=constants.ERROR) + + # Test the revert + mock_member_repo_update.reset_mock() + mock_member_repo_update.side_effect = Exception('fail') + update_member.revert(self.member_mock) + + repo.MemberRepository.update.assert_called_once_with( + 'TEST', + MEMBER_ID, + provisioning_status=constants.ERROR) + + @mock.patch( + 'octavia.db.repositories.Repositories.update_pool_and_sp') + def test_update_pool_in_db(self, + mock_repos_pool_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + sp_dict = {'type': 'SOURCE_IP', 'cookie_name': None} + update_dict = {'name': 'test', 'description': 'test2', + 'session_persistence': sp_dict} + update_pool = database_tasks.UpdatePoolInDB() + update_pool.execute(self.pool_mock, + update_dict) + + repo.Repositories.update_pool_and_sp.assert_called_once_with( + 'TEST', + POOL_ID, + update_dict) + + # Test the revert + mock_repos_pool_update.reset_mock() + update_pool.revert(self.pool_mock) + + repo.Repositories.update_pool_and_sp.assert_called_once_with( + 'TEST', + POOL_ID, + {'provisioning_status': constants.ERROR}) + + # Test the revert with exception + mock_repos_pool_update.reset_mock() + mock_repos_pool_update.side_effect = Exception('fail') + update_pool.revert(self.pool_mock) + + repo.Repositories.update_pool_and_sp.assert_called_once_with( + 'TEST', + POOL_ID, + {'provisioning_status': constants.ERROR}) + + @mock.patch('octavia.db.repositories.L7PolicyRepository.update') + 
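+    # With stacked @mock.patch decorators, the patch closest to the method
+    # supplies the first mock argument, followed by the class-level patches
+    # in bottom-up order, for example (illustrative targets only):
+    #     @mock.patch('pkg.b')   # -> second argument
+    #     @mock.patch('pkg.a')   # -> first argument
+    #     def test(self, mock_a, mock_b): ...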
def test_update_l7policy_in_db(self, + mock_l7policy_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + update_l7policy = database_tasks.UpdateL7PolicyInDB() + update_l7policy.execute(self.l7policy_mock, + {'action': constants.L7POLICY_ACTION_REJECT}) + + repo.L7PolicyRepository.update.assert_called_once_with( + 'TEST', + L7POLICY_ID, + action=constants.L7POLICY_ACTION_REJECT) + + # Test the revert + mock_l7policy_repo_update.reset_mock() + update_l7policy.revert(self.l7policy_mock) + + repo.L7PolicyRepository.update.assert_called_once_with( + 'TEST', + L7POLICY_ID, + provisioning_status=constants.ERROR) + + # Test the revert + mock_l7policy_repo_update.reset_mock() + mock_l7policy_repo_update.side_effect = Exception('fail') + update_l7policy.revert(self.l7policy_mock) + + repo.L7PolicyRepository.update.assert_called_once_with( + 'TEST', + L7POLICY_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.L7RuleRepository.update') + @mock.patch('octavia.db.repositories.L7PolicyRepository.update') + def test_update_l7rule_in_db(self, + mock_l7rule_repo_update, + mock_l7policy_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + update_l7rule = database_tasks.UpdateL7RuleInDB() + update_l7rule.execute( + self.l7rule_mock, + {'type': constants.L7RULE_TYPE_PATH, + 'compare_type': constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + 'value': '/api'}) + + repo.L7RuleRepository.update.assert_called_once_with( + 'TEST', + L7RULE_ID, + type=constants.L7RULE_TYPE_PATH, + compare_type=constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + value='/api') + + # Test the revert + mock_l7rule_repo_update.reset_mock() + update_l7rule.revert(self.l7rule_mock) + + repo.L7PolicyRepository.update.assert_called_once_with( + 'TEST', + L7POLICY_ID, + provisioning_status=constants.ERROR) + + # Test the revert + mock_l7rule_repo_update.reset_mock() + mock_l7rule_repo_update.side_effect = Exception('fail') + update_l7rule.revert(self.l7rule_mock) + + repo.L7PolicyRepository.update.assert_called_once_with( + 'TEST', + L7POLICY_ID, + provisioning_status=constants.ERROR) + + def test_get_amphora_details(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + get_amp_details = database_tasks.GetAmphoraDetails() + new_amp = get_amp_details.execute(_amphora_mock) + + self.assertEqual(AMP_ID, new_amp.id) + self.assertEqual(VRRP_IP, new_amp.vrrp_ip) + self.assertEqual(HA_IP, new_amp.ha_ip) + self.assertEqual(VRRP_PORT_ID, new_amp.vrrp_port_id) + self.assertEqual(AMP_ROLE, new_amp.role) + self.assertEqual(VRRP_ID, new_amp.vrrp_id) + self.assertEqual(VRRP_PRIORITY, new_amp.vrrp_priority) + + def test_mark_amphora_role_indb(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_amp_master_indb = database_tasks.MarkAmphoraMasterInDB() + mark_amp_master_indb.execute(_amphora_mock) + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', AMP_ID, role='MASTER', + vrrp_priority=constants.ROLE_MASTER_PRIORITY) + + mock_amphora_repo_update.reset_mock() + + 
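+        # A revert result that is not a taskflow Failure should reset the
+        # amphora role and vrrp_priority.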
mark_amp_master_indb.revert("BADRESULT", _amphora_mock) + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', AMP_ID, role=None, vrrp_priority=None) + + mock_amphora_repo_update.reset_mock() + + failure_obj = failure.Failure.from_exception(Exception("TESTEXCEPT")) + mark_amp_master_indb.revert(failure_obj, _amphora_mock) + self.assertFalse(repo.AmphoraRepository.update.called) + + mock_amphora_repo_update.reset_mock() + + mark_amp_backup_indb = database_tasks.MarkAmphoraBackupInDB() + mark_amp_backup_indb.execute(_amphora_mock) + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', AMP_ID, role='BACKUP', + vrrp_priority=constants.ROLE_BACKUP_PRIORITY) + + mock_amphora_repo_update.reset_mock() + + mark_amp_backup_indb.revert("BADRESULT", _amphora_mock) + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', AMP_ID, role=None, vrrp_priority=None) + + mock_amphora_repo_update.reset_mock() + + mark_amp_standalone_indb = database_tasks.MarkAmphoraStandAloneInDB() + mark_amp_standalone_indb.execute(_amphora_mock) + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', AMP_ID, role='STANDALONE', + vrrp_priority=None) + + mock_amphora_repo_update.reset_mock() + + mark_amp_standalone_indb.revert("BADRESULT", _amphora_mock) + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', AMP_ID, role=None, vrrp_priority=None) + + # Test revert with exception + mock_amphora_repo_update.reset_mock() + mock_amphora_repo_update.side_effect = Exception('fail') + mark_amp_standalone_indb.revert("BADRESULT", _amphora_mock) + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', AMP_ID, role=None, vrrp_priority=None) + + @mock.patch('octavia.db.repositories.AmphoraRepository.get') + def test_get_amphorae_from_loadbalancer(self, + mock_amphora_get, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + amp1 = mock.MagicMock() + amp1.id = uuidutils.generate_uuid() + amp2 = mock.MagicMock() + amp2.id = uuidutils.generate_uuid() + lb = mock.MagicMock() + lb.amphorae = [amp1, amp2] + + mock_amphora_get.side_effect = [_amphora_mock, None] + + get_amps_from_lb_obj = database_tasks.GetAmphoraeFromLoadbalancer() + result = get_amps_from_lb_obj.execute(lb) + self.assertEqual([_amphora_mock], result) + + @mock.patch('octavia.db.repositories.ListenerRepository.get') + def test_get_listeners_from_loadbalancer(self, + mock_listener_get, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + mock_listener_get.return_value = _listener_mock + _loadbalancer_mock.listeners = [_listener_mock] + get_list_from_lb_obj = database_tasks.GetListenersFromLoadbalancer() + result = get_list_from_lb_obj.execute(_loadbalancer_mock) + mock_listener_get.assert_called_once_with('TEST', id=_listener_mock.id) + self.assertEqual([_listener_mock], result) + + def test_get_vip_from_loadbalancer(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + _loadbalancer_mock.vip = _vip_mock + get_vip_from_lb_obj = database_tasks.GetVipFromLoadbalancer() + result = get_vip_from_lb_obj.execute(_loadbalancer_mock) + self.assertEqual(_vip_mock, result) + + @mock.patch('octavia.db.repositories.VRRPGroupRepository.create') + def 
test_create_vrrp_group_for_lb(self, + mock_vrrp_group_create, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mock_get_session.side_effect = ['TEST', + odb_exceptions.DBDuplicateEntry] + create_vrrp_group = database_tasks.CreateVRRPGroupForLB() + create_vrrp_group.execute(_loadbalancer_mock) + mock_vrrp_group_create.assert_called_once_with( + 'TEST', load_balancer_id=LB_ID, + vrrp_group_name=LB_ID.replace('-', ''), + vrrp_auth_type=constants.VRRP_AUTH_DEFAULT, + vrrp_auth_pass=mock_generate_uuid.return_value.replace('-', + '')[0:7], + advert_int=1) + create_vrrp_group.execute(_loadbalancer_mock) + + @mock.patch('octavia.db.repositories.AmphoraHealthRepository.delete') + def test_disable_amphora_health_monitoring(self, + mock_amp_health_repo_delete, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + disable_amp_health = database_tasks.DisableAmphoraHealthMonitoring() + disable_amp_health.execute(_amphora_mock) + mock_amp_health_repo_delete.assert_called_once_with( + 'TEST', amphora_id=AMP_ID) + + @mock.patch('octavia.db.repositories.AmphoraHealthRepository.delete') + def test_disable_lb_amphorae_health_monitoring( + self, + mock_amp_health_repo_delete, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + disable_amp_health = ( + database_tasks.DisableLBAmphoraeHealthMonitoring()) + disable_amp_health.execute(_loadbalancer_mock) + mock_amp_health_repo_delete.assert_called_once_with( + 'TEST', amphora_id=AMP_ID) + + @mock.patch('octavia.db.repositories.AmphoraHealthRepository.update') + def test_mark_amphora_health_monitoring_busy(self, + mock_amp_health_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + mark_busy = database_tasks.MarkAmphoraHealthBusy() + mark_busy.execute(_amphora_mock) + mock_amp_health_repo_update.assert_called_once_with( + 'TEST', amphora_id=AMP_ID, busy=True) + + @mock.patch('octavia.db.repositories.AmphoraHealthRepository.update') + def test_mark_lb_amphorae_health_monitoring_busy( + self, + mock_amp_health_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + mark_busy = ( + database_tasks.MarkLBAmphoraeHealthBusy()) + mark_busy.execute(_loadbalancer_mock) + mock_amp_health_repo_update.assert_called_once_with( + 'TEST', amphora_id=AMP_ID, busy=True) + + def test_update_lb_server_group_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + update_server_group_info = database_tasks.UpdateLBServerGroupInDB() + update_server_group_info.execute(LB_ID, SERVER_GROUP_ID) + + repo.LoadBalancerRepository.update.assert_called_once_with( + 'TEST', + id=LB_ID, + server_group_id=SERVER_GROUP_ID) + + # Test the revert + mock_listener_repo_update.reset_mock() + update_server_group_info.revert(LB_ID, SERVER_GROUP_ID) + + # Test the revert with exception + mock_listener_repo_update.reset_mock() + 
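+        # The revert should not raise even when the load balancer update fails.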
mock_loadbalancer_repo_update.side_effect = Exception('fail') + update_server_group_info.revert(LB_ID, SERVER_GROUP_ID) + + @mock.patch('octavia.db.repositories.HealthMonitorRepository.update') + def test_mark_health_mon_active_in_db(self, + mock_health_mon_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_health_mon_active = (database_tasks.MarkHealthMonitorActiveInDB()) + mark_health_mon_active.execute(self.health_mon_mock) + + mock_health_mon_repo_update.assert_called_once_with( + 'TEST', + HM_ID, + operating_status=constants.ONLINE, + provisioning_status=constants.ACTIVE) + + # Test the revert + mock_health_mon_repo_update.reset_mock() + mark_health_mon_active.revert(self.health_mon_mock) + + mock_health_mon_repo_update.assert_called_once_with( + 'TEST', + id=HM_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_health_mon_repo_update.reset_mock() + mock_health_mon_repo_update.side_effect = Exception('fail') + mark_health_mon_active.revert(self.health_mon_mock) + + mock_health_mon_repo_update.assert_called_once_with( + 'TEST', + id=HM_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.HealthMonitorRepository.update') + def test_mark_health_mon_pending_create_in_db( + self, + mock_health_mon_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_health_mon_pending_create = (database_tasks. + MarkHealthMonitorPendingCreateInDB()) + mark_health_mon_pending_create.execute(self.health_mon_mock) + + mock_health_mon_repo_update.assert_called_once_with( + 'TEST', + HM_ID, + provisioning_status=constants.PENDING_CREATE) + + # Test the revert + mock_health_mon_repo_update.reset_mock() + mark_health_mon_pending_create.revert(self.health_mon_mock) + + mock_health_mon_repo_update.assert_called_once_with( + 'TEST', + id=HM_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_health_mon_repo_update.reset_mock() + mock_health_mon_repo_update.side_effect = Exception('fail') + mark_health_mon_pending_create.revert(self.health_mon_mock) + + mock_health_mon_repo_update.assert_called_once_with( + 'TEST', + id=HM_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.HealthMonitorRepository.update') + def test_mark_health_mon_pending_delete_in_db( + self, + mock_health_mon_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_health_mon_pending_delete = (database_tasks. 
+ MarkHealthMonitorPendingDeleteInDB()) + mark_health_mon_pending_delete.execute(self.health_mon_mock) + + mock_health_mon_repo_update.assert_called_once_with( + 'TEST', + HM_ID, + provisioning_status=constants.PENDING_DELETE) + + # Test the revert + mock_health_mon_repo_update.reset_mock() + mark_health_mon_pending_delete.revert(self.health_mon_mock) + + mock_health_mon_repo_update.assert_called_once_with( + 'TEST', + id=HM_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_health_mon_repo_update.reset_mock() + mock_health_mon_repo_update.side_effect = Exception('fail') + mark_health_mon_pending_delete.revert(self.health_mon_mock) + + mock_health_mon_repo_update.assert_called_once_with( + 'TEST', + id=HM_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.HealthMonitorRepository.update') + def test_mark_health_mon_pending_update_in_db( + self, + mock_health_mon_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_health_mon_pending_update = (database_tasks. + MarkHealthMonitorPendingUpdateInDB()) + mark_health_mon_pending_update.execute(self.health_mon_mock) + + mock_health_mon_repo_update.assert_called_once_with( + 'TEST', + HM_ID, + provisioning_status=constants.PENDING_UPDATE) + + # Test the revert + mock_health_mon_repo_update.reset_mock() + mark_health_mon_pending_update.revert(self.health_mon_mock) + + mock_health_mon_repo_update.assert_called_once_with( + 'TEST', + id=HM_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_health_mon_repo_update.reset_mock() + mock_health_mon_repo_update.side_effect = Exception('fail') + mark_health_mon_pending_update.revert(self.health_mon_mock) + + mock_health_mon_repo_update.assert_called_once_with( + 'TEST', + id=HM_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.L7PolicyRepository.update') + def test_mark_l7policy_active_in_db(self, + mock_l7policy_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_l7policy_active = (database_tasks.MarkL7PolicyActiveInDB()) + mark_l7policy_active.execute(self.l7policy_mock) + + mock_l7policy_repo_update.assert_called_once_with( + 'TEST', + L7POLICY_ID, + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE) + + # Test the revert + mock_l7policy_repo_update.reset_mock() + mark_l7policy_active.revert(self.l7policy_mock) + + mock_l7policy_repo_update.assert_called_once_with( + 'TEST', + id=L7POLICY_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_l7policy_repo_update.reset_mock() + mock_l7policy_repo_update.side_effect = Exception('fail') + mark_l7policy_active.revert(self.l7policy_mock) + + mock_l7policy_repo_update.assert_called_once_with( + 'TEST', + id=L7POLICY_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.L7PolicyRepository.update') + def test_mark_l7policy_pending_create_in_db(self, + mock_l7policy_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_l7policy_pending_create = (database_tasks. 
+ MarkL7PolicyPendingCreateInDB()) + mark_l7policy_pending_create.execute(self.l7policy_mock) + + mock_l7policy_repo_update.assert_called_once_with( + 'TEST', + L7POLICY_ID, + provisioning_status=constants.PENDING_CREATE) + + # Test the revert + mock_l7policy_repo_update.reset_mock() + mark_l7policy_pending_create.revert(self.l7policy_mock) + + mock_l7policy_repo_update.assert_called_once_with( + 'TEST', + id=L7POLICY_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_l7policy_repo_update.reset_mock() + mock_l7policy_repo_update.side_effect = Exception('fail') + mark_l7policy_pending_create.revert(self.l7policy_mock) + + mock_l7policy_repo_update.assert_called_once_with( + 'TEST', + id=L7POLICY_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.L7PolicyRepository.update') + def test_mark_l7policy_pending_delete_in_db(self, + mock_l7policy_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_l7policy_pending_delete = (database_tasks. + MarkL7PolicyPendingDeleteInDB()) + mark_l7policy_pending_delete.execute(self.l7policy_mock) + + mock_l7policy_repo_update.assert_called_once_with( + 'TEST', + L7POLICY_ID, + provisioning_status=constants.PENDING_DELETE) + + # Test the revert + mock_l7policy_repo_update.reset_mock() + mark_l7policy_pending_delete.revert(self.l7policy_mock) + + mock_l7policy_repo_update.assert_called_once_with( + 'TEST', + id=L7POLICY_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_l7policy_repo_update.reset_mock() + mock_l7policy_repo_update.side_effect = Exception('fail') + mark_l7policy_pending_delete.revert(self.l7policy_mock) + + mock_l7policy_repo_update.assert_called_once_with( + 'TEST', + id=L7POLICY_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.L7PolicyRepository.update') + def test_mark_l7policy_pending_update_in_db(self, + mock_l7policy_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_l7policy_pending_update = (database_tasks. 
+ MarkL7PolicyPendingUpdateInDB()) + mark_l7policy_pending_update.execute(self.l7policy_mock) + + mock_l7policy_repo_update.assert_called_once_with( + 'TEST', + L7POLICY_ID, + provisioning_status=constants.PENDING_UPDATE) + + # Test the revert + mock_l7policy_repo_update.reset_mock() + mark_l7policy_pending_update.revert(self.l7policy_mock) + + mock_l7policy_repo_update.assert_called_once_with( + 'TEST', + id=L7POLICY_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_l7policy_repo_update.reset_mock() + mock_l7policy_repo_update.side_effect = Exception('fail') + mark_l7policy_pending_update.revert(self.l7policy_mock) + + mock_l7policy_repo_update.assert_called_once_with( + 'TEST', + id=L7POLICY_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.L7RuleRepository.update') + def test_mark_l7rule_active_in_db(self, + mock_l7rule_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_l7rule_active = (database_tasks.MarkL7RuleActiveInDB()) + mark_l7rule_active.execute(self.l7rule_mock) + + mock_l7rule_repo_update.assert_called_once_with( + 'TEST', + L7RULE_ID, + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE) + + # Test the revert + mock_l7rule_repo_update.reset_mock() + mark_l7rule_active.revert(self.l7rule_mock) + + mock_l7rule_repo_update.assert_called_once_with( + 'TEST', + id=L7RULE_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_l7rule_repo_update.reset_mock() + mock_l7rule_repo_update.side_effect = Exception('fail') + mark_l7rule_active.revert(self.l7rule_mock) + + mock_l7rule_repo_update.assert_called_once_with( + 'TEST', + id=L7RULE_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.L7RuleRepository.update') + def test_mark_l7rule_pending_create_in_db(self, + mock_l7rule_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_l7rule_pending_create = (database_tasks. + MarkL7RulePendingCreateInDB()) + mark_l7rule_pending_create.execute(self.l7rule_mock) + + mock_l7rule_repo_update.assert_called_once_with( + 'TEST', + L7RULE_ID, + provisioning_status=constants.PENDING_CREATE) + + # Test the revert + mock_l7rule_repo_update.reset_mock() + mark_l7rule_pending_create.revert(self.l7rule_mock) + + mock_l7rule_repo_update.assert_called_once_with( + 'TEST', + id=L7RULE_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_l7rule_repo_update.reset_mock() + mock_l7rule_repo_update.side_effect = Exception('fail') + mark_l7rule_pending_create.revert(self.l7rule_mock) + + mock_l7rule_repo_update.assert_called_once_with( + 'TEST', + id=L7RULE_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.L7RuleRepository.update') + def test_mark_l7rule_pending_delete_in_db(self, + mock_l7rule_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_l7rule_pending_delete = (database_tasks. 
+ MarkL7RulePendingDeleteInDB()) + mark_l7rule_pending_delete.execute(self.l7rule_mock) + + mock_l7rule_repo_update.assert_called_once_with( + 'TEST', + L7RULE_ID, + provisioning_status=constants.PENDING_DELETE) + + # Test the revert + mock_l7rule_repo_update.reset_mock() + mark_l7rule_pending_delete.revert(self.l7rule_mock) + + mock_l7rule_repo_update.assert_called_once_with( + 'TEST', + id=L7RULE_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_l7rule_repo_update.reset_mock() + mock_l7rule_repo_update.side_effect = Exception('fail') + mark_l7rule_pending_delete.revert(self.l7rule_mock) + + mock_l7rule_repo_update.assert_called_once_with( + 'TEST', + id=L7RULE_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.L7RuleRepository.update') + def test_mark_l7rule_pending_update_in_db(self, + mock_l7rule_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_l7rule_pending_update = (database_tasks. + MarkL7RulePendingUpdateInDB()) + mark_l7rule_pending_update.execute(self.l7rule_mock) + + mock_l7rule_repo_update.assert_called_once_with( + 'TEST', + L7RULE_ID, + provisioning_status=constants.PENDING_UPDATE) + + # Test the revert + mock_l7rule_repo_update.reset_mock() + mark_l7rule_pending_update.revert(self.l7rule_mock) + + mock_l7rule_repo_update.assert_called_once_with( + 'TEST', + id=L7RULE_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_l7rule_repo_update.reset_mock() + mock_l7rule_repo_update.side_effect = Exception('fail') + mark_l7rule_pending_update.revert(self.l7rule_mock) + + mock_l7rule_repo_update.assert_called_once_with( + 'TEST', + id=L7RULE_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.MemberRepository.update') + def test_mark_member_active_in_db(self, + mock_member_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_member_active = (database_tasks.MarkMemberActiveInDB()) + mark_member_active.execute(self.member_mock) + + mock_member_repo_update.assert_called_once_with( + 'TEST', + MEMBER_ID, + provisioning_status=constants.ACTIVE) + + # Test the revert + mock_member_repo_update.reset_mock() + mark_member_active.revert(self.member_mock) + + mock_member_repo_update.assert_called_once_with( + 'TEST', + id=MEMBER_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_member_repo_update.reset_mock() + mock_member_repo_update.side_effect = Exception('fail') + mark_member_active.revert(self.member_mock) + + mock_member_repo_update.assert_called_once_with( + 'TEST', + id=MEMBER_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.MemberRepository.update') + def test_mark_member_pending_create_in_db(self, + mock_member_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_member_pending_create = (database_tasks. 
+ MarkMemberPendingCreateInDB()) + mark_member_pending_create.execute(self.member_mock) + + mock_member_repo_update.assert_called_once_with( + 'TEST', + MEMBER_ID, + provisioning_status=constants.PENDING_CREATE) + + # Test the revert + mock_member_repo_update.reset_mock() + mark_member_pending_create.revert(self.member_mock) + + mock_member_repo_update.assert_called_once_with( + 'TEST', + id=MEMBER_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_member_repo_update.reset_mock() + mock_member_repo_update.side_effect = Exception('fail') + mark_member_pending_create.revert(self.member_mock) + + mock_member_repo_update.assert_called_once_with( + 'TEST', + id=MEMBER_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.MemberRepository.update') + def test_mark_member_pending_delete_in_db(self, + mock_member_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_member_pending_delete = (database_tasks. + MarkMemberPendingDeleteInDB()) + mark_member_pending_delete.execute(self.member_mock) + + mock_member_repo_update.assert_called_once_with( + 'TEST', + MEMBER_ID, + provisioning_status=constants.PENDING_DELETE) + + # Test the revert + mock_member_repo_update.reset_mock() + mark_member_pending_delete.revert(self.member_mock) + + mock_member_repo_update.assert_called_once_with( + 'TEST', + id=MEMBER_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_member_repo_update.reset_mock() + mock_member_repo_update.side_effect = Exception('fail') + mark_member_pending_delete.revert(self.member_mock) + + mock_member_repo_update.assert_called_once_with( + 'TEST', + id=MEMBER_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.MemberRepository.update') + def test_mark_member_pending_update_in_db(self, + mock_member_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_member_pending_update = (database_tasks. 
+ MarkMemberPendingUpdateInDB()) + mark_member_pending_update.execute(self.member_mock) + + mock_member_repo_update.assert_called_once_with( + 'TEST', + MEMBER_ID, + provisioning_status=constants.PENDING_UPDATE) + + # Test the revert + mock_member_repo_update.reset_mock() + mark_member_pending_update.revert(self.member_mock) + + mock_member_repo_update.assert_called_once_with( + 'TEST', + id=MEMBER_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_member_repo_update.reset_mock() + mock_member_repo_update.side_effect = Exception('fail') + mark_member_pending_update.revert(self.member_mock) + + mock_member_repo_update.assert_called_once_with( + 'TEST', + id=MEMBER_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.PoolRepository.update') + def test_mark_pool_active_in_db(self, + mock_pool_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_pool_active = (database_tasks.MarkPoolActiveInDB()) + mark_pool_active.execute(self.pool_mock) + + mock_pool_repo_update.assert_called_once_with( + 'TEST', + POOL_ID, + provisioning_status=constants.ACTIVE) + + # Test the revert + mock_pool_repo_update.reset_mock() + mark_pool_active.revert(self.pool_mock) + + mock_pool_repo_update.assert_called_once_with( + 'TEST', + id=POOL_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_pool_repo_update.reset_mock() + mock_pool_repo_update.side_effect = Exception('fail') + mark_pool_active.revert(self.pool_mock) + + mock_pool_repo_update.assert_called_once_with( + 'TEST', + id=POOL_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.PoolRepository.update') + def test_mark_pool_pending_create_in_db(self, + mock_pool_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_pool_pending_create = (database_tasks.MarkPoolPendingCreateInDB()) + mark_pool_pending_create.execute(self.pool_mock) + + mock_pool_repo_update.assert_called_once_with( + 'TEST', + POOL_ID, + provisioning_status=constants.PENDING_CREATE) + + # Test the revert + mock_pool_repo_update.reset_mock() + mark_pool_pending_create.revert(self.pool_mock) + + mock_pool_repo_update.assert_called_once_with( + 'TEST', + id=POOL_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_pool_repo_update.reset_mock() + mock_pool_repo_update.side_effect = Exception('fail') + mark_pool_pending_create.revert(self.pool_mock) + + mock_pool_repo_update.assert_called_once_with( + 'TEST', + id=POOL_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.PoolRepository.update') + def test_mark_pool_pending_delete_in_db(self, + mock_pool_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_pool_pending_delete = (database_tasks.MarkPoolPendingDeleteInDB()) + mark_pool_pending_delete.execute(self.pool_mock) + + mock_pool_repo_update.assert_called_once_with( + 'TEST', + POOL_ID, + provisioning_status=constants.PENDING_DELETE) + + # Test the revert + mock_pool_repo_update.reset_mock() + mark_pool_pending_delete.revert(self.pool_mock) + + 
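+        # The revert should move the pool to provisioning_status ERROR.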
mock_pool_repo_update.assert_called_once_with( + 'TEST', + id=POOL_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_pool_repo_update.reset_mock() + mock_pool_repo_update.side_effect = Exception('fail') + mark_pool_pending_delete.revert(self.pool_mock) + + mock_pool_repo_update.assert_called_once_with( + 'TEST', + id=POOL_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.PoolRepository.update') + def test_mark_pool_pending_update_in_db(self, + mock_pool_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_pool_pending_update = (database_tasks. + MarkPoolPendingUpdateInDB()) + mark_pool_pending_update.execute(self.pool_mock) + + mock_pool_repo_update.assert_called_once_with( + 'TEST', + POOL_ID, + provisioning_status=constants.PENDING_UPDATE) + + # Test the revert + mock_pool_repo_update.reset_mock() + mark_pool_pending_update.revert(self.pool_mock) + + mock_pool_repo_update.assert_called_once_with( + 'TEST', + id=POOL_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_pool_repo_update.reset_mock() + mock_pool_repo_update.side_effect = Exception('fail') + mark_pool_pending_update.revert(self.pool_mock) + + mock_pool_repo_update.assert_called_once_with( + 'TEST', + id=POOL_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.MemberRepository.update_pool_members') + def test_update_pool_members_operating_status_in_db( + self, + mock_member_repo_update_pool_members, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + update_members = database_tasks.UpdatePoolMembersOperatingStatusInDB() + update_members.execute(self.pool_mock, constants.ONLINE) + + mock_member_repo_update_pool_members.assert_called_once_with( + 'TEST', + POOL_ID, + operating_status=constants.ONLINE) diff --git a/octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks_quota.py b/octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks_quota.py new file mode 100644 index 0000000000..576109d433 --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks_quota.py @@ -0,0 +1,322 @@ +# Copyright 2017 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
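The quota tests that follow all drive one task shape: execute() takes a database session, calls Repositories.decrement_quota() for the object's project and commits, rolling back and re-raising on failure; revert() gives the quota back through check_quota_met() with a fresh lock session, but only when the wrapped flow did not itself fail, and it swallows session errors rather than failing the revert. A minimal sketch of that shape, inferred from the assertions in these tests rather than copied from the Octavia source (the taskflow base class and the self.repos attribute are assumptions for illustration):

    from taskflow import task
    from taskflow.types import failure

    from octavia.common import data_models
    from octavia.db import api as db_apis
    from octavia.db import repositories as repo


    class DecrementListenerQuota(task.Task):
        """Sketch of the decrement/give-back pattern asserted below."""

        def __init__(self, **kwargs):
            self.repos = repo.Repositories()
            super(DecrementListenerQuota, self).__init__(**kwargs)

        def execute(self, listener):
            session = db_apis.get_session()
            try:
                self.repos.decrement_quota(session, data_models.Listener,
                                           listener.project_id)
                session.commit()
            except Exception:
                session.rollback()
                raise

        def revert(self, listener, result, *args, **kwargs):
            if isinstance(result, failure.Failure):
                return  # the flow failed before the quota was decremented
            try:
                session = db_apis.get_session()
                lock_session = db_apis.get_session()
                try:
                    self.repos.check_quota_met(session, lock_session,
                                               data_models.Listener,
                                               listener.project_id)
                    lock_session.commit()
                except Exception:
                    lock_session.rollback()
            except Exception:
                pass  # a broken session lookup is ignored, per the last case

The pool variant additionally receives the pool_child_count dict so it can give back health monitor and member quota as well, which is why the pool tests thread that extra argument through both execute and revert.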
+# + +import mock +from oslo_utils import uuidutils +from taskflow.types import failure + +from octavia.common import data_models +from octavia.common import exceptions +from octavia.controller.worker.v2.tasks import database_tasks +import octavia.tests.unit.base as base + + +class TestDatabaseTasksQuota(base.TestCase): + + def setUp(self): + + self._tf_failure_mock = mock.Mock(spec=failure.Failure) + self.zero_pool_child_count = {'HM': 0, 'member': 0} + + super(TestDatabaseTasksQuota, self).setUp() + + @mock.patch('octavia.db.api.get_session', return_value='TEST') + @mock.patch('octavia.db.repositories.Repositories.decrement_quota') + @mock.patch('octavia.db.repositories.Repositories.check_quota_met') + def _test_decrement_quota(self, + task, + data_model, + mock_check_quota_met, + mock_decrement_quota, + mock_get_session): + + project_id = uuidutils.generate_uuid() + test_object = mock.MagicMock() + test_object.project_id = project_id + + # execute without exception + mock_decrement_quota.reset_mock() + with mock.patch('octavia.db.api.' + 'get_session') as mock_get_session_local: + mock_session = mock.MagicMock() + mock_get_session_local.return_value = mock_session + + if data_model == data_models.Pool: + task.execute(test_object, self.zero_pool_child_count) + else: + task.execute(test_object) + + mock_decrement_quota.assert_called_once_with( + mock_session, data_model, project_id) + + mock_session.commit.assert_called_once_with() + + # execute with exception + mock_decrement_quota.reset_mock() + with mock.patch('octavia.db.api.' + 'get_session') as mock_get_session_local: + mock_session = mock.MagicMock() + mock_get_session_local.return_value = mock_session + + mock_decrement_quota.side_effect = ( + exceptions.OctaviaException('fail')) + if data_model == data_models.Pool: + self.assertRaises(exceptions.OctaviaException, + task.execute, + test_object, + self.zero_pool_child_count) + else: + self.assertRaises(exceptions.OctaviaException, + task.execute, + test_object) + + mock_decrement_quota.assert_called_once_with( + mock_session, data_model, project_id) + + mock_session.rollback.assert_called_once_with() + + # revert with instance of failure + mock_get_session.reset_mock() + mock_check_quota_met.reset_mock() + if data_model == data_models.Pool: + task.revert(test_object, + self.zero_pool_child_count, + self._tf_failure_mock) + else: + task.revert(test_object, self._tf_failure_mock) + self.assertFalse(mock_get_session.called) + self.assertFalse(mock_check_quota_met.called) + + # revert + mock_check_quota_met.reset_mock() + with mock.patch('octavia.db.api.' + 'get_session') as mock_get_session_local: + mock_session = mock.MagicMock() + mock_lock_session = mock.MagicMock() + mock_get_session_local.side_effect = [mock_session, + mock_lock_session] + + if data_model == data_models.Pool: + task.revert(test_object, self.zero_pool_child_count, None) + else: + task.revert(test_object, None) + + mock_check_quota_met.assert_called_once_with( + mock_session, mock_lock_session, data_model, + project_id) + + mock_lock_session.commit.assert_called_once_with() + + # revert with rollback + with mock.patch('octavia.db.api.' 
+ 'get_session') as mock_get_session_local: + mock_session = mock.MagicMock() + mock_lock_session = mock.MagicMock() + mock_get_session_local.side_effect = [mock_session, + mock_lock_session] + mock_check_quota_met.side_effect = ( + exceptions.OctaviaException('fail')) + + if data_model == data_models.Pool: + task.revert(test_object, self.zero_pool_child_count, None) + else: + task.revert(test_object, None) + + mock_lock_session.rollback.assert_called_once_with() + + # revert with db exception + mock_check_quota_met.reset_mock() + with mock.patch('octavia.db.api.' + 'get_session') as mock_get_session_local: + mock_get_session_local.side_effect = Exception('fail') + + if data_model == data_models.Pool: + task.revert(test_object, self.zero_pool_child_count, None) + else: + task.revert(test_object, None) + + self.assertFalse(mock_check_quota_met.called) + + def test_decrement_health_monitor_quota(self): + task = database_tasks.DecrementHealthMonitorQuota() + data_model = data_models.HealthMonitor + self._test_decrement_quota(task, data_model) + + def test_decrement_listener_quota(self): + task = database_tasks.DecrementListenerQuota() + data_model = data_models.Listener + self._test_decrement_quota(task, data_model) + + def test_decrement_loadbalancer_quota(self): + task = database_tasks.DecrementLoadBalancerQuota() + data_model = data_models.LoadBalancer + self._test_decrement_quota(task, data_model) + + def test_decrement_pool_quota(self): + task = database_tasks.DecrementPoolQuota() + data_model = data_models.Pool + self._test_decrement_quota(task, data_model) + + def test_decrement_member_quota(self): + task = database_tasks.DecrementMemberQuota() + data_model = data_models.Member + self._test_decrement_quota(task, data_model) + + @mock.patch('octavia.db.repositories.Repositories.decrement_quota') + @mock.patch('octavia.db.repositories.Repositories.check_quota_met') + def test_decrement_pool_quota_pool_children(self, + mock_check_quota_met, + mock_decrement_quota): + pool_child_count = {'HM': 1, 'member': 2} + project_id = uuidutils.generate_uuid() + test_object = mock.MagicMock() + test_object.project_id = project_id + task = database_tasks.DecrementPoolQuota() + mock_session = mock.MagicMock() + + with mock.patch('octavia.db.api.' + 'get_session') as mock_get_session_local: + mock_get_session_local.return_value = mock_session + + task.execute(test_object, pool_child_count) + + calls = [mock.call(mock_session, data_models.Pool, project_id), + mock.call(mock_session, data_models.HealthMonitor, + project_id), + mock.call(mock_session, data_models.Member, project_id, + quantity=2)] + + mock_decrement_quota.assert_has_calls(calls) + + mock_session.commit.assert_called_once_with() + + # revert + mock_session.reset_mock() + with mock.patch('octavia.db.api.' 
+ 'get_session') as mock_get_session_local: + mock_lock_session = mock.MagicMock() + mock_get_session_local.side_effect = [mock_session, + mock_lock_session, + mock_lock_session, + mock_lock_session, + mock_lock_session] + + task.revert(test_object, pool_child_count, None) + + calls = [mock.call(mock_session, mock_lock_session, + data_models.Pool, project_id), + mock.call(mock_session, mock_lock_session, + data_models.HealthMonitor, project_id), + mock.call(mock_session, mock_lock_session, + data_models.Member, project_id), + mock.call(mock_session, mock_lock_session, + data_models.Member, project_id)] + + mock_check_quota_met.assert_has_calls(calls) + + self.assertEqual(4, mock_lock_session.commit.call_count) + + # revert with health monitor quota exception + mock_session.reset_mock() + mock_check_quota_met.side_effect = [None, Exception('fail'), None, + None] + with mock.patch('octavia.db.api.' + 'get_session') as mock_get_session_local: + mock_lock_session = mock.MagicMock() + mock_get_session_local.side_effect = [mock_session, + mock_lock_session, + mock_lock_session, + mock_lock_session, + mock_lock_session] + + task.revert(test_object, pool_child_count, None) + + calls = [mock.call(mock_session, mock_lock_session, + data_models.Pool, project_id), + mock.call(mock_session, mock_lock_session, + data_models.HealthMonitor, project_id), + mock.call(mock_session, mock_lock_session, + data_models.Member, project_id), + mock.call(mock_session, mock_lock_session, + data_models.Member, project_id)] + + mock_check_quota_met.assert_has_calls(calls) + + self.assertEqual(3, mock_lock_session.commit.call_count) + self.assertEqual(1, mock_lock_session.rollback.call_count) + + # revert with member quota exception + mock_session.reset_mock() + mock_check_quota_met.side_effect = [None, None, None, + Exception('fail')] + with mock.patch('octavia.db.api.' 
+ 'get_session') as mock_get_session_local: + mock_lock_session = mock.MagicMock() + mock_get_session_local.side_effect = [mock_session, + mock_lock_session, + mock_lock_session, + mock_lock_session, + mock_lock_session] + + task.revert(test_object, pool_child_count, None) + + calls = [mock.call(mock_session, mock_lock_session, + data_models.Pool, project_id), + mock.call(mock_session, mock_lock_session, + data_models.HealthMonitor, project_id), + mock.call(mock_session, mock_lock_session, + data_models.Member, project_id), + mock.call(mock_session, mock_lock_session, + data_models.Member, project_id)] + + mock_check_quota_met.assert_has_calls(calls) + + self.assertEqual(3, mock_lock_session.commit.call_count) + self.assertEqual(1, mock_lock_session.rollback.call_count) + + def test_count_pool_children_for_quota(self): + project_id = uuidutils.generate_uuid() + member1 = data_models.Member(id=1, project_id=project_id) + member2 = data_models.Member(id=2, project_id=project_id) + healtmon = data_models.HealthMonitor(id=1, project_id=project_id) + pool_no_children = data_models.Pool(id=1, project_id=project_id) + pool_1_mem = data_models.Pool(id=1, project_id=project_id, + members=[member1]) + pool_hm = data_models.Pool(id=1, project_id=project_id, + health_monitor=healtmon) + pool_hm_2_mem = data_models.Pool(id=1, project_id=project_id, + health_monitor=healtmon, + members=[member1, member2]) + task = database_tasks.CountPoolChildrenForQuota() + + # Test pool with no children + result = task.execute(pool_no_children) + + self.assertEqual({'HM': 0, 'member': 0}, result) + + # Test pool with one member + result = task.execute(pool_1_mem) + + self.assertEqual({'HM': 0, 'member': 1}, result) + + # Test pool with health monitor and no members + result = task.execute(pool_hm) + + self.assertEqual({'HM': 1, 'member': 0}, result) + + # Test pool with health monitor and two members + result = task.execute(pool_hm_2_mem) + + self.assertEqual({'HM': 1, 'member': 2}, result) diff --git a/octavia/tests/unit/controller/worker/v2/tasks/test_lifecycle_tasks.py b/octavia/tests/unit/controller/worker/v2/tasks/test_lifecycle_tasks.py new file mode 100644 index 0000000000..8946ee3c31 --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/tasks/test_lifecycle_tasks.py @@ -0,0 +1,401 @@ +# Copyright 2016 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
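Every task exercised in this file follows the same lifecycle pattern: execute() is a no-op on the happy path, and revert() uses TaskUtils to push the failed object to ERROR while returning its parent listener(s) and load balancer (and, for members, the pool) to ACTIVE. A short sketch of that pattern as these tests assert it (the taskflow base class and the self.task_utils attribute are illustrative assumptions; the TaskUtils helpers are the methods patched below):

    from taskflow import task

    from octavia.controller.worker import task_utils as task_utilities


    class PoolToErrorOnRevertTask(task.Task):
        """Set a pool to ERROR if the flow wrapping it is reverted."""

        def __init__(self, **kwargs):
            self.task_utils = task_utilities.TaskUtils()
            super(PoolToErrorOnRevertTask, self).__init__(**kwargs)

        def execute(self, pool, listeners, loadbalancer):
            pass  # nothing to do while the flow is healthy

        def revert(self, pool, listeners, loadbalancer, *args, **kwargs):
            # Mark the object that failed, then restore its parents.
            self.task_utils.mark_pool_prov_status_error(pool.id)
            self.task_utils.mark_loadbalancer_prov_status_active(
                loadbalancer.id)
            for listener in listeners:
                self.task_utils.mark_listener_prov_status_active(listener.id)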
+ +import mock +from oslo_utils import uuidutils + +from octavia.controller.worker.v2.tasks import lifecycle_tasks +import octavia.tests.unit.base as base + + +class TestLifecycleTasks(base.TestCase): + + def setUp(self): + + self.AMPHORA = mock.MagicMock() + self.AMPHORA_ID = uuidutils.generate_uuid() + self.AMPHORA.id = self.AMPHORA_ID + self.HEALTH_MON = mock.MagicMock() + self.HEALTH_MON_ID = uuidutils.generate_uuid() + self.HEALTH_MON.pool_id = self.HEALTH_MON_ID + self.L7POLICY = mock.MagicMock() + self.L7POLICY_ID = uuidutils.generate_uuid() + self.L7POLICY.id = self.L7POLICY_ID + self.L7RULE = mock.MagicMock() + self.L7RULE_ID = uuidutils.generate_uuid() + self.L7RULE.id = self.L7RULE_ID + self.LISTENER = mock.MagicMock() + self.LISTENER_ID = uuidutils.generate_uuid() + self.LISTENER.id = self.LISTENER_ID + self.LISTENERS = [self.LISTENER] + self.LOADBALANCER = mock.MagicMock() + self.LOADBALANCER_ID = uuidutils.generate_uuid() + self.LOADBALANCER.id = self.LOADBALANCER_ID + self.LISTENER.load_balancer = self.LOADBALANCER + self.MEMBER = mock.MagicMock() + self.MEMBER_ID = uuidutils.generate_uuid() + self.MEMBER.id = self.MEMBER_ID + self.MEMBERS = [self.MEMBER] + self.POOL = mock.MagicMock() + self.POOL_ID = uuidutils.generate_uuid() + self.POOL.id = self.POOL_ID + + super(TestLifecycleTasks, self).setUp() + + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'unmark_amphora_health_busy') + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_amphora_status_error') + def test_AmphoraIDToErrorOnRevertTask(self, mock_amp_status_error, + mock_amp_health_busy): + + amp_id_to_error_on_revert = (lifecycle_tasks. + AmphoraIDToErrorOnRevertTask()) + + # Execute + amp_id_to_error_on_revert.execute(self.AMPHORA_ID) + + self.assertFalse(mock_amp_status_error.called) + + # Revert + amp_id_to_error_on_revert.revert(self.AMPHORA_ID) + + mock_amp_status_error.assert_called_once_with(self.AMPHORA_ID) + self.assertFalse(mock_amp_health_busy.called) + + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'unmark_amphora_health_busy') + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_amphora_status_error') + def test_AmphoraToErrorOnRevertTask(self, mock_amp_status_error, + mock_amp_health_busy): + + amp_to_error_on_revert = lifecycle_tasks.AmphoraToErrorOnRevertTask() + + # Execute + amp_to_error_on_revert.execute(self.AMPHORA) + + self.assertFalse(mock_amp_status_error.called) + + # Revert + amp_to_error_on_revert.revert(self.AMPHORA) + + mock_amp_status_error.assert_called_once_with(self.AMPHORA_ID) + self.assertFalse(mock_amp_health_busy.called) + + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_health_mon_prov_status_error') + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_loadbalancer_prov_status_active') + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_listener_prov_status_active') + def test_HealthMonitorToErrorOnRevertTask( + self, + mock_listener_prov_status_active, + mock_loadbalancer_prov_status_active, + mock_health_mon_prov_status_error): + + health_mon_to_error_on_revert = (lifecycle_tasks. 
+ HealthMonitorToErrorOnRevertTask()) + + # Execute + health_mon_to_error_on_revert.execute(self.HEALTH_MON, + self.LISTENERS, + self.LOADBALANCER) + + self.assertFalse(mock_health_mon_prov_status_error.called) + + # Revert + health_mon_to_error_on_revert.revert(self.HEALTH_MON, + self.LISTENERS, + self.LOADBALANCER) + + mock_health_mon_prov_status_error.assert_called_once_with( + self.HEALTH_MON_ID) + mock_loadbalancer_prov_status_active.assert_called_once_with( + self.LOADBALANCER_ID) + mock_listener_prov_status_active.assert_called_once_with( + self.LISTENER_ID) + + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_l7policy_prov_status_error') + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_loadbalancer_prov_status_active') + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_listener_prov_status_active') + def test_L7PolicyToErrorOnRevertTask( + self, + mock_listener_prov_status_active, + mock_loadbalancer_prov_status_active, + mock_l7policy_prov_status_error): + + l7policy_to_error_on_revert = (lifecycle_tasks. + L7PolicyToErrorOnRevertTask()) + + # Execute + l7policy_to_error_on_revert.execute(self.L7POLICY, + self.LISTENERS, + self.LOADBALANCER) + + self.assertFalse(mock_l7policy_prov_status_error.called) + + # Revert + l7policy_to_error_on_revert.revert(self.L7POLICY, + self.LISTENERS, + self.LOADBALANCER) + + mock_l7policy_prov_status_error.assert_called_once_with( + self.L7POLICY_ID) + mock_loadbalancer_prov_status_active.assert_called_once_with( + self.LOADBALANCER_ID) + mock_listener_prov_status_active.assert_called_once_with( + self.LISTENER_ID) + + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_l7rule_prov_status_error') + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_loadbalancer_prov_status_active') + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_listener_prov_status_active') + def test_L7RuleToErrorOnRevertTask( + self, + mock_listener_prov_status_active, + mock_loadbalancer_prov_status_active, + mock_l7rule_prov_status_error): + + l7rule_to_error_on_revert = (lifecycle_tasks. + L7RuleToErrorOnRevertTask()) + + # Execute + l7rule_to_error_on_revert.execute(self.L7RULE, + self.LISTENERS, + self.LOADBALANCER) + + self.assertFalse(mock_l7rule_prov_status_error.called) + + # Revert + l7rule_to_error_on_revert.revert(self.L7RULE, + self.LISTENERS, + self.LOADBALANCER) + + mock_l7rule_prov_status_error.assert_called_once_with( + self.L7RULE_ID) + mock_loadbalancer_prov_status_active.assert_called_once_with( + self.LOADBALANCER_ID) + mock_listener_prov_status_active.assert_called_once_with( + self.LISTENER_ID) + + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_loadbalancer_prov_status_active') + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_listener_prov_status_error') + def test_ListenerToErrorOnRevertTask( + self, + mock_listener_prov_status_error, + mock_loadbalancer_prov_status_active): + + listener_to_error_on_revert = (lifecycle_tasks. + ListenerToErrorOnRevertTask()) + + # Execute + listener_to_error_on_revert.execute(self.LISTENER) + + self.assertFalse(mock_listener_prov_status_error.called) + + # Revert + listener_to_error_on_revert.revert(self.LISTENER) + + mock_listener_prov_status_error.assert_called_once_with( + self.LISTENER_ID) + mock_loadbalancer_prov_status_active.assert_called_once_with( + self.LOADBALANCER_ID) + + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' 
+ 'mark_loadbalancer_prov_status_active') + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_listener_prov_status_error') + def test_ListenersToErrorOnRevertTask( + self, + mock_listener_prov_status_error, + mock_loadbalancer_prov_status_active): + + listeners_to_error_on_revert = (lifecycle_tasks. + ListenersToErrorOnRevertTask()) + + # Execute + listeners_to_error_on_revert.execute(self.LISTENERS, + self.LOADBALANCER) + + self.assertFalse(mock_listener_prov_status_error.called) + + # Revert + listeners_to_error_on_revert.revert(self.LISTENERS, + self.LOADBALANCER) + + mock_listener_prov_status_error.assert_called_once_with( + self.LISTENER_ID) + mock_loadbalancer_prov_status_active.assert_called_once_with( + self.LOADBALANCER_ID) + + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_loadbalancer_prov_status_error') + def test_LoadBalancerIDToErrorOnRevertTask( + self, + mock_loadbalancer_prov_status_error): + + loadbalancer_id_to_error_on_revert = ( + lifecycle_tasks.LoadBalancerIDToErrorOnRevertTask()) + + # Execute + loadbalancer_id_to_error_on_revert.execute(self.LOADBALANCER_ID) + + self.assertFalse(mock_loadbalancer_prov_status_error.called) + + # Revert + loadbalancer_id_to_error_on_revert.revert(self.LOADBALANCER_ID) + + mock_loadbalancer_prov_status_error.assert_called_once_with( + self.LOADBALANCER_ID) + + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_loadbalancer_prov_status_error') + def test_LoadBalancerToErrorOnRevertTask( + self, + mock_loadbalancer_prov_status_error): + + loadbalancer_to_error_on_revert = ( + lifecycle_tasks.LoadBalancerToErrorOnRevertTask()) + + # Execute + loadbalancer_to_error_on_revert.execute(self.LOADBALANCER) + + self.assertFalse(mock_loadbalancer_prov_status_error.called) + + # Revert + loadbalancer_to_error_on_revert.revert(self.LOADBALANCER) + + mock_loadbalancer_prov_status_error.assert_called_once_with( + self.LOADBALANCER_ID) + + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_member_prov_status_error') + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_loadbalancer_prov_status_active') + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_listener_prov_status_active') + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_pool_prov_status_active') + def test_MemberToErrorOnRevertTask( + self, + mock_pool_prov_status_active, + mock_listener_prov_status_active, + mock_loadbalancer_prov_status_active, + mock_member_prov_status_error): + member_to_error_on_revert = lifecycle_tasks.MemberToErrorOnRevertTask() + + # Execute + member_to_error_on_revert.execute(self.MEMBER, + self.LISTENERS, + self.LOADBALANCER, + self.POOL) + + self.assertFalse(mock_member_prov_status_error.called) + + # Revert + member_to_error_on_revert.revert(self.MEMBER, + self.LISTENERS, + self.LOADBALANCER, + self.POOL) + + mock_member_prov_status_error.assert_called_once_with( + self.MEMBER_ID) + mock_loadbalancer_prov_status_active.assert_called_once_with( + self.LOADBALANCER_ID) + mock_listener_prov_status_active.assert_called_once_with( + self.LISTENER_ID) + mock_pool_prov_status_active.assert_called_once_with( + self.POOL_ID) + + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_member_prov_status_error') + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_loadbalancer_prov_status_active') + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' 
+ 'mark_listener_prov_status_active') + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_pool_prov_status_active') + def test_MembersToErrorOnRevertTask( + self, + mock_pool_prov_status_active, + mock_listener_prov_status_active, + mock_loadbalancer_prov_status_active, + mock_member_prov_status_error): + members_to_error_on_revert = ( + lifecycle_tasks.MembersToErrorOnRevertTask()) + + # Execute + members_to_error_on_revert.execute(self.MEMBERS, + self.LISTENERS, + self.LOADBALANCER, + self.POOL) + + self.assertFalse(mock_member_prov_status_error.called) + + # Revert + members_to_error_on_revert.revert(self.MEMBERS, + self.LISTENERS, + self.LOADBALANCER, + self.POOL) + + mock_member_prov_status_error.assert_called_once_with( + self.MEMBER_ID) + mock_loadbalancer_prov_status_active.assert_called_once_with( + self.LOADBALANCER_ID) + mock_listener_prov_status_active.assert_called_once_with( + self.LISTENER_ID) + mock_pool_prov_status_active.assert_called_once_with( + self.POOL_ID) + + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_pool_prov_status_error') + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_loadbalancer_prov_status_active') + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_listener_prov_status_active') + def test_PoolToErrorOnRevertTask( + self, + mock_listener_prov_status_active, + mock_loadbalancer_prov_status_active, + mock_pool_prov_status_error): + + pool_to_error_on_revert = lifecycle_tasks.PoolToErrorOnRevertTask() + + # Execute + pool_to_error_on_revert.execute(self.POOL, + self.LISTENERS, + self.LOADBALANCER) + + self.assertFalse(mock_pool_prov_status_error.called) + + # Revert + pool_to_error_on_revert.revert(self.POOL, + self.LISTENERS, + self.LOADBALANCER) + + mock_pool_prov_status_error.assert_called_once_with( + self.POOL_ID) + mock_loadbalancer_prov_status_active.assert_called_once_with( + self.LOADBALANCER_ID) + mock_listener_prov_status_active.assert_called_once_with( + self.LISTENER_ID) diff --git a/octavia/tests/unit/controller/worker/v2/tasks/test_model_tasks.py b/octavia/tests/unit/controller/worker/v2/tasks/test_model_tasks.py new file mode 100644 index 0000000000..02e76b5283 --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/tasks/test_model_tasks.py @@ -0,0 +1,44 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
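The model tasks tested in this file are deliberately thin: one calls delete() on whatever data model object it is handed, the other applies a dict of new attribute values through update(). A sketch of that shape as implied by the assertions here; only the taskflow base class is assumed, and the odd 'object' parameter name reflects that taskflow binds task arguments by name when these run inside a flow:

    from taskflow import task


    class DeleteModelObject(task.Task):
        """Remove a data model object from the in-memory model graph."""

        def execute(self, object):
            object.delete()


    class UpdateAttributes(task.Task):
        """Apply a dict of changed attributes to a data model object."""

        def execute(self, object, update_dict):
            object.update(update_dict)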
+# + +import mock + +from octavia.controller.worker.v2.tasks import model_tasks +import octavia.tests.unit.base as base + + +class TestObjectUpdateTasks(base.TestCase): + + def setUp(self): + + self.listener_mock = mock.MagicMock() + self.listener_mock.name = 'TEST' + + super(TestObjectUpdateTasks, self).setUp() + + def test_delete_model_object(self): + + delete_object = model_tasks.DeleteModelObject() + delete_object.execute(self.listener_mock) + + self.listener_mock.delete.assert_called_once_with() + + def test_update_listener(self): + + update_attr = model_tasks.UpdateAttributes() + update_attr.execute(self.listener_mock, + {'name': 'TEST2'}) + + self.listener_mock.update.assert_called_once_with({'name': 'TEST2'}) diff --git a/octavia/tests/unit/controller/worker/v2/tasks/test_network_tasks.py b/octavia/tests/unit/controller/worker/v2/tasks/test_network_tasks.py new file mode 100644 index 0000000000..d15392fe8e --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/tasks/test_network_tasks.py @@ -0,0 +1,801 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import mock +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_utils import uuidutils +from taskflow.types import failure + +from octavia.common import constants +from octavia.common import data_models as o_data_models +from octavia.controller.worker.v2.tasks import network_tasks +from octavia.network import base as net_base +from octavia.network import data_models +from octavia.tests.common import constants as t_constants +import octavia.tests.unit.base as base + + +AMPHORA_ID = 7 +COMPUTE_ID = uuidutils.generate_uuid() +PORT_ID = uuidutils.generate_uuid() +SUBNET_ID = uuidutils.generate_uuid() +NETWORK_ID = uuidutils.generate_uuid() +IP_ADDRESS = "172.24.41.1" +VIP = o_data_models.Vip(port_id=t_constants.MOCK_PORT_ID, + subnet_id=t_constants.MOCK_SUBNET_ID, + qos_policy_id=t_constants.MOCK_QOS_POLICY_ID1) +VIP2 = o_data_models.Vip(port_id=t_constants.MOCK_PORT_ID2, + subnet_id=t_constants.MOCK_SUBNET_ID2, + qos_policy_id=t_constants.MOCK_QOS_POLICY_ID2) +LB = o_data_models.LoadBalancer(vip=VIP) +LB2 = o_data_models.LoadBalancer(vip=VIP2) +FIRST_IP = {"ip_address": IP_ADDRESS, "subnet_id": SUBNET_ID} +FIXED_IPS = [FIRST_IP] +INTERFACE = data_models.Interface(id=uuidutils.generate_uuid(), + compute_id=COMPUTE_ID, fixed_ips=FIXED_IPS, + port_id=PORT_ID) +AMPS_DATA = [o_data_models.Amphora(id=t_constants.MOCK_AMP_ID1, + vrrp_port_id=t_constants.MOCK_VRRP_PORT_ID1, + vrrp_ip=t_constants.MOCK_VRRP_IP1), + o_data_models.Amphora(id=t_constants.MOCK_AMP_ID2, + vrrp_port_id=t_constants.MOCK_VRRP_PORT_ID2, + vrrp_ip=t_constants.MOCK_VRRP_IP2) + ] +UPDATE_DICT = {constants.TOPOLOGY: None} + + +class TestException(Exception): + + def __init__(self, value): + self.value = value + + def __str__(self): + return repr(self.value) + + +@mock.patch('octavia.common.utils.get_network_driver') +class TestNetworkTasks(base.TestCase): + def setUp(self): 
+ network_tasks.LOG = mock.MagicMock() + self.amphora_mock = mock.MagicMock() + self.load_balancer_mock = mock.MagicMock() + self.vip_mock = mock.MagicMock() + self.vip_mock.subnet_id = SUBNET_ID + self.load_balancer_mock.vip = self.vip_mock + self.load_balancer_mock.amphorae = [] + self.amphora_mock.id = AMPHORA_ID + self.amphora_mock.compute_id = COMPUTE_ID + self.amphora_mock.status = constants.AMPHORA_ALLOCATED + conf = oslo_fixture.Config(cfg.CONF) + conf.config(group="controller_worker", amp_boot_network_list=['netid']) + + super(TestNetworkTasks, self).setUp() + + def test_calculate_delta(self, mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + EMPTY = {} + empty_deltas = {self.amphora_mock.id: data_models.Delta( + amphora_id=self.amphora_mock.id, + compute_id=self.amphora_mock.compute_id, + add_nics=[], + delete_nics=[])} + + calc_delta = network_tasks.CalculateDelta() + + self.assertEqual(EMPTY, calc_delta.execute(self.load_balancer_mock)) + + # Test with one amp and no pools, nothing plugged + # Delta should be empty + mock_driver.reset_mock() + + self.amphora_mock.load_balancer = self.load_balancer_mock + self.load_balancer_mock.amphorae = [self.amphora_mock] + self.load_balancer_mock.pools = [] + + self.assertEqual(empty_deltas, + calc_delta.execute(self.load_balancer_mock)) + mock_driver.get_plugged_networks.assert_called_once_with(COMPUTE_ID) + + # Pool mock should be configured explicitly for each test + pool_mock = mock.MagicMock() + self.load_balancer_mock.pools = [pool_mock] + + # Test with one amp and one pool but no members, nothing plugged + # Delta should be empty + pool_mock.members = [] + self.assertEqual(empty_deltas, + calc_delta.execute(self.load_balancer_mock)) + + # Test with one amp and one pool and one member, nothing plugged + # Delta should be one additional subnet to plug + mock_driver.reset_mock() + member_mock = mock.MagicMock() + member_mock.subnet_id = 1 + pool_mock.members = [member_mock] + mock_driver.get_subnet.return_value = data_models.Subnet(id=2, + network_id=3) + + ndm = data_models.Delta(amphora_id=self.amphora_mock.id, + compute_id=self.amphora_mock.compute_id, + add_nics=[ + data_models.Interface(network_id=2)], + delete_nics=[]) + self.assertEqual({self.amphora_mock.id: ndm}, + calc_delta.execute(self.load_balancer_mock)) + + vrrp_port_call = mock.call(self.amphora_mock.vrrp_port_id) + mock_driver.get_port.assert_has_calls([vrrp_port_call]) + self.assertEqual(1, mock_driver.get_port.call_count) + + member_subnet_call = mock.call(member_mock.subnet_id) + mock_driver.get_subnet.assert_has_calls([member_subnet_call]) + self.assertEqual(1, mock_driver.get_subnet.call_count) + + # Test with one amp and one pool and one member, already plugged + # Delta should be empty + mock_driver.reset_mock() + member_mock = mock.MagicMock() + member_mock.subnet_id = 1 + pool_mock.members = [member_mock] + mock_driver.get_plugged_networks.return_value = [ + data_models.Interface(network_id=2)] + + self.assertEqual(empty_deltas, + calc_delta.execute(self.load_balancer_mock)) + + # Test with one amp and one pool and one member, wrong network plugged + # Delta should be one network to add and one to remove + mock_driver.reset_mock() + member_mock = mock.MagicMock() + member_mock.subnet_id = 1 + pool_mock.members = [member_mock] + mock_driver.get_plugged_networks.return_value = [ + data_models.Interface(network_id=3)] + + ndm = data_models.Delta(amphora_id=self.amphora_mock.id, + 
compute_id=self.amphora_mock.compute_id, + add_nics=[ + data_models.Interface(network_id=2)], + delete_nics=[ + data_models.Interface(network_id=3)]) + self.assertEqual({self.amphora_mock.id: ndm}, + calc_delta.execute(self.load_balancer_mock)) + + # Test with one amp and one pool and no members, one network plugged + # Delta should be one network to remove + mock_driver.reset_mock() + pool_mock.members = [] + mock_driver.get_plugged_networks.return_value = [ + data_models.Interface(network_id=2)] + + ndm = data_models.Delta(amphora_id=self.amphora_mock.id, + compute_id=self.amphora_mock.compute_id, + add_nics=[], + delete_nics=[ + data_models.Interface(network_id=2)]) + self.assertEqual({self.amphora_mock.id: ndm}, + calc_delta.execute(self.load_balancer_mock)) + + def test_get_plumbed_networks(self, mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + mock_driver.get_plugged_networks.side_effect = [['blah']] + net = network_tasks.GetPlumbedNetworks() + + self.assertEqual(['blah'], net.execute(self.amphora_mock)) + mock_driver.get_plugged_networks.assert_called_once_with( + COMPUTE_ID) + + def test_plug_networks(self, mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + + def _interface(network_id): + return [data_models.Interface(network_id=network_id)] + + net = network_tasks.PlugNetworks() + + net.execute(self.amphora_mock, None) + self.assertFalse(mock_driver.plug_network.called) + + delta = data_models.Delta(amphora_id=self.amphora_mock.id, + compute_id=self.amphora_mock.compute_id, + add_nics=[], + delete_nics=[]) + net.execute(self.amphora_mock, delta) + self.assertFalse(mock_driver.plug_network.called) + + delta = data_models.Delta(amphora_id=self.amphora_mock.id, + compute_id=self.amphora_mock.compute_id, + add_nics=_interface(1), + delete_nics=[]) + net.execute(self.amphora_mock, delta) + mock_driver.plug_network.assert_called_once_with(COMPUTE_ID, 1) + + # revert + net.revert(self.amphora_mock, None) + self.assertFalse(mock_driver.unplug_network.called) + + delta = data_models.Delta(amphora_id=self.amphora_mock.id, + compute_id=self.amphora_mock.compute_id, + add_nics=[], + delete_nics=[]) + net.revert(self.amphora_mock, delta) + self.assertFalse(mock_driver.unplug_network.called) + + delta = data_models.Delta(amphora_id=self.amphora_mock.id, + compute_id=self.amphora_mock.compute_id, + add_nics=_interface(1), + delete_nics=[]) + net.revert(self.amphora_mock, delta) + mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1) + + mock_driver.reset_mock() + mock_driver.unplug_network.side_effect = net_base.NetworkNotFound + net.revert(self.amphora_mock, delta) # No exception + mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1) + + mock_driver.reset_mock() + mock_driver.unplug_network.side_effect = TestException('test') + self.assertRaises(TestException, + net.revert, + self.amphora_mock, + delta) + mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1) + + def test_unplug_networks(self, mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + + def _interface(network_id): + return [data_models.Interface(network_id=network_id)] + + net = network_tasks.UnPlugNetworks() + + net.execute(self.amphora_mock, None) + self.assertFalse(mock_driver.unplug_network.called) + + delta = data_models.Delta(amphora_id=self.amphora_mock.id, + compute_id=self.amphora_mock.compute_id, + add_nics=[], + 
delete_nics=[]) + net.execute(self.amphora_mock, delta) + self.assertFalse(mock_driver.unplug_network.called) + + delta = data_models.Delta(amphora_id=self.amphora_mock.id, + compute_id=self.amphora_mock.compute_id, + add_nics=[], + delete_nics=_interface(1)) + net.execute(self.amphora_mock, delta) + mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1) + + mock_driver.reset_mock() + mock_driver.unplug_network.side_effect = net_base.NetworkNotFound + net.execute(self.amphora_mock, delta) # No exception + mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1) + + # Do a test with a general exception in case behavior changes + mock_driver.reset_mock() + mock_driver.unplug_network.side_effect = Exception() + net.execute(self.amphora_mock, delta) # No exception + mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1) + + def test_get_member_ports(self, mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + + def _interface(port_id): + return [data_models.Interface(port_id=port_id)] + + net_task = network_tasks.GetMemberPorts() + net_task.execute(LB, self.amphora_mock) + mock_driver.get_port.assert_called_once_with(t_constants.MOCK_PORT_ID) + mock_driver.get_plugged_networks.assert_called_once_with(COMPUTE_ID) + + mock_driver.reset_mock() + net_task = network_tasks.GetMemberPorts() + mock_driver.get_plugged_networks.return_value = _interface(1) + mock_driver.get_port.side_effect = [ + data_models.Port(network_id=NETWORK_ID), + data_models.Port(network_id=NETWORK_ID)] + net_task.execute(self.load_balancer_mock, self.amphora_mock) + self.assertEqual(2, mock_driver.get_port.call_count) + self.assertFalse(mock_driver.get_network.called) + + mock_driver.reset_mock() + port_mock = mock.MagicMock() + fixed_ip_mock = mock.MagicMock() + fixed_ip_mock.subnet_id = 1 + port_mock.fixed_ips = [fixed_ip_mock] + net_task = network_tasks.GetMemberPorts() + mock_driver.get_plugged_networks.return_value = _interface(1) + mock_driver.get_port.side_effect = [ + data_models.Port(network_id=NETWORK_ID), port_mock] + ports = net_task.execute(self.load_balancer_mock, self.amphora_mock) + mock_driver.get_subnet.assert_called_once_with(1) + self.assertEqual([port_mock], ports) + + def test_handle_network_delta(self, mock_get_net_driver): + mock_net_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_net_driver + + nic1 = mock.MagicMock() + nic1.network_id = uuidutils.generate_uuid() + nic2 = mock.MagicMock() + nic2.network_id = uuidutils.generate_uuid() + interface1 = mock.MagicMock() + interface1.port_id = uuidutils.generate_uuid() + port1 = mock.MagicMock() + port1.network_id = uuidutils.generate_uuid() + fixed_ip = mock.MagicMock() + fixed_ip.subnet_id = uuidutils.generate_uuid() + port1.fixed_ips = [fixed_ip] + subnet = mock.MagicMock() + network = mock.MagicMock() + + delta = data_models.Delta(amphora_id=self.amphora_mock.id, + compute_id=self.amphora_mock.compute_id, + add_nics=[nic1], + delete_nics=[nic2, nic2, nic2]) + + mock_net_driver.plug_network.return_value = interface1 + mock_net_driver.get_port.return_value = port1 + mock_net_driver.get_network.return_value = network + mock_net_driver.get_subnet.return_value = subnet + + mock_net_driver.unplug_network.side_effect = [ + None, net_base.NetworkNotFound, Exception] + + handle_net_delta_obj = network_tasks.HandleNetworkDelta() + result = handle_net_delta_obj.execute(self.amphora_mock, delta) + + mock_net_driver.plug_network.assert_called_once_with( + 
self.amphora_mock.compute_id, nic1.network_id) + mock_net_driver.get_port.assert_called_once_with(interface1.port_id) + mock_net_driver.get_network.assert_called_once_with(port1.network_id) + mock_net_driver.get_subnet.assert_called_once_with(fixed_ip.subnet_id) + + self.assertEqual({self.amphora_mock.id: [port1]}, result) + + mock_net_driver.unplug_network.assert_called_with( + self.amphora_mock.compute_id, nic2.network_id) + + # Revert + delta2 = data_models.Delta(amphora_id=self.amphora_mock.id, + compute_id=self.amphora_mock.compute_id, + add_nics=[nic1, nic1], + delete_nics=[nic2, nic2, nic2]) + + mock_net_driver.unplug_network.reset_mock() + handle_net_delta_obj.revert( + failure.Failure.from_exception(Exception('boom')), None, None) + mock_net_driver.unplug_network.assert_not_called() + + mock_net_driver.unplug_network.reset_mock() + handle_net_delta_obj.revert(None, None, None) + mock_net_driver.unplug_network.assert_not_called() + + mock_net_driver.unplug_network.reset_mock() + handle_net_delta_obj.revert(None, None, delta2) + + def test_handle_network_deltas(self, mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + + def _interface(network_id): + return [data_models.Interface(network_id=network_id)] + + net = network_tasks.HandleNetworkDeltas() + + net.execute({}) + self.assertFalse(mock_driver.plug_network.called) + + delta = data_models.Delta(amphora_id=self.amphora_mock.id, + compute_id=self.amphora_mock.compute_id, + add_nics=[], + delete_nics=[]) + net.execute({self.amphora_mock.id: delta}) + self.assertFalse(mock_driver.plug_network.called) + + delta = data_models.Delta(amphora_id=self.amphora_mock.id, + compute_id=self.amphora_mock.compute_id, + add_nics=_interface(1), + delete_nics=[]) + net.execute({self.amphora_mock.id: delta}) + mock_driver.plug_network.assert_called_once_with(COMPUTE_ID, 1) + + # revert + net.execute({self.amphora_mock.id: delta}) + self.assertFalse(mock_driver.unplug_network.called) + + delta = data_models.Delta(amphora_id=self.amphora_mock.id, + compute_id=self.amphora_mock.compute_id, + add_nics=[], + delete_nics=[]) + net.execute({self.amphora_mock.id: delta}) + self.assertFalse(mock_driver.unplug_network.called) + + delta = data_models.Delta(amphora_id=self.amphora_mock.id, + compute_id=self.amphora_mock.compute_id, + add_nics=_interface(1), + delete_nics=[]) + + mock_driver.reset_mock() + mock_driver.unplug_network.side_effect = net_base.NetworkNotFound + + mock_driver.reset_mock() + mock_driver.unplug_network.side_effect = TestException('test') + self.assertRaises(TestException, net.revert, mock.ANY, + {self.amphora_mock.id: delta}) + mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1) + + mock_driver.reset_mock() + net.execute({}) + self.assertFalse(mock_driver.unplug_network.called) + + delta = data_models.Delta(amphora_id=self.amphora_mock.id, + compute_id=self.amphora_mock.compute_id, + add_nics=[], + delete_nics=[]) + net.execute({self.amphora_mock.id: delta}) + self.assertFalse(mock_driver.unplug_network.called) + + delta = data_models.Delta(amphora_id=self.amphora_mock.id, + compute_id=self.amphora_mock.compute_id, + add_nics=[], + delete_nics=_interface(1)) + net.execute({self.amphora_mock.id: delta}) + mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1) + + mock_driver.reset_mock() + mock_driver.unplug_network.side_effect = net_base.NetworkNotFound + net.execute({self.amphora_mock.id: delta}) + 
mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1) + + # Do a test with a general exception in case behavior changes + mock_driver.reset_mock() + mock_driver.unplug_network.side_effect = Exception() + net.execute({self.amphora_mock.id: delta}) + mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1) + + def test_plug_vip(self, mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + net = network_tasks.PlugVIP() + + mock_driver.plug_vip.return_value = ["vip"] + + data = net.execute(LB) + mock_driver.plug_vip.assert_called_once_with(LB, LB.vip) + self.assertEqual(["vip"], data) + + # revert + net.revert(["vip"], LB) + mock_driver.unplug_vip.assert_called_once_with(LB, LB.vip) + + # revert with exception + mock_driver.reset_mock() + mock_driver.unplug_vip.side_effect = Exception('UnplugVipException') + net.revert(["vip"], LB) + mock_driver.unplug_vip.assert_called_once_with(LB, LB.vip) + + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'get_current_loadbalancer_from_db') + def test_apply_qos_on_creation(self, mock_get_lb_db, mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + net = network_tasks.ApplyQos() + mock_get_lb_db.return_value = LB + + # execute + UPDATE_DICT[constants.TOPOLOGY] = constants.TOPOLOGY_SINGLE + update_dict = UPDATE_DICT + net.execute(LB, [AMPS_DATA[0]], update_dict) + mock_driver.apply_qos_on_port.assert_called_once_with( + VIP.qos_policy_id, AMPS_DATA[0].vrrp_port_id) + self.assertEqual(1, mock_driver.apply_qos_on_port.call_count) + standby_topology = constants.TOPOLOGY_ACTIVE_STANDBY + mock_driver.reset_mock() + update_dict[constants.TOPOLOGY] = standby_topology + net.execute(LB, AMPS_DATA, update_dict) + mock_driver.apply_qos_on_port.assert_called_with( + t_constants.MOCK_QOS_POLICY_ID1, mock.ANY) + self.assertEqual(2, mock_driver.apply_qos_on_port.call_count) + + # revert + mock_driver.reset_mock() + update_dict = UPDATE_DICT + net.revert(None, LB, [AMPS_DATA[0]], update_dict) + self.assertEqual(0, mock_driver.apply_qos_on_port.call_count) + mock_driver.reset_mock() + update_dict[constants.TOPOLOGY] = standby_topology + net.revert(None, LB, AMPS_DATA, update_dict) + self.assertEqual(0, mock_driver.apply_qos_on_port.call_count) + + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' 
+ 'get_current_loadbalancer_from_db') + def test_apply_qos_on_update(self, mock_get_lb_db, mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + net = network_tasks.ApplyQos() + null_qos_vip = o_data_models.Vip(qos_policy_id=None) + null_qos_lb = o_data_models.LoadBalancer( + vip=null_qos_vip, topology=constants.TOPOLOGY_SINGLE, + amphorae=[AMPS_DATA[0]]) + + tmp_vip_object = o_data_models.Vip( + qos_policy_id=t_constants.MOCK_QOS_POLICY_ID1) + tmp_lb = o_data_models.LoadBalancer( + vip=tmp_vip_object, topology=constants.TOPOLOGY_SINGLE, + amphorae=[AMPS_DATA[0]]) + + # execute + update_dict = {'description': 'fool'} + net.execute(tmp_lb, update_dict=update_dict) + mock_driver.apply_qos_on_port.assert_called_once_with( + t_constants.MOCK_QOS_POLICY_ID1, AMPS_DATA[0].vrrp_port_id) + self.assertEqual(1, mock_driver.apply_qos_on_port.call_count) + + mock_driver.reset_mock() + update_dict = {'vip': {'qos_policy_id': None}} + net.execute(null_qos_lb, update_dict=update_dict) + mock_driver.apply_qos_on_port.assert_called_once_with( + None, AMPS_DATA[0].vrrp_port_id) + self.assertEqual(1, mock_driver.apply_qos_on_port.call_count) + + mock_driver.reset_mock() + update_dict = {'name': '123'} + net.execute(null_qos_lb, update_dict=update_dict) + self.assertEqual(0, mock_driver.apply_qos_on_port.call_count) + + mock_driver.reset_mock() + update_dict = {'description': 'fool'} + tmp_lb.amphorae = AMPS_DATA + tmp_lb.topology = constants.TOPOLOGY_ACTIVE_STANDBY + net.execute(tmp_lb, update_dict=update_dict) + mock_driver.apply_qos_on_port.assert_called_with( + t_constants.MOCK_QOS_POLICY_ID1, mock.ANY) + self.assertEqual(2, mock_driver.apply_qos_on_port.call_count) + + # revert + mock_driver.reset_mock() + tmp_lb.amphorae = [AMPS_DATA[0]] + tmp_lb.topology = constants.TOPOLOGY_SINGLE + update_dict = {'description': 'fool'} + mock_get_lb_db.return_value = tmp_lb + net.revert(None, tmp_lb, update_dict=update_dict) + self.assertEqual(0, mock_driver.apply_qos_on_port.call_count) + + mock_driver.reset_mock() + update_dict = {'vip': {'qos_policy_id': None}} + ori_lb_db = LB2 + ori_lb_db.amphorae = [AMPS_DATA[0]] + mock_get_lb_db.return_value = ori_lb_db + net.revert(None, null_qos_lb, update_dict=update_dict) + mock_driver.apply_qos_on_port.assert_called_once_with( + t_constants.MOCK_QOS_POLICY_ID2, AMPS_DATA[0].vrrp_port_id) + self.assertEqual(1, mock_driver.apply_qos_on_port.call_count) + + mock_driver.reset_mock() + update_dict = {'vip': { + 'qos_policy_id': t_constants.MOCK_QOS_POLICY_ID2}} + tmp_lb.amphorae = AMPS_DATA + tmp_lb.topology = constants.TOPOLOGY_ACTIVE_STANDBY + ori_lb_db = LB2 + ori_lb_db.amphorae = [AMPS_DATA[0]] + mock_get_lb_db.return_value = ori_lb_db + net.revert(None, tmp_lb, update_dict=update_dict) + mock_driver.apply_qos_on_port.assert_called_with( + t_constants.MOCK_QOS_POLICY_ID2, mock.ANY) + self.assertEqual(2, mock_driver.apply_qos_on_port.call_count) + + def test_unplug_vip(self, mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + net = network_tasks.UnplugVIP() + + net.execute(LB) + mock_driver.unplug_vip.assert_called_once_with(LB, LB.vip) + + def test_allocate_vip(self, mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + net = network_tasks.AllocateVIP() + + mock_driver.allocate_vip.return_value = LB.vip + + mock_driver.reset_mock() + self.assertEqual(LB.vip, net.execute(LB)) + 
mock_driver.allocate_vip.assert_called_once_with(LB) + + # revert + vip_mock = mock.MagicMock() + net.revert(vip_mock, LB) + mock_driver.deallocate_vip.assert_called_once_with(vip_mock) + + # revert exception + mock_driver.reset_mock() + mock_driver.deallocate_vip.side_effect = Exception('DeallVipException') + vip_mock = mock.MagicMock() + net.revert(vip_mock, LB) + mock_driver.deallocate_vip.assert_called_once_with(vip_mock) + + def test_deallocate_vip(self, mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + net = network_tasks.DeallocateVIP() + vip = o_data_models.Vip() + lb = o_data_models.LoadBalancer(vip=vip) + net.execute(lb) + mock_driver.deallocate_vip.assert_called_once_with(lb.vip) + + def test_update_vip(self, mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + vip = o_data_models.Vip() + lb = o_data_models.LoadBalancer(vip=vip) + net_task = network_tasks.UpdateVIP() + net_task.execute(lb) + mock_driver.update_vip.assert_called_once_with(lb) + + def test_update_vip_for_delete(self, mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + vip = o_data_models.Vip() + lb = o_data_models.LoadBalancer(vip=vip) + net_task = network_tasks.UpdateVIPForDelete() + net_task.execute(lb) + mock_driver.update_vip.assert_called_once_with(lb, for_delete=True) + + def test_get_amphorae_network_configs(self, mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + lb = o_data_models.LoadBalancer() + net_task = network_tasks.GetAmphoraeNetworkConfigs() + net_task.execute(lb) + mock_driver.get_network_configs.assert_called_once_with(lb) + + def test_failover_preparation_for_amphora(self, mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + failover = network_tasks.FailoverPreparationForAmphora() + amphora = o_data_models.Amphora(id=AMPHORA_ID, + lb_network_ip=IP_ADDRESS) + failover.execute(amphora) + mock_driver.failover_preparation.assert_called_once_with(amphora) + + def test_retrieve_portids_on_amphora_except_lb_network( + self, mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + + def _interface(port_id): + return [data_models.Interface(port_id=port_id)] + + net_task = network_tasks.RetrievePortIDsOnAmphoraExceptLBNetwork() + amphora = o_data_models.Amphora(id=AMPHORA_ID, compute_id=COMPUTE_ID, + lb_network_ip=IP_ADDRESS) + + mock_driver.get_plugged_networks.return_value = [] + net_task.execute(amphora) + mock_driver.get_plugged_networks.assert_called_once_with( + compute_id=COMPUTE_ID) + self.assertFalse(mock_driver.get_port.called) + + mock_driver.reset_mock() + net_task = network_tasks.RetrievePortIDsOnAmphoraExceptLBNetwork() + mock_driver.get_plugged_networks.return_value = _interface(1) + net_task.execute(amphora) + mock_driver.get_port.assert_called_once_with(port_id=1) + + mock_driver.reset_mock() + net_task = network_tasks.RetrievePortIDsOnAmphoraExceptLBNetwork() + port_mock = mock.MagicMock() + fixed_ip_mock = mock.MagicMock() + fixed_ip_mock.ip_address = IP_ADDRESS + port_mock.fixed_ips = [fixed_ip_mock] + mock_driver.get_plugged_networks.return_value = _interface(1) + mock_driver.get_port.return_value = port_mock + ports = net_task.execute(amphora) + self.assertEqual([], ports) + + mock_driver.reset_mock() + net_task = network_tasks.RetrievePortIDsOnAmphoraExceptLBNetwork() 
+ port_mock = mock.MagicMock() + fixed_ip_mock = mock.MagicMock() + fixed_ip_mock.ip_address = "172.17.17.17" + port_mock.fixed_ips = [fixed_ip_mock] + mock_driver.get_plugged_networks.return_value = _interface(1) + mock_driver.get_port.return_value = port_mock + ports = net_task.execute(amphora) + self.assertEqual(1, len(ports)) + + def test_plug_ports(self, mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + + amphora = mock.MagicMock() + port1 = mock.MagicMock() + port2 = mock.MagicMock() + + plugports = network_tasks.PlugPorts() + plugports.execute(amphora, [port1, port2]) + + mock_driver.plug_port.assert_any_call(amphora, port1) + mock_driver.plug_port.assert_any_call(amphora, port2) + + def test_plug_vip_port(self, mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + vrrp_port = mock.MagicMock() + + amphorae_network_config = mock.MagicMock() + amphorae_network_config.get().vrrp_port = vrrp_port + + plugvipport = network_tasks.PlugVIPPort() + plugvipport.execute(self.amphora_mock, amphorae_network_config) + mock_driver.plug_port.assert_any_call(self.amphora_mock, vrrp_port) + + # test revert + plugvipport.revert(None, self.amphora_mock, amphorae_network_config) + mock_driver.unplug_port.assert_any_call(self.amphora_mock, vrrp_port) + + def test_wait_for_port_detach(self, mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + + amphora = o_data_models.Amphora(id=AMPHORA_ID, + lb_network_ip=IP_ADDRESS) + + waitforportdetach = network_tasks.WaitForPortDetach() + waitforportdetach.execute(amphora) + + mock_driver.wait_for_port_detach.assert_called_once_with(amphora) + + def test_update_vip_sg(self, mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + net = network_tasks.UpdateVIPSecurityGroup() + + net.execute(LB) + mock_driver.update_vip_sg.assert_called_once_with(LB, LB.vip) + + def test_get_subnet_from_vip(self, mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + net = network_tasks.GetSubnetFromVIP() + + net.execute(LB) + mock_driver.get_subnet.assert_called_once_with(LB.vip.subnet_id) + + def test_plug_vip_amphora(self, mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + net = network_tasks.PlugVIPAmpphora() + mockSubnet = mock.MagicMock() + net.execute(LB, self.amphora_mock, mockSubnet) + mock_driver.plug_aap_port.assert_called_once_with( + LB, LB.vip, self.amphora_mock, mockSubnet) + + def test_revert_plug_vip_amphora(self, mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + net = network_tasks.PlugVIPAmpphora() + mockSubnet = mock.MagicMock() + net.revert(AMPS_DATA[0], LB, self.amphora_mock, mockSubnet) + mock_driver.unplug_aap_port.assert_called_once_with( + LB.vip, self.amphora_mock, mockSubnet) diff --git a/octavia/tests/unit/controller/worker/v2/test_controller_worker.py b/octavia/tests/unit/controller/worker/v2/test_controller_worker.py new file mode 100644 index 0000000000..ef22478ce7 --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/test_controller_worker.py @@ -0,0 +1,1465 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import mock +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_utils import uuidutils + +from octavia.common import base_taskflow +from octavia.common import constants +from octavia.common import data_models +from octavia.controller.worker.v2 import controller_worker +import octavia.tests.unit.base as base + + +AMP_ID = uuidutils.generate_uuid() +LB_ID = uuidutils.generate_uuid() +POOL_ID = uuidutils.generate_uuid() +HM_ID = uuidutils.generate_uuid() +MEMBER_ID = uuidutils.generate_uuid() +COMPUTE_ID = uuidutils.generate_uuid() +L7POLICY_ID = uuidutils.generate_uuid() +L7RULE_ID = uuidutils.generate_uuid() +HEALTH_UPDATE_DICT = {'delay': 1, 'timeout': 2} +LISTENER_UPDATE_DICT = {'name': 'test', 'description': 'test2'} +MEMBER_UPDATE_DICT = {'weight': 1, 'ip_address': '10.0.0.0'} +POOL_UPDATE_DICT = {'name': 'test', 'description': 'test2'} +L7POLICY_UPDATE_DICT = {'action': constants.L7POLICY_ACTION_REJECT} +L7RULE_UPDATE_DICT = { + 'type': constants.L7RULE_TYPE_PATH, + 'compare_type': constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + 'value': '/api'} + +_amphora_mock = mock.MagicMock() +_flow_mock = mock.MagicMock() +_health_mon_mock = mock.MagicMock() +_vip_mock = mock.MagicMock() +_listener_mock = mock.MagicMock() +_load_balancer_mock = mock.MagicMock() +_member_mock = mock.MagicMock() +_pool_mock = mock.MagicMock() +_l7policy_mock = mock.MagicMock() +_l7rule_mock = mock.MagicMock() +_create_map_flow_mock = mock.MagicMock() +_amphora_mock.load_balancer_id = LB_ID +_amphora_mock.id = AMP_ID +_db_session = mock.MagicMock() + +CONF = cfg.CONF + + +class TestException(Exception): + + def __init__(self, value): + self.value = value + + def __str__(self): + return repr(self.value) + + +@mock.patch('octavia.db.repositories.AmphoraRepository.get', + return_value=_amphora_mock) +@mock.patch('octavia.db.repositories.HealthMonitorRepository.get', + return_value=_health_mon_mock) +@mock.patch('octavia.db.repositories.LoadBalancerRepository.get', + return_value=_load_balancer_mock) +@mock.patch('octavia.db.repositories.ListenerRepository.get', + return_value=_listener_mock) +@mock.patch('octavia.db.repositories.L7PolicyRepository.get', + return_value=_l7policy_mock) +@mock.patch('octavia.db.repositories.L7RuleRepository.get', + return_value=_l7rule_mock) +@mock.patch('octavia.db.repositories.MemberRepository.get', + return_value=_member_mock) +@mock.patch('octavia.db.repositories.PoolRepository.get', + return_value=_pool_mock) +@mock.patch('octavia.common.base_taskflow.BaseTaskFlowEngine._taskflow_load', + return_value=_flow_mock) +@mock.patch('taskflow.listeners.logging.DynamicLoggingListener') +@mock.patch('octavia.db.api.get_session', return_value=_db_session) +class TestControllerWorker(base.TestCase): + + def setUp(self): + + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + + _pool_mock.listeners = [_listener_mock] + _pool_mock.load_balancer = _load_balancer_mock + _health_mon_mock.pool = _pool_mock + _load_balancer_mock.amphorae = _amphora_mock + _load_balancer_mock.vip = _vip_mock + _listener_mock.load_balancer = _load_balancer_mock + 
_member_mock.pool = _pool_mock + _l7policy_mock.listener = _listener_mock + _l7rule_mock.l7policy = _l7policy_mock + + fetch_mock = mock.MagicMock(return_value=AMP_ID) + _flow_mock.storage.fetch = fetch_mock + + _pool_mock.id = POOL_ID + _health_mon_mock.pool_id = POOL_ID + _health_mon_mock.id = HM_ID + + super(TestControllerWorker, self).setUp() + + @mock.patch('octavia.controller.worker.v2.flows.' + 'amphora_flows.AmphoraFlows.get_create_amphora_flow', + return_value='TEST') + def test_create_amphora(self, + mock_api_get_session, + mock_get_create_amp_flow, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + + cw = controller_worker.ControllerWorker() + amp = cw.create_amphora() + + (base_taskflow.BaseTaskFlowEngine._taskflow_load. + assert_called_once_with( + 'TEST', + store={constants.BUILD_TYPE_PRIORITY: + constants.LB_CREATE_SPARES_POOL_PRIORITY, + constants.FLAVOR: None})) + + _flow_mock.run.assert_called_once_with() + + _flow_mock.storage.fetch.assert_called_once_with('amphora') + + self.assertEqual(AMP_ID, amp) + + @mock.patch('octavia.controller.worker.v2.flows.' + 'amphora_flows.AmphoraFlows.get_delete_amphora_flow', + return_value='TEST') + def test_delete_amphora(self, + mock_get_delete_amp_flow, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + + cw = controller_worker.ControllerWorker() + cw.delete_amphora(AMP_ID) + + mock_amp_repo_get.assert_called_once_with( + _db_session, + id=AMP_ID) + + (base_taskflow.BaseTaskFlowEngine._taskflow_load. + assert_called_once_with('TEST', + store={constants.AMPHORA: _amphora_mock})) + + _flow_mock.run.assert_called_once_with() + + @mock.patch('octavia.controller.worker.v2.flows.' + 'health_monitor_flows.HealthMonitorFlows.' + 'get_create_health_monitor_flow', + return_value=_flow_mock) + def test_create_health_monitor(self, + mock_get_create_hm_flow, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + mock_health_mon_repo_get.side_effect = [None, _health_mon_mock] + + cw = controller_worker.ControllerWorker() + cw.create_health_monitor(_health_mon_mock) + + (base_taskflow.BaseTaskFlowEngine._taskflow_load. + assert_called_once_with(_flow_mock, + store={constants.HEALTH_MON: + _health_mon_mock, + constants.LISTENERS: + [_listener_mock], + constants.LOADBALANCER: + _load_balancer_mock, + constants.POOL: + _pool_mock})) + + _flow_mock.run.assert_called_once_with() + self.assertEqual(2, mock_health_mon_repo_get.call_count) + + @mock.patch('octavia.controller.worker.v2.flows.' + 'health_monitor_flows.HealthMonitorFlows.' 
+ 'get_delete_health_monitor_flow', + return_value=_flow_mock) + def test_delete_health_monitor(self, + mock_get_delete_hm_flow, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + + cw = controller_worker.ControllerWorker() + cw.delete_health_monitor(HM_ID) + + (base_taskflow.BaseTaskFlowEngine._taskflow_load. + assert_called_once_with(_flow_mock, + store={constants.HEALTH_MON: + _health_mon_mock, + constants.LISTENERS: + [_listener_mock], + constants.LOADBALANCER: + _load_balancer_mock, + constants.POOL: + _pool_mock})) + + _flow_mock.run.assert_called_once_with() + + @mock.patch('octavia.controller.worker.v2.flows.' + 'health_monitor_flows.HealthMonitorFlows.' + 'get_update_health_monitor_flow', + return_value=_flow_mock) + def test_update_health_monitor(self, + mock_get_update_hm_flow, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + _health_mon_mock.provisioning_status = constants.PENDING_UPDATE + + cw = controller_worker.ControllerWorker() + cw.update_health_monitor(_health_mon_mock.id, + HEALTH_UPDATE_DICT) + + (base_taskflow.BaseTaskFlowEngine._taskflow_load. + assert_called_once_with(_flow_mock, + store={constants.HEALTH_MON: + _health_mon_mock, + constants.POOL: + _pool_mock, + constants.LISTENERS: + [_listener_mock], + constants.LOADBALANCER: + _load_balancer_mock, + constants.UPDATE_DICT: + HEALTH_UPDATE_DICT})) + + _flow_mock.run.assert_called_once_with() + + @mock.patch('octavia.controller.worker.v2.flows.' + 'listener_flows.ListenerFlows.get_create_listener_flow', + return_value=_flow_mock) + def test_create_listener(self, + mock_get_create_listener_flow, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + mock_listener_repo_get.side_effect = [None, _listener_mock] + + cw = controller_worker.ControllerWorker() + cw.create_listener(LB_ID) + + (base_taskflow.BaseTaskFlowEngine._taskflow_load. + assert_called_once_with(_flow_mock, + store={constants.LOADBALANCER: + _load_balancer_mock, + constants.LISTENERS: + [_listener_mock]})) + + _flow_mock.run.assert_called_once_with() + self.assertEqual(2, mock_listener_repo_get.call_count) + + @mock.patch('octavia.controller.worker.v2.flows.' + 'listener_flows.ListenerFlows.get_delete_listener_flow', + return_value=_flow_mock) + def test_delete_listener(self, + mock_get_delete_listener_flow, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + + cw = controller_worker.ControllerWorker() + cw.delete_listener(LB_ID) + + (base_taskflow.BaseTaskFlowEngine._taskflow_load. 
+ assert_called_once_with( + _flow_mock, store={constants.LISTENER: _listener_mock, + constants.LOADBALANCER: _load_balancer_mock})) + + _flow_mock.run.assert_called_once_with() + + @mock.patch('octavia.controller.worker.v2.flows.' + 'listener_flows.ListenerFlows.get_update_listener_flow', + return_value=_flow_mock) + def test_update_listener(self, + mock_get_update_listener_flow, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + _listener_mock.provisioning_status = constants.PENDING_UPDATE + + cw = controller_worker.ControllerWorker() + cw.update_listener(LB_ID, LISTENER_UPDATE_DICT) + + (base_taskflow.BaseTaskFlowEngine._taskflow_load. + assert_called_once_with(_flow_mock, + store={constants.LISTENER: _listener_mock, + constants.LOADBALANCER: + _load_balancer_mock, + constants.UPDATE_DICT: + LISTENER_UPDATE_DICT, + constants.LISTENERS: + [_listener_mock]})) + + _flow_mock.run.assert_called_once_with() + + @mock.patch('octavia.controller.worker.v2.flows.load_balancer_flows.' + 'LoadBalancerFlows.get_create_load_balancer_flow', + return_value=_flow_mock) + def test_create_load_balancer_single( + self, + mock_get_create_load_balancer_flow, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + # Test the code path with a SINGLE topology + self.conf.config(group="controller_worker", + loadbalancer_topology=constants.TOPOLOGY_SINGLE) + _flow_mock.reset_mock() + mock_taskflow_load.reset_mock() + mock_eng = mock.Mock() + mock_taskflow_load.return_value = mock_eng + store = { + constants.LOADBALANCER_ID: LB_ID, + 'update_dict': {'topology': constants.TOPOLOGY_SINGLE}, + constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY, + constants.FLAVOR: None + } + lb_mock = mock.MagicMock() + lb_mock.listeners = [] + lb_mock.topology = constants.TOPOLOGY_SINGLE + mock_lb_repo_get.side_effect = [None, None, None, lb_mock] + + cw = controller_worker.ControllerWorker() + cw.create_load_balancer(LB_ID) + + mock_get_create_load_balancer_flow.assert_called_with( + topology=constants.TOPOLOGY_SINGLE, listeners=[]) + mock_taskflow_load.assert_called_with( + mock_get_create_load_balancer_flow.return_value, store=store) + mock_eng.run.assert_any_call() + self.assertEqual(4, mock_lb_repo_get.call_count) + + @mock.patch('octavia.controller.worker.v2.flows.load_balancer_flows.'
+ 'LoadBalancerFlows.get_create_load_balancer_flow', + return_value=_flow_mock) + def test_create_load_balancer_active_standby( + self, + mock_get_create_load_balancer_flow, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + self.conf.config( + group="controller_worker", + loadbalancer_topology=constants.TOPOLOGY_ACTIVE_STANDBY) + + _flow_mock.reset_mock() + mock_taskflow_load.reset_mock() + mock_eng = mock.Mock() + mock_taskflow_load.return_value = mock_eng + store = { + constants.LOADBALANCER_ID: LB_ID, + 'update_dict': {'topology': constants.TOPOLOGY_ACTIVE_STANDBY}, + constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY, + constants.FLAVOR: None + } + setattr(mock_lb_repo_get.return_value, 'topology', + constants.TOPOLOGY_ACTIVE_STANDBY) + setattr(mock_lb_repo_get.return_value, 'listeners', []) + + cw = controller_worker.ControllerWorker() + cw.create_load_balancer(LB_ID) + + mock_get_create_load_balancer_flow.assert_called_with( + topology=constants.TOPOLOGY_ACTIVE_STANDBY, listeners=[]) + mock_taskflow_load.assert_called_with( + mock_get_create_load_balancer_flow.return_value, store=store) + mock_eng.run.assert_any_call() + + @mock.patch('octavia.controller.worker.v2.flows.load_balancer_flows.' + 'LoadBalancerFlows.get_create_load_balancer_flow') + def test_create_load_balancer_full_graph_single( + self, + mock_get_create_load_balancer_flow, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + self.conf.config( + group="controller_worker", + loadbalancer_topology=constants.TOPOLOGY_SINGLE) + + listeners = [data_models.Listener(id='listener1'), + data_models.Listener(id='listener2')] + lb = data_models.LoadBalancer(id=LB_ID, listeners=listeners, + topology=constants.TOPOLOGY_SINGLE) + mock_lb_repo_get.return_value = lb + mock_eng = mock.Mock() + mock_taskflow_load.return_value = mock_eng + store = { + constants.LOADBALANCER_ID: LB_ID, + 'update_dict': {'topology': constants.TOPOLOGY_SINGLE}, + constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY, + constants.FLAVOR: None + } + + cw = controller_worker.ControllerWorker() + cw.create_load_balancer(LB_ID) + + # mock_create_single_topology.assert_called_once() + # mock_create_active_standby_topology.assert_not_called() + mock_get_create_load_balancer_flow.assert_called_with( + topology=constants.TOPOLOGY_SINGLE, listeners=lb.listeners) + mock_taskflow_load.assert_called_with( + mock_get_create_load_balancer_flow.return_value, store=store) + mock_eng.run.assert_any_call() + + @mock.patch('octavia.controller.worker.v2.flows.load_balancer_flows.' + 'LoadBalancerFlows.get_create_load_balancer_flow') + @mock.patch('octavia.controller.worker.v2.flows.load_balancer_flows.' + 'LoadBalancerFlows._create_single_topology') + @mock.patch('octavia.controller.worker.v2.flows.load_balancer_flows.' 
+ 'LoadBalancerFlows._create_active_standby_topology') + def test_create_load_balancer_full_graph_active_standby( + self, + mock_create_active_standby_topology, + mock_create_single_topology, + mock_get_create_load_balancer_flow, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + self.conf.config( + group="controller_worker", + loadbalancer_topology=constants.TOPOLOGY_ACTIVE_STANDBY) + + listeners = [data_models.Listener(id='listener1'), + data_models.Listener(id='listener2')] + lb = data_models.LoadBalancer( + id=LB_ID, listeners=listeners, + topology=constants.TOPOLOGY_ACTIVE_STANDBY) + mock_lb_repo_get.return_value = lb + mock_eng = mock.Mock() + mock_taskflow_load.return_value = mock_eng + store = { + constants.LOADBALANCER_ID: LB_ID, + 'update_dict': {'topology': constants.TOPOLOGY_ACTIVE_STANDBY}, + constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY, + constants.FLAVOR: None + } + + cw = controller_worker.ControllerWorker() + cw.create_load_balancer(LB_ID) + + mock_get_create_load_balancer_flow.assert_called_with( + topology=constants.TOPOLOGY_ACTIVE_STANDBY, listeners=lb.listeners) + mock_taskflow_load.assert_called_with( + mock_get_create_load_balancer_flow.return_value, store=store) + mock_eng.run.assert_any_call() + + @mock.patch('octavia.controller.worker.v2.flows.load_balancer_flows.' + 'LoadBalancerFlows.get_delete_load_balancer_flow', + return_value=(_flow_mock, {'test': 'test'})) + def test_delete_load_balancer_without_cascade(self, + mock_get_delete_lb_flow, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + + cw = controller_worker.ControllerWorker() + cw.delete_load_balancer(LB_ID, cascade=False) + + mock_lb_repo_get.assert_called_once_with( + _db_session, + id=LB_ID) + + (base_taskflow.BaseTaskFlowEngine._taskflow_load. + assert_called_once_with(_flow_mock, + store={constants.LOADBALANCER: + _load_balancer_mock, + constants.SERVER_GROUP_ID: + _load_balancer_mock.server_group_id, + 'test': 'test' + } + ) + ) + _flow_mock.run.assert_called_once_with() + + @mock.patch('octavia.controller.worker.v2.flows.load_balancer_flows.' + 'LoadBalancerFlows.get_cascade_delete_load_balancer_flow', + return_value=(_flow_mock, {'test': 'test'})) + def test_delete_load_balancer_with_cascade(self, + mock_get_delete_lb_flow, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + + cw = controller_worker.ControllerWorker() + cw.delete_load_balancer(LB_ID, cascade=True) + + mock_lb_repo_get.assert_called_once_with( + _db_session, + id=LB_ID) + + (base_taskflow.BaseTaskFlowEngine._taskflow_load. 
+ assert_called_once_with(_flow_mock, + store={constants.LOADBALANCER: + _load_balancer_mock, + constants.SERVER_GROUP_ID: + _load_balancer_mock.server_group_id, + 'test': 'test' + } + ) + ) + _flow_mock.run.assert_called_once_with() + + @mock.patch('octavia.controller.worker.v2.flows.load_balancer_flows.' + 'LoadBalancerFlows.get_update_load_balancer_flow', + return_value=_flow_mock) + @mock.patch('octavia.db.repositories.ListenerRepository.get_all', + return_value=([_listener_mock], None)) + def test_update_load_balancer(self, + mock_listener_repo_get_all, + mock_get_update_lb_flow, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + _load_balancer_mock.provisioning_status = constants.PENDING_UPDATE + + cw = controller_worker.ControllerWorker() + change = 'TEST2' + cw.update_load_balancer(LB_ID, change) + + mock_lb_repo_get.assert_called_once_with( + _db_session, + id=LB_ID) + + (base_taskflow.BaseTaskFlowEngine._taskflow_load. + assert_called_once_with(_flow_mock, + store={constants.UPDATE_DICT: change, + constants.LOADBALANCER: + _load_balancer_mock, + constants.LISTENERS: + [_listener_mock]})) + + _flow_mock.run.assert_called_once_with() + + @mock.patch('octavia.controller.worker.v2.flows.' + 'member_flows.MemberFlows.get_create_member_flow', + return_value=_flow_mock) + def test_create_member(self, + mock_get_create_member_flow, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + mock_member_repo_get.side_effect = [None, _member_mock] + + cw = controller_worker.ControllerWorker() + cw.create_member(MEMBER_ID) + + (base_taskflow.BaseTaskFlowEngine._taskflow_load. + assert_called_once_with(_flow_mock, + store={constants.MEMBER: _member_mock, + constants.LISTENERS: + [_listener_mock], + constants.LOADBALANCER: + _load_balancer_mock, + constants.POOL: + _pool_mock})) + + _flow_mock.run.assert_called_once_with() + self.assertEqual(2, mock_member_repo_get.call_count) + + @mock.patch('octavia.controller.worker.v2.flows.' + 'member_flows.MemberFlows.get_delete_member_flow', + return_value=_flow_mock) + def test_delete_member(self, + mock_get_delete_member_flow, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + + cw = controller_worker.ControllerWorker() + cw.delete_member(MEMBER_ID) + + (base_taskflow.BaseTaskFlowEngine._taskflow_load. + assert_called_once_with( + _flow_mock, store={constants.MEMBER: _member_mock, + constants.LISTENERS: + [_listener_mock], + constants.LOADBALANCER: + _load_balancer_mock, + constants.POOL: + _pool_mock})) + + _flow_mock.run.assert_called_once_with() + + @mock.patch('octavia.controller.worker.v2.flows.' 
+ 'member_flows.MemberFlows.get_update_member_flow', + return_value=_flow_mock) + def test_update_member(self, + mock_get_update_member_flow, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + _member_mock.provisioning_status = constants.PENDING_UPDATE + + cw = controller_worker.ControllerWorker() + cw.update_member(MEMBER_ID, MEMBER_UPDATE_DICT) + + (base_taskflow.BaseTaskFlowEngine._taskflow_load. + assert_called_once_with(_flow_mock, + store={constants.MEMBER: _member_mock, + constants.LISTENERS: + [_listener_mock], + constants.LOADBALANCER: + _load_balancer_mock, + constants.POOL: + _pool_mock, + constants.UPDATE_DICT: + MEMBER_UPDATE_DICT})) + + _flow_mock.run.assert_called_once_with() + + @mock.patch('octavia.controller.worker.v2.flows.' + 'member_flows.MemberFlows.get_batch_update_members_flow', + return_value=_flow_mock) + def test_batch_update_members(self, + mock_get_batch_update_members_flow, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + + cw = controller_worker.ControllerWorker() + cw.batch_update_members([9], [11], [MEMBER_UPDATE_DICT]) + + (base_taskflow.BaseTaskFlowEngine._taskflow_load. + assert_called_once_with(_flow_mock, + store={ + constants.LISTENERS: [_listener_mock], + constants.LOADBALANCER: + _load_balancer_mock, + constants.POOL: _pool_mock})) + + _flow_mock.run.assert_called_once_with() + + @mock.patch('octavia.controller.worker.v2.flows.' + 'pool_flows.PoolFlows.get_create_pool_flow', + return_value=_flow_mock) + def test_create_pool(self, + mock_get_create_listener_flow, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + mock_pool_repo_get.side_effect = [None, _pool_mock] + + cw = controller_worker.ControllerWorker() + cw.create_pool(POOL_ID) + + (base_taskflow.BaseTaskFlowEngine._taskflow_load. + assert_called_once_with(_flow_mock, + store={constants.POOL: _pool_mock, + constants.LISTENERS: + [_listener_mock], + constants.LOADBALANCER: + _load_balancer_mock})) + + _flow_mock.run.assert_called_once_with() + self.assertEqual(2, mock_pool_repo_get.call_count) + + @mock.patch('octavia.controller.worker.v2.flows.' + 'pool_flows.PoolFlows.get_delete_pool_flow', + return_value=_flow_mock) + def test_delete_pool(self, + mock_get_delete_listener_flow, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + + cw = controller_worker.ControllerWorker() + cw.delete_pool(POOL_ID) + + (base_taskflow.BaseTaskFlowEngine._taskflow_load. 
+ assert_called_once_with(_flow_mock, + store={constants.POOL: _pool_mock, + constants.LISTENERS: + [_listener_mock], + constants.LOADBALANCER: + _load_balancer_mock})) + + _flow_mock.run.assert_called_once_with() + + @mock.patch('octavia.controller.worker.v2.flows.' + 'pool_flows.PoolFlows.get_update_pool_flow', + return_value=_flow_mock) + def test_update_pool(self, + mock_get_update_listener_flow, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + _pool_mock.provisioning_status = constants.PENDING_UPDATE + + cw = controller_worker.ControllerWorker() + cw.update_pool(POOL_ID, POOL_UPDATE_DICT) + + (base_taskflow.BaseTaskFlowEngine._taskflow_load. + assert_called_once_with(_flow_mock, + store={constants.POOL: _pool_mock, + constants.LISTENERS: + [_listener_mock], + constants.LOADBALANCER: + _load_balancer_mock, + constants.UPDATE_DICT: + POOL_UPDATE_DICT})) + + _flow_mock.run.assert_called_once_with() + + @mock.patch('octavia.controller.worker.v2.flows.' + 'l7policy_flows.L7PolicyFlows.get_create_l7policy_flow', + return_value=_flow_mock) + def test_create_l7policy(self, + mock_get_create_listener_flow, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + mock_l7policy_repo_get.side_effect = [None, _l7policy_mock] + + cw = controller_worker.ControllerWorker() + cw.create_l7policy(L7POLICY_ID) + + (base_taskflow.BaseTaskFlowEngine._taskflow_load. + assert_called_once_with(_flow_mock, + store={constants.L7POLICY: _l7policy_mock, + constants.LISTENERS: + [_listener_mock], + constants.LOADBALANCER: + _load_balancer_mock})) + + _flow_mock.run.assert_called_once_with() + self.assertEqual(2, mock_l7policy_repo_get.call_count) + + @mock.patch('octavia.controller.worker.v2.flows.' + 'l7policy_flows.L7PolicyFlows.get_delete_l7policy_flow', + return_value=_flow_mock) + def test_delete_l7policy(self, + mock_get_delete_listener_flow, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + + cw = controller_worker.ControllerWorker() + cw.delete_l7policy(L7POLICY_ID) + + (base_taskflow.BaseTaskFlowEngine._taskflow_load. + assert_called_once_with(_flow_mock, + store={constants.L7POLICY: _l7policy_mock, + constants.LISTENERS: + [_listener_mock], + constants.LOADBALANCER: + _load_balancer_mock})) + + _flow_mock.run.assert_called_once_with() + + @mock.patch('octavia.controller.worker.v2.flows.' 
+ 'l7policy_flows.L7PolicyFlows.get_update_l7policy_flow', + return_value=_flow_mock) + def test_update_l7policy(self, + mock_get_update_listener_flow, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + _l7policy_mock.provisioning_status = constants.PENDING_UPDATE + + cw = controller_worker.ControllerWorker() + cw.update_l7policy(L7POLICY_ID, L7POLICY_UPDATE_DICT) + + (base_taskflow.BaseTaskFlowEngine._taskflow_load. + assert_called_once_with(_flow_mock, + store={constants.L7POLICY: _l7policy_mock, + constants.LISTENERS: + [_listener_mock], + constants.LOADBALANCER: + _load_balancer_mock, + constants.UPDATE_DICT: + L7POLICY_UPDATE_DICT})) + + _flow_mock.run.assert_called_once_with() + + @mock.patch('octavia.controller.worker.v2.flows.' + 'l7rule_flows.L7RuleFlows.get_create_l7rule_flow', + return_value=_flow_mock) + def test_create_l7rule(self, + mock_get_create_listener_flow, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + mock_l7rule_repo_get.side_effect = [None, _l7rule_mock] + + cw = controller_worker.ControllerWorker() + cw.create_l7rule(L7RULE_ID) + + (base_taskflow.BaseTaskFlowEngine._taskflow_load. + assert_called_once_with(_flow_mock, + store={constants.L7RULE: _l7rule_mock, + constants.L7POLICY: _l7policy_mock, + constants.LISTENERS: + [_listener_mock], + constants.LOADBALANCER: + _load_balancer_mock})) + + _flow_mock.run.assert_called_once_with() + self.assertEqual(2, mock_l7rule_repo_get.call_count) + + @mock.patch('octavia.controller.worker.v2.flows.' + 'l7rule_flows.L7RuleFlows.get_delete_l7rule_flow', + return_value=_flow_mock) + def test_delete_l7rule(self, + mock_get_delete_listener_flow, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + + cw = controller_worker.ControllerWorker() + cw.delete_l7rule(L7RULE_ID) + + (base_taskflow.BaseTaskFlowEngine._taskflow_load. + assert_called_once_with(_flow_mock, + store={constants.L7RULE: _l7rule_mock, + constants.L7POLICY: _l7policy_mock, + constants.LISTENERS: + [_listener_mock], + constants.LOADBALANCER: + _load_balancer_mock})) + + _flow_mock.run.assert_called_once_with() + + @mock.patch('octavia.controller.worker.v2.flows.' + 'l7rule_flows.L7RuleFlows.get_update_l7rule_flow', + return_value=_flow_mock) + def test_update_l7rule(self, + mock_get_update_listener_flow, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + _l7rule_mock.provisioning_status = constants.PENDING_UPDATE + + cw = controller_worker.ControllerWorker() + cw.update_l7rule(L7RULE_ID, L7RULE_UPDATE_DICT) + + (base_taskflow.BaseTaskFlowEngine._taskflow_load. 
+ assert_called_once_with(_flow_mock, + store={constants.L7RULE: _l7rule_mock, + constants.L7POLICY: _l7policy_mock, + constants.LISTENERS: + [_listener_mock], + constants.LOADBALANCER: + _load_balancer_mock, + constants.UPDATE_DICT: + L7RULE_UPDATE_DICT})) + + _flow_mock.run.assert_called_once_with() + + @mock.patch('octavia.db.repositories.FlavorRepository.' + 'get_flavor_metadata_dict', return_value={}) + @mock.patch('octavia.controller.worker.v2.flows.' + 'amphora_flows.AmphoraFlows.get_failover_flow', + return_value=_flow_mock) + @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') + def test_failover_amphora(self, + mock_update, + mock_get_failover_flow, + mock_get_flavor_meta, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + + cw = controller_worker.ControllerWorker() + cw.failover_amphora(AMP_ID) + + (base_taskflow.BaseTaskFlowEngine._taskflow_load. + assert_called_once_with( + _flow_mock, + store={constants.FAILED_AMPHORA: _amphora_mock, + constants.LOADBALANCER_ID: + _amphora_mock.load_balancer_id, + constants.BUILD_TYPE_PRIORITY: + constants.LB_CREATE_FAILOVER_PRIORITY, + constants.FLAVOR: {} + })) + + _flow_mock.run.assert_called_once_with() + mock_update.assert_called_with(_db_session, LB_ID, + provisioning_status=constants.ACTIVE) + + @mock.patch('octavia.controller.worker.v2.controller_worker.' + 'ControllerWorker._perform_amphora_failover') + def test_failover_amp_missing_amp(self, + mock_perform_amp_failover, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + mock_amp_repo_get.return_value = None + + cw = controller_worker.ControllerWorker() + cw.failover_amphora(AMP_ID) + + mock_perform_amp_failover.assert_not_called() + + @mock.patch('octavia.controller.worker.v2.controller_worker.' + 'ControllerWorker._perform_amphora_failover') + @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') + def test_failover_amp_flow_exception(self, + mock_update, + mock_perform_amp_failover, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + mock_perform_amp_failover.side_effect = TestException('boom') + cw = controller_worker.ControllerWorker() + self.assertRaises(TestException, cw.failover_amphora, AMP_ID) + mock_update.assert_called_with(_db_session, LB_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.controller.worker.v2.controller_worker.' 
+ 'ControllerWorker._perform_amphora_failover') + @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') + def test_failover_amp_no_lb(self, + mock_lb_update, + mock_perform_amp_failover, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + amphora = mock.MagicMock() + amphora.load_balancer_id = None + mock_amp_repo_get.return_value = amphora + + cw = controller_worker.ControllerWorker() + cw.failover_amphora(AMP_ID) + + mock_lb_update.assert_not_called() + mock_perform_amp_failover.assert_called_once_with( + amphora, constants.LB_CREATE_FAILOVER_PRIORITY) + + @mock.patch('octavia.db.repositories.AmphoraHealthRepository.delete') + def test_failover_deleted_amphora(self, + mock_delete, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + mock_taskflow_load.reset_mock() + mock_amphora = mock.MagicMock() + mock_amphora.id = AMP_ID + mock_amphora.status = constants.DELETED + + cw = controller_worker.ControllerWorker() + cw._perform_amphora_failover(mock_amphora, 10) + + mock_delete.assert_called_with(_db_session, amphora_id=AMP_ID) + mock_taskflow_load.assert_not_called() + + @mock.patch('octavia.controller.worker.v2.' + 'controller_worker.ControllerWorker._perform_amphora_failover') + @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') + def test_failover_loadbalancer(self, + mock_update, + mock_perform, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + _amphora_mock2 = mock.MagicMock() + _amphora_mock3 = mock.MagicMock() + _amphora_mock3.status = constants.DELETED + _load_balancer_mock.amphorae = [ + _amphora_mock, _amphora_mock2, _amphora_mock3] + cw = controller_worker.ControllerWorker() + cw.failover_loadbalancer('123') + mock_perform.assert_called_with( + _amphora_mock2, constants.LB_CREATE_ADMIN_FAILOVER_PRIORITY) + mock_update.assert_called_with(_db_session, '123', + provisioning_status=constants.ACTIVE) + + mock_perform.reset_mock() + _load_balancer_mock.amphorae = [ + _amphora_mock, _amphora_mock2, _amphora_mock3] + _amphora_mock2.role = constants.ROLE_BACKUP + cw.failover_loadbalancer('123') + # because mock2 gets failed over earlier, _amphora_mock + # is now the last one + mock_perform.assert_called_with( + _amphora_mock, constants.LB_CREATE_ADMIN_FAILOVER_PRIORITY) + mock_update.assert_called_with(_db_session, '123', + provisioning_status=constants.ACTIVE) + + mock_perform.reset_mock() + mock_perform.side_effect = OverflowError() + self.assertRaises(OverflowError, cw.failover_loadbalancer, 123) + mock_update.assert_called_with(_db_session, 123, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.FlavorRepository.' + 'get_flavor_metadata_dict', return_value={}) + @mock.patch('octavia.controller.worker.v2.flows.'
+ 'amphora_flows.AmphoraFlows.get_failover_flow', + return_value=_flow_mock) + @mock.patch( + 'octavia.db.repositories.AmphoraRepository.get_lb_for_amphora', + return_value=_load_balancer_mock) + @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') + def test_failover_amphora_anti_affinity(self, + mock_update, + mock_get_lb_for_amphora, + mock_get_update_listener_flow, + mock_get_flavor_meta, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + self.conf.config(group="nova", enable_anti_affinity=True) + _flow_mock.reset_mock() + _load_balancer_mock.server_group_id = "123" + + cw = controller_worker.ControllerWorker() + cw.failover_amphora(AMP_ID) + + (base_taskflow.BaseTaskFlowEngine._taskflow_load. + assert_called_once_with( + _flow_mock, + store={constants.FAILED_AMPHORA: _amphora_mock, + constants.LOADBALANCER_ID: + _amphora_mock.load_balancer_id, + constants.BUILD_TYPE_PRIORITY: + constants.LB_CREATE_FAILOVER_PRIORITY, + constants.SERVER_GROUP_ID: "123", + constants.FLAVOR: {} + })) + + _flow_mock.run.assert_called_once_with() + mock_update.assert_called_with(_db_session, LB_ID, + provisioning_status=constants.ACTIVE) + + @mock.patch('octavia.controller.worker.v2.flows.' + 'amphora_flows.AmphoraFlows.cert_rotate_amphora_flow', + return_value=_flow_mock) + def test_amphora_cert_rotation(self, + mock_get_update_listener_flow, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + _flow_mock.reset_mock() + cw = controller_worker.ControllerWorker() + cw.amphora_cert_rotation(AMP_ID) + (base_taskflow.BaseTaskFlowEngine._taskflow_load. + assert_called_once_with(_flow_mock, + store={constants.AMPHORA: _amphora_mock, + constants.AMPHORA_ID: + _amphora_mock.id})) + _flow_mock.run.assert_called_once_with() + + @mock.patch('octavia.db.repositories.FlavorRepository.' + 'get_flavor_metadata_dict') + @mock.patch('octavia.db.repositories.AmphoraRepository.get_lb_for_amphora') + @mock.patch('octavia.controller.worker.v2.flows.' + 'amphora_flows.AmphoraFlows.update_amphora_config_flow', + return_value=_flow_mock) + def test_update_amphora_agent_config(self, + mock_update_flow, + mock_get_lb_for_amp, + mock_flavor_meta, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + _flow_mock.reset_mock() + mock_lb = mock.MagicMock() + mock_lb.flavor_id = 'vanilla' + mock_get_lb_for_amp.return_value = mock_lb + mock_flavor_meta.return_value = {'test': 'dict'} + cw = controller_worker.ControllerWorker() + cw.update_amphora_agent_config(AMP_ID) + + mock_amp_repo_get.assert_called_once_with(_db_session, id=AMP_ID) + mock_get_lb_for_amp.assert_called_once_with(_db_session, AMP_ID) + mock_flavor_meta.assert_called_once_with(_db_session, 'vanilla') + (base_taskflow.BaseTaskFlowEngine._taskflow_load. 
+ assert_called_once_with(_flow_mock, + store={constants.AMPHORA: _amphora_mock, + constants.FLAVOR: {'test': 'dict'}})) + _flow_mock.run.assert_called_once_with() + + # Test with no flavor + _flow_mock.reset_mock() + mock_amp_repo_get.reset_mock() + mock_get_lb_for_amp.reset_mock() + mock_flavor_meta.reset_mock() + base_taskflow.BaseTaskFlowEngine._taskflow_load.reset_mock() + mock_lb.flavor_id = None + cw.update_amphora_agent_config(AMP_ID) + mock_amp_repo_get.assert_called_once_with(_db_session, id=AMP_ID) + mock_get_lb_for_amp.assert_called_once_with(_db_session, AMP_ID) + mock_flavor_meta.assert_not_called() + (base_taskflow.BaseTaskFlowEngine._taskflow_load. + assert_called_once_with(_flow_mock, + store={constants.AMPHORA: _amphora_mock, + constants.FLAVOR: {}})) + _flow_mock.run.assert_called_once_with() diff --git a/setup.cfg b/setup.cfg index 4734bc3e33..ef41aeab05 100644 --- a/setup.cfg +++ b/setup.cfg @@ -57,9 +57,10 @@ console_scripts = octavia.api.drivers = noop_driver = octavia.api.drivers.noop_driver.driver:NoopProviderDriver noop_driver-alt = octavia.api.drivers.noop_driver.driver:NoopProviderDriver - amphora = octavia.api.drivers.amphora_driver.driver:AmphoraProviderDriver + amphora = octavia.api.drivers.amphora_driver.v1.driver:AmphoraProviderDriver # octavia is an alias for backward compatibility - octavia = octavia.api.drivers.amphora_driver.driver:AmphoraProviderDriver + octavia = octavia.api.drivers.amphora_driver.v1.driver:AmphoraProviderDriver + amphorav2 = octavia.api.drivers.amphora_driver.v2.driver:AmphoraProviderDriver octavia.amphora.drivers = amphora_noop_driver = octavia.amphorae.drivers.noop_driver.driver:NoopAmphoraLoadBalancerDriver amphora_haproxy_rest_driver = octavia.amphorae.drivers.haproxy.rest_api_driver:HaproxyAmphoraLoadBalancerDriver @@ -91,7 +92,7 @@ octavia.cert_manager = octavia.barbican_auth = barbican_acl_auth = octavia.certificates.common.auth.barbican_acl:BarbicanACLAuth octavia.plugins = - hot_plug_plugin = octavia.controller.worker.controller_worker:ControllerWorker + hot_plug_plugin = octavia.controller.worker.v1.controller_worker:ControllerWorker oslo.config.opts = octavia = octavia.opts:list_opts oslo.policy.policies = diff --git a/tools/flow-list.txt b/tools/flow-list.txt index 76e153bf11..87dce44286 100644 --- a/tools/flow-list.txt +++ b/tools/flow-list.txt @@ -2,30 +2,30 @@ # Some flows are used by other flows, so just list the primary flows here # Format: # module class flow -octavia.controller.worker.flows.amphora_flows AmphoraFlows get_create_amphora_flow -octavia.controller.worker.flows.amphora_flows AmphoraFlows get_failover_flow -octavia.controller.worker.flows.amphora_flows AmphoraFlows cert_rotate_amphora_flow -octavia.controller.worker.flows.load_balancer_flows LoadBalancerFlows get_create_load_balancer_flow -octavia.controller.worker.flows.load_balancer_flows LoadBalancerFlows get_delete_load_balancer_flow -octavia.controller.worker.flows.load_balancer_flows LoadBalancerFlows get_cascade_delete_load_balancer_flow -octavia.controller.worker.flows.load_balancer_flows LoadBalancerFlows get_update_load_balancer_flow -octavia.controller.worker.flows.listener_flows ListenerFlows get_create_listener_flow -octavia.controller.worker.flows.listener_flows ListenerFlows get_create_all_listeners_flow -octavia.controller.worker.flows.listener_flows ListenerFlows get_delete_listener_flow -octavia.controller.worker.flows.listener_flows ListenerFlows get_update_listener_flow -octavia.controller.worker.flows.pool_flows PoolFlows 
get_create_pool_flow -octavia.controller.worker.flows.pool_flows PoolFlows get_delete_pool_flow -octavia.controller.worker.flows.pool_flows PoolFlows get_update_pool_flow -octavia.controller.worker.flows.member_flows MemberFlows get_create_member_flow -octavia.controller.worker.flows.member_flows MemberFlows get_delete_member_flow -octavia.controller.worker.flows.member_flows MemberFlows get_update_member_flow -octavia.controller.worker.flows.member_flows MemberFlows get_batch_update_members_flow -octavia.controller.worker.flows.health_monitor_flows HealthMonitorFlows get_create_health_monitor_flow -octavia.controller.worker.flows.health_monitor_flows HealthMonitorFlows get_delete_health_monitor_flow -octavia.controller.worker.flows.health_monitor_flows HealthMonitorFlows get_update_health_monitor_flow -octavia.controller.worker.flows.l7policy_flows L7PolicyFlows get_create_l7policy_flow -octavia.controller.worker.flows.l7policy_flows L7PolicyFlows get_delete_l7policy_flow -octavia.controller.worker.flows.l7policy_flows L7PolicyFlows get_update_l7policy_flow -octavia.controller.worker.flows.l7rule_flows L7RuleFlows get_create_l7rule_flow -octavia.controller.worker.flows.l7rule_flows L7RuleFlows get_delete_l7rule_flow -octavia.controller.worker.flows.l7rule_flows L7RuleFlows get_update_l7rule_flow +octavia.controller.worker.v1.flows.amphora_flows AmphoraFlows get_create_amphora_flow +octavia.controller.worker.v1.flows.amphora_flows AmphoraFlows get_failover_flow +octavia.controller.worker.v1.flows.amphora_flows AmphoraFlows cert_rotate_amphora_flow +octavia.controller.worker.v1.flows.load_balancer_flows LoadBalancerFlows get_create_load_balancer_flow +octavia.controller.worker.v1.flows.load_balancer_flows LoadBalancerFlows get_delete_load_balancer_flow +octavia.controller.worker.v1.flows.load_balancer_flows LoadBalancerFlows get_cascade_delete_load_balancer_flow +octavia.controller.worker.v1.flows.load_balancer_flows LoadBalancerFlows get_update_load_balancer_flow +octavia.controller.worker.v1.flows.listener_flows ListenerFlows get_create_listener_flow +octavia.controller.worker.v1.flows.listener_flows ListenerFlows get_create_all_listeners_flow +octavia.controller.worker.v1.flows.listener_flows ListenerFlows get_delete_listener_flow +octavia.controller.worker.v1.flows.listener_flows ListenerFlows get_update_listener_flow +octavia.controller.worker.v1.flows.pool_flows PoolFlows get_create_pool_flow +octavia.controller.worker.v1.flows.pool_flows PoolFlows get_delete_pool_flow +octavia.controller.worker.v1.flows.pool_flows PoolFlows get_update_pool_flow +octavia.controller.worker.v1.flows.member_flows MemberFlows get_create_member_flow +octavia.controller.worker.v1.flows.member_flows MemberFlows get_delete_member_flow +octavia.controller.worker.v1.flows.member_flows MemberFlows get_update_member_flow +octavia.controller.worker.v1.flows.member_flows MemberFlows get_batch_update_members_flow +octavia.controller.worker.v1.flows.health_monitor_flows HealthMonitorFlows get_create_health_monitor_flow +octavia.controller.worker.v1.flows.health_monitor_flows HealthMonitorFlows get_delete_health_monitor_flow +octavia.controller.worker.v1.flows.health_monitor_flows HealthMonitorFlows get_update_health_monitor_flow +octavia.controller.worker.v1.flows.l7policy_flows L7PolicyFlows get_create_l7policy_flow +octavia.controller.worker.v1.flows.l7policy_flows L7PolicyFlows get_delete_l7policy_flow +octavia.controller.worker.v1.flows.l7policy_flows L7PolicyFlows get_update_l7policy_flow 
+octavia.controller.worker.v1.flows.l7rule_flows L7RuleFlows get_create_l7rule_flow +octavia.controller.worker.v1.flows.l7rule_flows L7RuleFlows get_delete_l7rule_flow +octavia.controller.worker.v1.flows.l7rule_flows L7RuleFlows get_update_l7rule_flow diff --git a/zuul.d/jobs.yaml b/zuul.d/jobs.yaml index fd8e3c5168..769aa73f7b 100644 --- a/zuul.d/jobs.yaml +++ b/zuul.d/jobs.yaml @@ -79,3 +79,19 @@ - openstack/octavia-lib - openstack/octavia-tempest-plugin - openstack/python-octaviaclient + +- job: + name: octavia-v2-dsvm-scenario-amphora-v2 + parent: octavia-v2-dsvm-scenario + vars: + devstack_local_conf: + post-config: + $OCTAVIA_CONF: + api_settings: + default_provider_driver: amphorav2 + enabled_provider_drivers: amphorav2:The v2 amphora driver. + test-config: + "$TEMPEST_CONFIG": + load_balancer: + enabled_provider_drivers: amphorav2:The v2 amphora driver. + provider: amphorav2 diff --git a/zuul.d/projects.yaml b/zuul.d/projects.yaml index 4ef20b1128..c365c81eaa 100644 --- a/zuul.d/projects.yaml +++ b/zuul.d/projects.yaml @@ -58,6 +58,14 @@ - ^octavia/tests/unit/.*$ - ^releasenotes/.*$ voting: false + - octavia-v2-dsvm-scenario-amphora-v2: + irrelevant-files: + - ^.*\.rst$ + - ^api-ref/.*$ + - ^doc/.*$ + - ^octavia/tests/unit/.*$ + - ^releasenotes/.*$ + voting: false gate: queue: octavia jobs:
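As a usage sketch only (assuming the provider is selected directly in octavia.conf rather than through the devstack post-config carried by the octavia-v2-dsvm-scenario-amphora-v2 job above), the new amphorav2 entry point registered in setup.cfg would be enabled with the same api_settings options that job sets:

[api_settings]
default_provider_driver = amphorav2
enabled_provider_drivers = amphorav2:The v2 amphora driver.

The option names are taken verbatim from the job definition; whether amphorav2 should replace the default amphora provider in a given deployment is left to the deployer.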